* WIP * fixing layouts and assets updating Gemfile.lock with bundle install (rvm use ruby-2.2.9) Updated links in header and footer switch to paginate v2 gem comment sidebar logic fixing css regression retry paginate updated local ruby version reverting to original pagination gem remove text, make heading roboto, next/prev as buttons * reintoduce dir tree changes format checking new posts shrink fab-icons down to 33px * reintoduce dir tree changes format checking new posts shrink fab-icons down to 33px * next/prev buttons on posts and index * Change "latest" to v1.10 * rename .md to match old blog and added explict permalinks for urls * replacing a link to old blogpull/7956/head
parent
0a6b5b7d73
commit
4e22f60956
4
Gemfile
4
Gemfile
|
|
@ -44,10 +44,10 @@ group :jekyll_plugins do
|
|||
gem "jekyll-theme-tactile", "0.0.3"
|
||||
gem "jekyll-theme-time-machine", "0.0.3"
|
||||
gem "jekyll-titles-from-headings", "~> 0.1"
|
||||
gem "jekyll-include-cache", "~> 0.1"
|
||||
gem 'jekyll-youtube', '~> 1.0'
|
||||
end
|
||||
|
||||
gem "jekyll-include-cache", "~> 0.1"
|
||||
|
||||
gem "kramdown", "~> 1.11"
|
||||
gem "rouge", "~> 2.0"
|
||||
gem "pry"
|
||||
|
|
|
|||
51
Gemfile.lock
51
Gemfile.lock
|
|
@ -12,9 +12,9 @@ GEM
|
|||
ethon (0.11.0)
|
||||
ffi (>= 1.3.0)
|
||||
execjs (2.7.0)
|
||||
faraday (0.13.1)
|
||||
faraday (0.14.0)
|
||||
multipart-post (>= 1.2, < 3)
|
||||
ffi (1.9.18)
|
||||
ffi (1.9.21)
|
||||
forwardable-extended (2.6.0)
|
||||
jekyll (3.6.0)
|
||||
addressable (~> 2.4)
|
||||
|
|
@ -33,11 +33,11 @@ GEM
|
|||
coffee-script (~> 2.2)
|
||||
jekyll-default-layout (0.1.4)
|
||||
jekyll (~> 3.0)
|
||||
jekyll-feed (0.9.2)
|
||||
jekyll-feed (0.9.3)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-gist (1.4.1)
|
||||
jekyll-gist (1.5.0)
|
||||
octokit (~> 4.2)
|
||||
jekyll-github-metadata (2.9.3)
|
||||
jekyll-github-metadata (2.9.4)
|
||||
jekyll (~> 3.1)
|
||||
octokit (~> 4.0, != 4.4.0)
|
||||
jekyll-include-cache (0.1.0)
|
||||
|
|
@ -49,13 +49,13 @@ GEM
|
|||
jekyll (~> 3.0)
|
||||
jekyll-redirect-from (0.13.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-relative-links (0.5.1)
|
||||
jekyll-relative-links (0.5.2)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-sass-converter (1.5.0)
|
||||
jekyll-sass-converter (1.5.2)
|
||||
sass (~> 3.4)
|
||||
jekyll-seo-tag (2.3.0)
|
||||
jekyll-seo-tag (2.4.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-sitemap (1.1.1)
|
||||
jekyll-sitemap (1.2.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-swiss (0.4.0)
|
||||
jekyll-theme-architect (0.0.3)
|
||||
|
|
@ -86,35 +86,41 @@ GEM
|
|||
jekyll (~> 3.3)
|
||||
jekyll-theme-time-machine (0.0.3)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-titles-from-headings (0.5.0)
|
||||
jekyll-titles-from-headings (0.5.1)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-watch (1.5.0)
|
||||
listen (~> 3.0, < 3.1)
|
||||
jekyll-watch (1.5.1)
|
||||
listen (~> 3.0)
|
||||
jekyll-youtube (1.0.0)
|
||||
jekyll
|
||||
json (1.8.6)
|
||||
kramdown (1.15.0)
|
||||
kramdown (1.16.2)
|
||||
liquid (4.0.0)
|
||||
listen (3.0.8)
|
||||
listen (3.1.5)
|
||||
rb-fsevent (~> 0.9, >= 0.9.4)
|
||||
rb-inotify (~> 0.9, >= 0.9.7)
|
||||
ruby_dep (~> 1.2)
|
||||
mercenary (0.3.6)
|
||||
method_source (0.9.0)
|
||||
minima (2.1.1)
|
||||
jekyll (~> 3.3)
|
||||
minima (2.3.0)
|
||||
jekyll (~> 3.5)
|
||||
jekyll-feed (~> 0.9)
|
||||
jekyll-seo-tag (~> 2.1)
|
||||
multipart-post (2.0.0)
|
||||
octokit (4.7.0)
|
||||
octokit (4.8.0)
|
||||
sawyer (~> 0.8.0, >= 0.5.3)
|
||||
pathutil (0.16.0)
|
||||
pathutil (0.16.1)
|
||||
forwardable-extended (~> 2.6)
|
||||
pry (0.11.2)
|
||||
pry (0.11.3)
|
||||
coderay (~> 1.1.0)
|
||||
method_source (~> 0.9.0)
|
||||
public_suffix (3.0.0)
|
||||
public_suffix (3.0.2)
|
||||
rb-fsevent (0.10.2)
|
||||
rb-inotify (0.9.10)
|
||||
ffi (>= 0.5.0, < 2)
|
||||
rouge (2.2.1)
|
||||
ruby_dep (1.5.0)
|
||||
safe_yaml (1.0.4)
|
||||
sass (3.5.3)
|
||||
sass (3.5.5)
|
||||
sass-listen (~> 4.0.0)
|
||||
sass-listen (4.0.0)
|
||||
rb-fsevent (~> 0.9, >= 0.9.4)
|
||||
|
|
@ -164,6 +170,7 @@ DEPENDENCIES
|
|||
jekyll-theme-tactile (= 0.0.3)
|
||||
jekyll-theme-time-machine (= 0.0.3)
|
||||
jekyll-titles-from-headings (~> 0.1)
|
||||
jekyll-youtube (~> 1.0)
|
||||
json (~> 1.7, >= 1.7.7)
|
||||
kramdown (~> 1.11)
|
||||
minima (~> 2.0)
|
||||
|
|
@ -173,4 +180,4 @@ DEPENDENCIES
|
|||
unicode-display_width (~> 1.1)
|
||||
|
||||
BUNDLED WITH
|
||||
1.15.4
|
||||
1.16.1
|
||||
|
|
|
|||
2
Makefile
2
Makefile
|
|
@ -9,7 +9,7 @@ build: ## Build site with production settings and put deliverables in _site.
|
|||
bundle exec jekyll build
|
||||
|
||||
build-preview: ## Build site with drafts and future posts enabled.
|
||||
bundle exec jekyll build --drafts --future
|
||||
bundle exec jekyll build --drafts --future --trace
|
||||
|
||||
serve: ## Boot the development server.
|
||||
bundle exec jekyll serve
|
||||
|
|
|
|||
|
|
@ -14,6 +14,10 @@ safe: false
|
|||
lsi: false
|
||||
|
||||
latest: "v1.10"
|
||||
|
||||
paginate: 7
|
||||
paginate_path: "/blog/page:num/"
|
||||
|
||||
defaults:
|
||||
-
|
||||
scope:
|
||||
|
|
@ -66,6 +70,8 @@ plugins:
|
|||
- jekyll-sitemap
|
||||
- jekyll-seo-tag
|
||||
- jekyll-include-cache
|
||||
- jekyll-paginate
|
||||
- jekyll-youtube
|
||||
# disabled gems
|
||||
# - jekyll-redirect-from
|
||||
|
||||
|
|
@ -90,3 +96,4 @@ tocs:
|
|||
- samples
|
||||
- search
|
||||
- imported
|
||||
- blog
|
||||
|
|
|
|||
|
|
@ -0,0 +1,227 @@
|
|||
<div>
|
||||
<span style="font-family: Open Sans;"><span style="font-size: 13px; line-height: 17.9400005340576px; white-space: pre-wrap;"><br /></span></span></div>
|
||||
<div>
|
||||
<span style="font-family: 'Open Sans'; font-size: 13px; line-height: 17.9400005340576px; white-space: pre-wrap;">Every week the Kubernetes contributing community meet virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.</span></div>
|
||||
<div>
|
||||
<span style="font-family: 'Open Sans'; font-size: 13px; line-height: 17.9400005340576px; white-space: pre-wrap;"><br /></span></div>
|
||||
<div>
|
||||
<span style="font-family: Open Sans;"><span style="font-size: 13px; line-height: 17.9400005340576px; white-space: pre-wrap;">Agenda:</span></span></div>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Quinton - Cluster federation</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: Arial; font-size: 15px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: #1155cc; font-family: Arial; font-size: 15px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;">doc: </span><a href="http://tinyurl.com/ubernetes" style="text-decoration: none;"><span style="background-color: transparent; color: #1155cc; font-family: Arial; font-size: 15px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;">http://tinyurl.com/ubernetes</span></a></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: Arial; font-size: 15px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<a href="https://docs.google.com/presentation/d/1dWKNzjYM6ZYjFXpPwamikC6_BV3PDe33NAK9zR_M7jg/edit?usp=sharing" style="text-decoration: none;"><span style="background-color: transparent; color: #1155cc; font-family: Arial; font-size: 15px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;">slides</span></a></div>
|
||||
</li>
|
||||
</ul>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Satnam - Performance benchmarking update</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
<b id="docs-internal-guid-ae742fb5-80cb-c8b0-6fc8-e1dc4853480c" style="font-weight: normal;"><br /></b>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Notes from meeting:</span></div>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Quinton - Cluster federation</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Ideas floating around after meetup in SF</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Document: </span><span style="background-color: transparent; color: #1155cc; font-family: Arial; font-size: 15px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;">doc: </span><a href="http://tinyurl.com/ubernetes" style="text-decoration: none;"><span style="background-color: transparent; color: #1155cc; font-family: Arial; font-size: 15px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;">http://tinyurl.com/ubernetes</span></a></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Please read and comment</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Not 1.0, but put a doc together to show roadmap</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Can be built outside of Kubernetes</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">API to control things across multiple clusters, include some logic</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Auth(n)(z)</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Scheduling Policies</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">…</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Different reasons for cluster federation</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Zone (un) availability : Resilient to zone failures</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Hybrid cloud: some in cloud, some on prem. for various reasons</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Avoid cloud provider lock-in. For various reasons</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">“Cloudbursting” - automatic overflow into the cloud</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Hard problems</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Location affinity. How close do pods need to be? </span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Workload coupling</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Absolute location (e.g. eu data needs to be in eu)</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Cross cluster service discovery</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">How does service/DNS work across clusters</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Cross cluster workload migration</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">How do you move an application piece by piece across clusters?</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Cross cluster scheduling</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">How do know enough about clusters to know where to schedule</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Possibly use a cost function to achieve affinities with minimal complexity</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Can also use cost to determine where to schedule (under used clusters are cheaper than over-used clusters)</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Implicit requirements</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Cross cluster integration shouldn’t create cross-cluster failure modes</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Independently usable in a disaster situation where Ubernetes dies.</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Unified visibility</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Want to have unified monitoring, alerting, logging, introspection, ux, etc.</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Unified quota and identity management</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Want to have user database and auth(n)/(z) in a single place</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Important to note, most causes of software failure are not the infrastructure</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Botched software upgrades</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Botched config upgrades</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Botched key distribution</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Overload</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Failed external dependencies</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Discussion:</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Where do you draw the “ubernetes” line</span></div>
|
||||
</li>
|
||||
<ol style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Likely at the availability zone, but could be at the rack, or the region</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: lower-roman; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Important to not pigeonhole and prevent other users</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
</ul>
|
||||
</ol>
|
||||
<br />
|
||||
<ol start="2" style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Satnam - Soak Test</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Want to measure things that run for a long time to make sure that the cluster is stable over time. Performance doesn’t degrade, no memory leaks, etc.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">github.com/GoogleCloudPlatform/kubernetes/test/soak/…</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Single binary, puts lots of pods on each node, and queries each pod to make sure that it is running.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Pods are being created much, much more quickly (even in the past week) to make things go more quickly.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Once the pods are up running, we hit the pods via the proxy. Decision to hit the proxy was deliberate so that we test the kubernetes apiserver.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Code is already checked in.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Pin pods to each node, exercise every pod, make sure that you get a response for each node.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Single binary, run forever.</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: decimal; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Brian - v1beta3 is enabled by default, v1beta1 and v1beta2 deprecated, turned off in June. Should still work with upgrading existing clusters, etc.</span></div>
|
||||
</li>
|
||||
</ol>
|
||||
<div style='clear: both;'></div>
|
||||
|
|
@ -0,0 +1,176 @@
|
|||
<!-- Copied Nested List HTML from blogger site to force appropriate render in jekyll -->
|
||||
<!-- See Kramdown Issue for details on existing issue with nested list in kramdown :https://github.com/gettalong/kramdown/issues/368 -->
|
||||
|
||||
<div class="nested-list">
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Every week the Kubernetes contributing community meet virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.</span></div>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;"><br /></span></div>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Agenda:</span></div>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">- Andy - demo remote execution and port forwarding</span></div>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">- Quinton - Cluster federation - Postponed</span></div>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">- Clayton - UI code sharing and collaboration around Kubernetes</span></div>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;"><br /></span></div>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Notes from meeting:</span></div>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">1. Andy from RedHat:</span></div>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Demo remote execution</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: transparent; color: black; font-family: 'Open Sans'; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">kubectl exec -p $POD -- $CMD</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Makes a connection to the master as proxy, figures out which node the pod is on, proxies connection to kubelet, which does the interesting bit. via nsenter.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Multiplexed streaming over HTTP using SPDY</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Also interactive mode:</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: square; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;"> kubectl exec -p $POD -it -- bash -il</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Assumes first container. Can use -c $CONTAINER to pick a particular one.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">If have gdb pre-installed in container, then can interactively attach it to running process</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: square; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">backtrace, symbol tables, print, etc. Most things you can do with gdb.</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Can also with careful flag crafting run rsync over this or set up sshd inside container.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Some feedback via chat:</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: square; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">UI feedback: things like kubectl log do not take a -p/-c they just expect it to be there.....</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.3499999999999999; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">right</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.3499999999999999; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">i think we would remove -p eventually</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.3499999999999999; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">we were just trying to be cautious because we knew we needed to support CLI args afterwards</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.3499999999999999; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">and everyone gets that wrong the first time</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
<li dir="ltr" style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: square; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">-- is hard</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.3499999999999999; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">cobra had bugs with it</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.3499999999999999; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">but that's what we support</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.3499999999999999; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #222222; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">also we want to bypass shell args in some cases</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
</ul>
|
||||
</ul>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Andy also demoed port forwarding</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">kubectl port-forward -p $POD $LOCALPORT:$REMOTEPORT &</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">localhost% http :$LOCALPORT</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">nsenter vs. docker exec</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">want to inject a binary under control of the host, similar to pre-start hooks</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">socat, nsenter, whatever the pre-start hook needs</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">would be nice to blog post on this</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">how to use it. or even a screencast?</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">we have a youtube channel and a blog for kubernetes.io</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">version of nginx in wheezy is too old to support needed master-proxy functionality</span></div>
|
||||
</li>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: circle; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">but wheezy-backports seems to have an ok version, so we should pull that in.</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
</ul>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">2. Clayton: where are we wrt a community organization for e.g. kubernetes UI components?</span></div>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">google-containers-ui IRC channel, mailing list.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Tim: google-containers prefix is historical, should just do “kubernetes-ui”</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">also want to put design resources in, and bower expects its own repo.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">General agreement</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">3. Brian Grant:</span></div>
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Testing v1beta3, getting that ready to go in.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Paul working on changes to commandline stuff.</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Early to mid next week, try to enable v1beta3 by default?</span></div>
|
||||
</li>
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">For any other changes, file issue and CC thockin.</span></div>
|
||||
</li>
|
||||
</ul>
|
||||
<div dir="ltr" style="line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">4. General consensus that 30 minutes is better than 60</span></div>
|
||||
<br />
|
||||
<ul style="margin-bottom: 0pt; margin-top: 0pt;">
|
||||
<li dir="ltr" style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;"><span style="background-color: white; color: #212121; font-family: Arial; font-size: 13px; font-style: normal; font-variant: normal; font-weight: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;">Shouldn’t artificially try to extend just to fill time.</span></li>
|
||||
</ul>
|
||||
</div>
|
||||
|
|
@ -1,9 +1,9 @@
|
|||
<footer>
|
||||
<footer col="12">
|
||||
<main class="light-text">
|
||||
<nav>
|
||||
<a href="/docs/tutorials/stateless-application/hello-minikube/">Get Started</a>
|
||||
<a href="/docs/home/">Documentation</a>
|
||||
<a href="http://blog.kubernetes.io/">Blog</a>
|
||||
<a href="/blog">Blog</a>
|
||||
<a href="/partners/">Partners</a>
|
||||
<a href="/community/">Community</a>
|
||||
<a href="/case-studies/">Case Studies</a>
|
||||
|
|
|
|||
|
|
@ -29,4 +29,5 @@
|
|||
{% if page.js %}{% assign jslist = page.js | split: ',' | compact %}{% for jsurl in jslist %}<script src="{{ jsurl | strip }}"></script>
|
||||
{% endfor %}<!-- custom js added -->{% else %}<!-- no custom js detected -->{% endif %}
|
||||
{% seo %}
|
||||
{% feed_meta %}
|
||||
</head>
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
<div class="nav-buttons" data-auto-burger="primary">
|
||||
<ul class="global-nav">
|
||||
<li><a href="/docs/home/">Documentation</a></li>
|
||||
<li><a href="http://blog.kubernetes.io/">Blog</a></li>
|
||||
<li><a href="/blog">Blog</a></li>
|
||||
<li><a href="/partners/">Partners</a></li>
|
||||
<li><a href="/community/">Community</a></li>
|
||||
<li><a href="/case-studies/">Case Studies</a></li>
|
||||
|
|
@ -45,7 +45,7 @@
|
|||
<p>If you need help, you can connect with other Kubernetes users and the Kubernetes authors, attend community events, and watch video presentations from around the web.</p>
|
||||
</div>
|
||||
<div class="nav-box">
|
||||
<h3><a href="http://blog.kubernetes.io">Blog</a></h3>
|
||||
<h3><a href="/blog">Blog</a></h3>
|
||||
<p>Read the latest news for Kubernetes and the containers space in general, and get technical how-tos hot off the presses.</p>
|
||||
</div>
|
||||
</main>
|
||||
|
|
|
|||
|
|
@ -0,0 +1 @@
|
|||
<iframe width="560" height="315" src="https://www.youtube.com/embed/{{ include.id }}" frameborder="0" allowfullscreen></iframe>
|
||||
|
|
@ -0,0 +1,193 @@
|
|||
---
|
||||
#empty front matter
|
||||
---
|
||||
<!DOCTYPE html>
|
||||
<html id="blog">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<link rel="shortcut icon" type="image/png" href="/images/favicon.png">
|
||||
<link rel="stylesheet" type="text/css" href="https://fonts.googleapis.com/css?family=Roboto:400,100,100italic,300,300italic,400italic,500,500italic,700,700italic,900,900italic">
|
||||
<link rel="stylesheet" type="text/css" href="https://fonts.googleapis.com/css?family=Roboto+Mono">
|
||||
<link rel="stylesheet" type="text/css" href="/css/jquery-ui.min.css">
|
||||
<link rel='stylesheet' type='text/css' href='https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css'>
|
||||
<!-- <link rel="stylesheet" type="text/css" href="/css/case_study_styles.css"> -->
|
||||
<link rel="stylesheet" type="text/css" href="/css/blog.css">
|
||||
<link rel="stylesheet" type="text/css" href="/css/styles.css">
|
||||
<!-- <link rel="stylesheet" type="text/css" href="/css/sweetalert.css"> -->
|
||||
<link rel="stylesheet" type="text/css" href="/css/callouts.css">
|
||||
|
||||
<link rel="stylesheet" type="text/css" href="/css/custom-jekyll/tags.css">
|
||||
{% if page.deprecated %}<link rel="stylesheet" type="text/css" href="/css/deprecation-warning.css">{% endif %}
|
||||
<link rel="stylesheet" type="text/css" href="/css/gridpage.css">
|
||||
<link rel="stylesheet" type="text/css" href="/css/blog.css">
|
||||
|
||||
{% if page.description %}
|
||||
<meta name="description" content="{{ page.description }}" />
|
||||
{% else %}
|
||||
<meta name="description" content="{{ page.title }}" />
|
||||
{% endif %}
|
||||
|
||||
<script src="/js/jquery-2.2.0.min.js"></script>
|
||||
<script src="/js/jquery-ui.min.js"></script>
|
||||
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
|
||||
<script src="/js/script.js"></script>
|
||||
<script src="/js/sweetalert.min.js"></script>
|
||||
<script src="/js/custom-jekyll/tags.js"></script>
|
||||
<script defer src="https://use.fontawesome.com/releases/v5.0.6/js/all.js"></script>
|
||||
|
||||
{% feed_meta %}
|
||||
|
||||
{% seo %}
|
||||
</head>
|
||||
|
||||
|
||||
|
||||
<body>
|
||||
<div id="cellophane" onclick="kub.toggleMenu()"></div>
|
||||
<header>
|
||||
<nav class="nav-buttons" data-auto-burger="primary">
|
||||
<ul class="global-nav">
|
||||
<li><a href="/docs/home/">Documentation</a></li>
|
||||
<li><a href="/blog">Blog</a></li>
|
||||
<li><a href="/partners/">Partners</a></li>
|
||||
<li><a href="/community/">Community</a></li>
|
||||
<li><a href="/case-studies/">Case Studies</a></li>
|
||||
<li>
|
||||
<a href="#">
|
||||
{{page.version}} <span class="ui-icon ui-icon-carat-1-s"></span>
|
||||
</a>
|
||||
<ul>
|
||||
{% for version in page.versions %}
|
||||
{% if page.versionedurl contains version.version %}
|
||||
<li><a href="{{ version.url }}{{ page.versionedurl[version.version] }}">{{ version.version }}</a></li>
|
||||
{% else %}
|
||||
<li><a href="{{ version.url }}{{ page.url }}">{{ version.version }}</a></li>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<a href="/docs/home" class="button" id="viewDocs" data-auto-burger-exclude>View Documentation</a>
|
||||
<a href="/docs/tutorials/kubernetes-basics/" class="button" id="tryKubernetes" data-auto-burger-exclude>Try Kubernetes</a>
|
||||
<button id="hamburger" onclick="kub.toggleMenu()" data-auto-burger-exclude><div></div></button>
|
||||
</nav>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<div class="container-fluid">
|
||||
<div id="blog-hero" class="light-text">
|
||||
<a class= 'logo' href='http://kubernetes.io/'></a>
|
||||
<div class='blog-title'>
|
||||
<h1> Kubernetes Blog </h1>
|
||||
</div>
|
||||
|
||||
<div class="clear" style="clear: both"></div>
|
||||
</div>
|
||||
|
||||
|
||||
<div class="row blog-content" >
|
||||
<div class="col-xs-10 col-sm-9 col-lg-9 text">
|
||||
|
||||
<h4 class="date-header">{{ page.date | date: ' %A, %B %-d, %Y' }} </h4>
|
||||
<h3 class="post-title entry-title"> {{page.title}} </h3>
|
||||
{{ content }}
|
||||
|
||||
<div class="PageNavigation">
|
||||
<div class="pagerButton left">
|
||||
{% if page.next.url %}
|
||||
<h4><a class=" button" href="{{page.next.url}}"> « Next </a></h4>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<div class="pagerButton right">
|
||||
{% if page.previous.url %}
|
||||
<h4><a class=" button" href="{{page.previous.url}}"> Prev »</a></h4>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<div class="col-xs-1 col-sm-1 col-sm-3 col-lg-3 text">
|
||||
<div class="widget-content">
|
||||
<link href='http://kubernetes.io/feed.xml' rel='alternate' type='application/atom+xml'>
|
||||
<a class="widget-link" href="https://kubernetes.io/feed.xml"> <i class="fas fa-rss fab-icon"> </i> </a>
|
||||
<a class="widget-link" href="https://twitter.com/kubernetesio"> <i class="fab fa-twitter-square fab-icon"> </i> @Kubernetesio</a>
|
||||
<a class="widget-link" href="https://github.com/kubernetes/kubernetes"> <i class="fab fa-github-square fab-icon"></i> View on Github </a>
|
||||
<a class="widget-link" href="http://slack.k8s.io"><i class="fab fa-slack fab-icon"> </i> #kubernetes-users </a>
|
||||
<a class="widget-link" href="http://stackoverflow.com/questions/tagged/kubernetes"><i class="fab fa-stack-overflow fab-icon"></i>Stack Overflow</a>
|
||||
<a class="widget-link" href="http://get.k8s.io/"><i class="fas fa-download fab-icon"></i>Download Kubernetes</a>
|
||||
</div>
|
||||
|
||||
<!-- Begin Sidebar Loop -->
|
||||
{% for post in site.posts %}
|
||||
{% capture this_year %}{{ post.date | date: "%Y" }}{% endcapture %}
|
||||
{% capture this_month %}{{ post.date | date: "%B" }}{% endcapture %}
|
||||
{% capture next_year %}{{ post.previous.date | date: "%Y" }}{% endcapture %}
|
||||
{% capture next_month %}{{ post.previous.date | date: "%B" }}{% endcapture %}
|
||||
{% if forloop.first %}
|
||||
<div class="sidebar-current-year">
|
||||
<h4 id="{{ this_year }}-ref">{{this_year}}</h4>
|
||||
<!-- Here we open a div and ul that are conditionally closed in the lower blocks -->
|
||||
<div>
|
||||
<h4 id="{{ this_year }}-{{ this_month }}-ref">{{ this_month }}</h4>
|
||||
<ul>
|
||||
{% endif %}
|
||||
<li><a href="{{ post.url }}">{{ post.title }}</a></li>
|
||||
{% if forloop.last %} <!-- This section closes the entire div and ul at the final iteration-->
|
||||
</ul>
|
||||
</div>
|
||||
</div> <!-- end of final conditional -->
|
||||
{% else %}
|
||||
{% if this_year != next_year %}
|
||||
</ul>
|
||||
<!-- this closes the collapsible year body -->
|
||||
</div>
|
||||
<!-- this closes the sidebar-previous-year opened below -->
|
||||
</div>
|
||||
<div class="sidebar-previous-year">
|
||||
<input type="checkbox" id="{{ next_year }}-ref-checkbox" class="heading-year-toggle-checkbox"/>
|
||||
<label class="collapsible-year-heading-label" for="{{ next_year }}-ref-checkbox">
|
||||
<span class="collapsible-year-body-caret">► </span>
|
||||
<h4 class="collapsible-year-header" id="{{ next_year }}-ref">{{next_year}}</h4>
|
||||
</label>
|
||||
<div class="collapsible-year-body">
|
||||
<input type="checkbox" id="{{ next_year }}-{{ next_month }}-ref-checkbox" class="heading-month-toggle-checkbox"/>
|
||||
<label class="collapsible-month-heading-label" for="{{ next_year }}-{{ next_month }}-ref-checkbox">
|
||||
<span class="collapsible-posts-list-caret">► </span>
|
||||
<h5 class="collapsible-month-header" id="{{ next_year }}-{{ next_month }}-ref">{{ next_month }}</h5>
|
||||
</label>
|
||||
<ul class="collapsible-posts-list">
|
||||
{% else %}
|
||||
{% if this_month != next_month %}
|
||||
</ul>
|
||||
|
||||
|
||||
<input type="checkbox" id="{{ this_year }}-{{ next_month }}-ref-checkbox" class="heading-month-toggle-checkbox"/>
|
||||
<label class="collapsible-month-heading-label" for="{{ this_year }}-{{ next_month }}-ref-checkbox">
|
||||
<span class="collapsible-posts-list-caret">► </span>
|
||||
<h5 class="collapsible-month-header" id="{{ this_year }}-{{ next_month }}-ref">{{ next_month }}</h5>
|
||||
</label>
|
||||
|
||||
<ul class="collapsible-posts-list">
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</body>
|
||||
{% include footer.html %}
|
||||
{% include footer-scripts.html %}
|
||||
</html>
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
---
|
||||
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes Gathering Videos "
|
||||
date: Tuesday, March 23, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
If you missed the Kubernetes Gathering in SF last month, fear not! Here are the videos from the evening presentations organized into a playlist on YouTube
|
||||
|
||||
[](https://www.youtube.com/playlist?list=PL69nYSiGNLP2FBVvSLHpJE8_6hRHW8Kxe)
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Participate in a Kubernetes User Experience Study "
|
||||
date: Wednesday, March 31, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
We need your help in shaping the future of Kubernetes and Google Container Engine, and we'd love to have you participate in a remote UX research study to help us learn about your experiences! If you're interested in participating, we invite you to take [this brief survey](http://goo.gl/AXFFMs) to see if you qualify. If you’re selected to participate, we’ll follow up with you directly.
|
||||
|
||||
|
||||
- Length: 60 minute interview
|
||||
- Date: April 7th-15th
|
||||
- Location: Remote
|
||||
- Your gift: $100 Perks gift code\*
|
||||
- Study format: Interview with our researcher
|
||||
|
||||
|
||||
Interested in participating? Take [this brief survey](http://goo.gl/AXFFMs).
|
||||
|
||||
|
||||
|
||||
\* Perks gift codes can be redeemed for gift certificates from VISA and used at a number of online retailers ([http://www.google.com/forms/perks/index1.html](http://www.google.com/forms/perks/index1.html)). Gift codes are only for participants who successfully complete the study session. You’ll be emailed the gift code after you complete the study session.
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - March 27 2015 "
|
||||
date: Sunday, March 28, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Every week the Kubernetes contributing community meets virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
Agenda:
|
||||
|
||||
\- Andy - demo remote execution and port forwarding
|
||||
|
||||
\- Quinton - Cluster federation - Postponed
|
||||
|
||||
\- Clayton - UI code sharing and collaboration around Kubernetes
|
||||
|
||||
Notes from meeting:
|
||||
|
||||
1\. Andy from RedHat:
|
||||
|
||||
* Demo remote execution
|
||||
|
||||
* kubectl exec -p $POD -- $CMD
|
||||
|
||||
* Makes a connection to the master as proxy, figures out which node the pod is on, proxies connection to kubelet, which does the interesting bit, via nsenter.
|
||||
|
||||
* Multiplexed streaming over HTTP using SPDY
|
||||
|
||||
* Also interactive mode:
|
||||
|
||||
* Assumes first container. Can use -c $CONTAINER to pick a particular one.
|
||||
|
||||
* If have gdb pre-installed in container, then can interactively attach it to running process
|
||||
|
||||
* backtrace, symbol tables, print, etc. Most things you can do with gdb.
|
||||
|
||||
* Can also with careful flag crafting run rsync over this or set up sshd inside container.
|
||||
|
||||
* Some feedback via chat:
|
||||
* Andy also demoed port forwarding
|
||||
* nsenter vs. docker exec
|
||||
|
||||
* want to inject a binary under control of the host, similar to pre-start hooks
|
||||
|
||||
* socat, nsenter, whatever the pre-start hook needs
|
||||
* would be nice to blog post on this
|
||||
* version of nginx in wheezy is too old to support needed master-proxy functionality
|
||||
|
||||
2\. Clayton: where are we wrt a community organization for e.g. kubernetes UI components?
|
||||
|
||||
* google-containers-ui IRC channel, mailing list.
|
||||
* Tim: google-containers prefix is historical, should just do "kubernetes-ui"
|
||||
* also want to put design resources in, and bower expects its own repo.
|
||||
* General agreement
|
||||
|
||||
3\. Brian Grant:
|
||||
|
||||
* Testing v1beta3, getting that ready to go in.
|
||||
* Paul working on changes to commandline stuff.
|
||||
* Early to mid next week, try to enable v1beta3 by default?
|
||||
* For any other changes, file issue and CC thockin.
|
||||
|
||||
4\. General consensus that 30 minutes is better than 60
|
||||
|
||||
|
||||
|
||||
* Shouldn't artificially try to extend just to fill time.
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: Welcome to the Kubernetes Blog!
|
||||
date: Saturday, March 20, 2015
|
||||
---
|
||||
Welcome to the new Kubernetes blog. Follow this blog to learn about the Kubernetes Open Source project. We plan to post release notes, how-to articles, events, and maybe even some off topic fun here from time to time.
|
||||
|
||||
|
||||
If you are using Kubernetes or contributing to the project and would like to do a guest post, [please let me know](mailto:kitm@google.com).
|
||||
|
||||
|
||||
|
||||
To start things off, here's a roundup of recent Kubernetes posts from other sites:
|
||||
|
||||
- [Scaling MySQL in the cloud with Vitess and Kubernetes](http://googlecloudplatform.blogspot.com/2015/03/scaling-MySQL-in-the-cloud-with-Vitess-and-Kubernetes.html)
|
||||
- [Container Clusters on VMs](http://googlecloudplatform.blogspot.com/2015/02/container-clusters-on-vms.html)
|
||||
- [Everything you wanted to know about Kubernetes but were afraid to ask](http://googlecloudplatform.blogspot.com/2015/01/everything-you-wanted-to-know-about-Kubernetes-but-were-afraid-to-ask.html)
|
||||
- [What makes a container cluster?](http://googlecloudplatform.blogspot.com/2015/01/what-makes-a-container-cluster.html)
|
||||
- [Integrating OpenStack and Kubernetes with Murano](https://www.mirantis.com/blog/integrating-openstack-and-kubernetes-with-murano/)
|
||||
- [An introduction to containers, Kubernetes, and the trajectory of modern cloud computing](http://googlecloudplatform.blogspot.com/2015/01/in-coming-weeks-we-will-be-publishing.html)
|
||||
- [What is Kubernetes and how to use it?](http://www.centurylinklabs.com/what-is-kubernetes-and-how-to-use-it/)
|
||||
- [OpenShift V3, Docker and Kubernetes Strategy](https://blog.openshift.com/v3-docker-kubernetes-interview/)
|
||||
- [An Introduction to Kubernetes](https://www.digitalocean.com/community/tutorials/an-introduction-to-kubernetes)
|
||||
|
||||
|
||||
Happy cloud computing!
|
||||
|
||||
|
||||
- Kit Merker - Product Manager, Google Cloud Platform
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Borg: The Predecessor to Kubernetes "
|
||||
date: Friday, April 23, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Google has been running containerized workloads in production for more than a decade. Whether it's service jobs like web front-ends and stateful servers, infrastructure systems like [Bigtable](http://research.google.com/archive/bigtable.html) and [Spanner](http://research.google.com/archive/spanner.html), or batch frameworks like [MapReduce](http://research.google.com/archive/mapreduce.html) and [Millwheel](http://research.google.com/pubs/pub41378.html), virtually everything at Google runs as a container. Today, we took the wraps off of Borg, Google’s long-rumored internal container-oriented cluster-management system, publishing details at the academic computer systems conference [Eurosys](http://eurosys2015.labri.fr/). You can find the paper [here](https://research.google.com/pubs/pub43438.html).
|
||||
|
||||
|
||||
|
||||
Kubernetes traces its lineage directly from Borg. Many of the developers at Google working on Kubernetes were formerly developers on the Borg project. We've incorporated the best ideas from Borg in Kubernetes, and have tried to address some pain points that users identified with Borg over the years.
|
||||
|
||||
|
||||
|
||||
To give you a flavor, here are four Kubernetes features that came from our experiences with Borg:
|
||||
|
||||
|
||||
|
||||
1) [Pods](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/pods.md). A pod is the unit of scheduling in Kubernetes. It is a resource envelope in which one or more containers run. Containers that are part of the same pod are guaranteed to be scheduled together onto the same machine, and can share state via local volumes.
|
||||
|
||||
|
||||
|
||||
Borg has a similar abstraction, called an alloc (short for “resource allocation”). Popular uses of allocs in Borg include running a web server that generates logs alongside a lightweight log collection process that ships the log to a cluster filesystem (not unlike fluentd or logstash); running a web server that serves data from a disk directory that is populated by a process that reads data from a cluster filesystem and prepares/stages it for the web server (not unlike a Content Management System); and running user-defined processing functions alongside a storage shard. Pods not only support these use cases, but they also provide an environment similar to running multiple processes in a single VM -- Kubernetes users can deploy multiple co-located, cooperating processes in a pod without having to give up the simplicity of a one-application-per-container deployment model.
|
||||
|
||||
|
||||
|
||||
2) [Services](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md). Although Borg’s primary role is to manage the lifecycles of tasks and machines, the applications that run on Borg benefit from many other cluster services, including naming and load balancing. Kubernetes supports naming and load balancing using the service abstraction: a service has a name and maps to a dynamic set of pods defined by a label selector (see next section). Any container in the cluster can connect to the service using the service name. Under the covers, Kubernetes automatically load-balances connections to the service among the pods that match the label selector, and keeps track of where the pods are running as they get rescheduled over time due to failures.
|
||||
|
||||
|
||||
|
||||
3) [Labels](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/labels.md). A container in Borg is usually one replica in a collection of identical or nearly identical containers that correspond to one tier of an Internet service (e.g. the front-ends for Google Maps) or to the workers of a batch job (e.g. a MapReduce). The collection is called a Job, and each replica is called a Task. While the Job is a very useful abstraction, it can be limiting. For example, users often want to manage their entire service (composed of many Jobs) as a single entity, or to uniformly manage several related instances of their service, for example separate canary and stable release tracks. At the other end of the spectrum, users frequently want to reason about and control subsets of tasks within a Job -- the most common example is during rolling updates, when different subsets of the Job need to have different configurations.
|
||||
|
||||
|
||||
|
||||
Kubernetes supports more flexible collections than Borg by organizing pods using labels, which are arbitrary key/value pairs that users attach to pods (and in fact to any object in the system). Users can create groupings equivalent to Borg Jobs by using a “job:\<jobname\>” label on their pods, but they can also use additional labels to tag the service name, service instance (production, staging, test), and in general, any subset of their pods. A label query (called a “label selector”) is used to select which set of pods an operation should be applied to. Taken together, labels and [replication controllers](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/replication-controller.md) allow for very flexible update semantics, as well as for operations that span the equivalent of Borg Jobs.
|
||||
|
||||
|
||||
|
||||
4) IP-per-Pod. In Borg, all tasks on a machine use the IP address of that host, and thus share the host’s port space. While this means Borg can use a vanilla network, it imposes a number of burdens on infrastructure and application developers: Borg must schedule ports as a resource; tasks must pre-declare how many ports they need, and take as start-up arguments which ports to use; the Borglet (node agent) must enforce port isolation; and the naming and RPC systems must handle ports as well as IP addresses.
|
||||
|
||||
|
||||
|
||||
Thanks to the advent of software-defined overlay networks such as [flannel](https://coreos.com/blog/introducing-rudder/) or those built into [public clouds](https://cloud.google.com/compute/docs/networking), Kubernetes is able to give every pod and service its own IP address. This removes the infrastructure complexity of managing ports, and allows developers to choose any ports they want rather than requiring their software to adapt to the ones chosen by the infrastructure. The latter point is crucial for making it easy to run off-the-shelf open-source applications on Kubernetes--pods can be treated much like VMs or physical hosts, with access to the full port space, oblivious to the fact that they may be sharing the same physical machine with other pods.
|
||||
|
||||
|
||||
|
||||
With the growing popularity of container-based microservice architectures, the lessons Google has learned from running such systems internally have become of increasing interest to the external DevOps community. By revealing some of the inner workings of our cluster manager Borg, and building our next-generation cluster manager as both an open-source project (Kubernetes) and a publicly available hosted service ([Google Container Engine](http://cloud.google.com/container-engine)), we hope these lessons can benefit the broader community outside of Google and advance the state-of-the-art in container scheduling and cluster management.
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Faster than a speeding Latte "
|
||||
date: Tuesday, April 06, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Check out Brendan Burns racing Kubernetes.
|
||||
[Watch the video](https://www.youtube.com/watch?v=7vZ9dRKRMyc)
|
||||
|
|
@ -0,0 +1,65 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Introducing Kubernetes API Version v1beta3 "
|
||||
date: Friday, April 16, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
We've been hard at work on cleaning up the API over the past several months (see [https://github.com/GoogleCloudPlatform/kubernetes/issues/1519](https://github.com/GoogleCloudPlatform/kubernetes/issues/1519) for details). The result is v1beta3, which is considered to be the release candidate for the v1 API.
|
||||
|
||||
We would like you to move to this new API version as soon as possible. v1beta1 and v1beta2 are deprecated, and will be removed by the end of June, shortly after we introduce the v1 API.
|
||||
|
||||
As of the latest release, v0.15.0, v1beta3 is the primary, default API. We have changed the default kubectl and client API versions as well as the default storage version (which means objects persisted in etcd will be converted from v1beta1 to v1beta3 as they are rewritten).
|
||||
|
||||
You can take a look at v1beta3 examples such as:
|
||||
|
||||
[https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/guestbook/v1beta3](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/guestbook/v1beta3)
|
||||
|
||||
[https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/walkthrough/v1beta3](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/walkthrough/v1beta3)
|
||||
|
||||
[https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/update-demo/v1beta3](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/update-demo/v1beta3)
|
||||
|
||||
|
||||
|
||||
To aid the transition, we've also created a conversion [tool](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/cluster_management.md#switching-your-config-files-to-a-new-api-version) and put together a list of important [different API changes](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api.md#v1beta3-conversion-tips).
|
||||
|
||||
|
||||
- The resource `id` is now called `name`.
|
||||
- `name`, `labels`, `annotations`, and other metadata are now nested in a map called `metadata`
|
||||
- `desiredState` is now called `spec`, and `currentState` is now called `status`
|
||||
- `/minions` has been moved to `/nodes`, and the resource has kind `Node`
|
||||
- The namespace is required (for all namespaced resources) and has moved from a URL parameter to the path: `/api/v1beta3/namespaces/{namespace}/{resource_collection}/{resource_name}`
|
||||
- The names of all resource collections are now lower cased - instead of `replicationControllers`, use `replicationcontrollers`.
|
||||
- To watch for changes to a resource, open an HTTP or Websocket connection to the collection URL and provide the `?watch=true` URL parameter along with the desired `resourceVersion` parameter to watch from.
|
||||
- The container `entrypoint` has been renamed to `command`, and `command` has been renamed to `args`.
|
||||
- Container, volume, and node resources are expressed as nested maps (e.g., `resources{cpu:1}`) rather than as individual fields, and resource values support [scaling suffixes](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/resources.md#resource-quantities) rather than fixed scales (e.g., milli-cores).
|
||||
- Restart policy is represented simply as a string (e.g., "Always") rather than as a nested map ("always{}").
|
||||
- The volume `source` is inlined into `volume` rather than nested.
|
||||
- Host volumes have been changed from `hostDir` to `hostPath` to better reflect that they can be files or directories
|
||||
|
||||
|
||||
|
||||
And the most recently generated Swagger specification of the API is here:
|
||||
|
||||
[http://kubernetes.io/third\_party/swagger-ui/#!/v1beta3](http://kubernetes.io/third_party/swagger-ui/#!/v1beta3)
|
||||
|
||||
|
||||
|
||||
More details about our approach to API versioning and the transition can be found here:
|
||||
|
||||
[https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api.md](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api.md)
|
||||
|
||||
|
||||
|
||||
Another change we discovered is that with the change to the default API version in kubectl, commands that use "-o template" will break unless you specify "--api-version=v1beta1" or update to v1beta3 syntax. An example of such a change can be seen here:
|
||||
|
||||
[https://github.com/GoogleCloudPlatform/kubernetes/pull/6377/files](https://github.com/GoogleCloudPlatform/kubernetes/pull/6377/files)
|
||||
|
||||
|
||||
|
||||
If you use "-o template", I recommend always explicitly specifying the API version rather than relying upon the default. We may add this setting to kubeconfig in the future.
|
||||
|
||||
|
||||
|
||||
Let us know if you have any questions. As always, we're available on IRC (#google-containers) and github issues.
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes and the Mesosphere DCOS "
|
||||
date: Thursday, April 22, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
# Kubernetes and the Mesosphere DCOS
|
||||
|
||||
|
||||
|
||||
Today Mesosphere announced the addition of Kubernetes as a standard part of their [DCOS][1] offering. This is a great step forwards in bringing cloud native application management to the world, and should lay to rest many questions we hear about 'Kubernetes or Mesos, which one should I use?'. Now you can have your cake and eat it too: use both. Today's announcement extends the reach of Kubernetes to a new class of users, and adds some exciting new capabilities for everyone.
|
||||
|
||||
By way of background, Kubernetes is a cluster management framework that was started by Google nine months ago, inspired by the internal system known as Borg. You can learn a little more about Borg by checking out this [paper][2]. At the heart of it Kubernetes offers what has been dubbed 'cloud native' application management. To us, there are three things that together make something 'cloud native':
|
||||
|
||||
|
||||
|
||||
* **Container oriented deployments** Package up your application components with all their dependencies and deploy them using technologies like Docker or Rocket. Containers radically simplify the deployment process, making rollouts repeatable and predictable.
|
||||
* **Dynamically managed** Rely on modern control systems to make moment-to-moment decisions around the health management and scheduling of applications to radically improve reliability and efficiency. There are some things that just machines do better than people, and actively running applications is one of those things.
|
||||
* **Micro-services oriented** Tease applications apart into small semi-autonomous services that can be consumed easily so that the resulting systems are easier to understand, extend and adapt.
|
||||
|
||||
Kubernetes was designed from the start to make these capabilities available to everyone, and built by the same engineers that built the system internally known as Borg. For many users the promise of 'Google style app management' is interesting, but they want to run these new classes of applications on the same set of physical resources as their existing workloads like Hadoop, Spark, Kafka, etc. Now they will have access to a commercially supported offering that brings the two worlds together.
|
||||
|
||||
Mesosphere, one of the earliest supporters of the Kubernetes project, has been working closely with the core Kubernetes team to create a natural experience for users looking to get the best of both worlds, adding Kubernetes to every Mesos deployment they instantiate, whether it be in the public cloud, private cloud, or in a hybrid deployment model. This is well aligned with the overall Kubernetes vision of creating a ubiquitous management framework that runs anywhere a container can. It will be interesting to see how you blend together the old world and the new on a commercially supported, versatile platform.
|
||||
|
||||
Craig McLuckie
|
||||
|
||||
Product Manager, Google and Kubernetes co-founder
|
||||
|
||||
[1]: https://mesosphere.com/product/
|
||||
[2]: http://research.google.com/pubs/pub43438.html
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes Release: 0.15.0 "
|
||||
date: Friday, April 16, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
Release Notes:
|
||||
|
||||
|
||||
|
||||
* Enables v1beta3 API and sets it to the default API version ([#6098][1])
|
||||
* Added multi-port Services ([#6182][2])
|
||||
* New Getting Started Guides
|
||||
* Multi-node local startup guide ([#6505][3])
|
||||
* Mesos on Google Cloud Platform ([#5442][4])
|
||||
* Ansible Setup instructions ([#6237][5])
|
||||
* Added a controller framework ([#5270][6], [#5473][7])
|
||||
* The Kubelet now listens on a secure HTTPS port ([#6380][8])
|
||||
* Made kubectl errors more user-friendly ([#6338][9])
|
||||
* The apiserver now supports client cert authentication ([#6190][10])
|
||||
* The apiserver now limits the number of concurrent requests it processes ([#6207][11])
|
||||
* Added rate limiting to pod deleting ([#6355][12])
|
||||
* Implement Balanced Resource Allocation algorithm as a PriorityFunction in scheduler package ([#6150][13])
|
||||
* Enabled log collection from master ([#6396][14])
|
||||
* Added an api endpoint to pull logs from Pods ([#6497][15])
|
||||
* Added latency metrics to scheduler ([#6368][16])
|
||||
* Added latency metrics to REST client ([#6409][17])
|
||||
* etcd now runs in a pod on the master ([#6221][18])
|
||||
* nginx now runs in a container on the master ([#6334][19])
|
||||
* Began creating Docker images for master components ([#6326][20])
|
||||
* Updated GCE provider to work with gcloud 0.9.54 ([#6270][21])
|
||||
* Updated AWS provider to fix Region vs Zone semantics ([#6011][22])
|
||||
* Record event when image GC fails ([#6091][23])
|
||||
* Add a QPS limiter to the kubernetes client ([#6203][24])
|
||||
* Decrease the time it takes to run make release ([#6196][25])
|
||||
* New volume support
|
||||
* Added iscsi volume plugin ([#5506][26])
|
||||
* Added glusterfs volume plugin ([#6174][27])
|
||||
* AWS EBS volume support ([#5138][28])
|
||||
* Updated to heapster version to v0.10.0 ([#6331][29])
|
||||
* Updated to etcd 2.0.9 ([#6544][30])
|
||||
* Updated to Kibana to v1.2 ([#6426][31])
|
||||
* Bug Fixes
|
||||
* Kube-proxy now updates iptables rules if a service's public IPs change ([#6123][32])
|
||||
* Retry kube-addons creation if the initial creation fails ([#6200][33])
|
||||
* Make kube-proxy more resilient to running out of file descriptors ([#6727][34])
|
||||
|
||||
To download, please visit https://github.com/GoogleCloudPlatform/kubernetes/releases/tag/v0.15.0
|
||||
|
||||
[1]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6098 "Enabling v1beta3 api version by default in master"
|
||||
[2]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6182 "Implement multi-port Services"
|
||||
[3]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6505 "Docker multi-node"
|
||||
[4]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5442 "Getting started guide for Mesos on Google Cloud Platform"
|
||||
[5]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6237 "example ansible setup repo"
|
||||
[6]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5270 "Controller framework"
|
||||
[7]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5473 "Add DeltaFIFO (a controller framework piece)"
|
||||
[8]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6380 "Configure the kubelet to use HTTPS (take 2)"
|
||||
[9]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6338 "Return a typed error for config validation, and make errors simple"
|
||||
[10]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6190 "Add client cert authentication"
|
||||
[11]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6207 "Add a limit to the number of in-flight requests that a server processes."
|
||||
[12]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6355 "Added rate limiting to pod deleting"
|
||||
[13]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6150 "Implement Balanced Resource Allocation (BRA) algorithm as a PriorityFunction in scheduler package."
|
||||
[14]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6396 "Enable log collection from master."
|
||||
[15]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6497 "Pod log subresource"
|
||||
[16]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6368 "Add basic latency metrics to scheduler."
|
||||
[17]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6409 "Add latency metrics to REST client"
|
||||
[18]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6221 "Run etcd 2.0.5 in a pod"
|
||||
[19]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6334 "Add an nginx docker image for use on the master."
|
||||
[20]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6326 "Create Docker images for master components "
|
||||
[21]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6270 "Updates for gcloud 0.9.54"
|
||||
[22]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6011 "Fix AWS region vs zone"
|
||||
[23]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6091 "Record event when image GC fails."
|
||||
[24]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6203 "Add a QPS limiter to the kubernetes client."
|
||||
[25]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6196 "Parallelize architectures in both the building and packaging phases of `make release`"
|
||||
[26]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5506 "add iscsi volume plugin"
|
||||
[27]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6174 "implement glusterfs volume plugin"
|
||||
[28]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5138 "AWS EBS volume support"
|
||||
[29]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6331 "Update heapster version to v0.10.0"
|
||||
[30]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6544 "Build etcd image (version 2.0.9), and upgrade kubernetes cluster to the new version"
|
||||
[31]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6426 "Update Kibana to v1.2 which paramaterizes location of Elasticsearch"
|
||||
[32]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6123 "Fix bug in kube-proxy of not updating iptables rules if a service's public IPs change"
|
||||
[33]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6200 "Retry kube-addons creation if kube-addons creation fails."
|
||||
[34]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6727 "pkg/proxy: panic if run out of fd"
|
||||
|
|
@ -0,0 +1,111 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - April 3 2015 "
|
||||
date: Sunday, April 04, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
# Kubernetes: Weekly Kubernetes Community Hangout Notes
|
||||
|
||||
Every week the Kubernetes contributing community meet virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
Agenda:
|
||||
|
||||
* Quinton - Cluster federation
|
||||
* Satnam - Performance benchmarking update
|
||||
|
||||
*Notes from meeting:*
|
||||
|
||||
1. Quinton - Cluster federation
|
||||
* Ideas floating around after meetup in SF
|
||||
* * Please read and comment
|
||||
* Not 1.0, but put a doc together to show roadmap
|
||||
* Can be built outside of Kubernetes
|
||||
* API to control things across multiple clusters, include some logic
|
||||
|
||||
1. Auth(n)(z)
|
||||
|
||||
2. Scheduling Policies
|
||||
|
||||
3. …
|
||||
|
||||
* Different reasons for cluster federation
|
||||
|
||||
1. Zone (un) availability : Resilient to zone failures
|
||||
|
||||
2. Hybrid cloud: some in cloud, some on prem. for various reasons
|
||||
|
||||
3. Avoid cloud provider lock-in. For various reasons
|
||||
|
||||
4. "Cloudbursting" - automatic overflow into the cloud
|
||||
* Hard problems
|
||||
|
||||
1. Location affinity. How close do pods need to be?
|
||||
|
||||
1. Workload coupling
|
||||
|
||||
2. Absolute location (e.g. eu data needs to be in eu)
|
||||
|
||||
2. Cross cluster service discovery
|
||||
|
||||
1. How does service/DNS work across clusters
|
||||
|
||||
3. Cross cluster workload migration
|
||||
|
||||
1. How do you move an application piece by piece across clusters?
|
||||
|
||||
4. Cross cluster scheduling
|
||||
|
||||
1. How do you know enough about clusters to know where to schedule
|
||||
|
||||
2. Possibly use a cost function to achieve affinities with minimal complexity
|
||||
|
||||
3. Can also use cost to determine where to schedule (under used clusters are cheaper than over-used clusters)
|
||||
|
||||
* Implicit requirements
|
||||
|
||||
1. Cross cluster integration shouldn't create cross-cluster failure modes
|
||||
|
||||
1. Independently usable in a disaster situation where Ubernetes dies.
|
||||
|
||||
2. Unified visibility
|
||||
|
||||
1. Want to have unified monitoring, alerting, logging, introspection, ux, etc.
|
||||
|
||||
3. Unified quota and identity management
|
||||
|
||||
1. Want to have user database and auth(n)/(z) in a single place
|
||||
|
||||
* Important to note, most causes of software failure are not the infrastructure
|
||||
|
||||
1. Botched software upgrades
|
||||
|
||||
2. Botched config upgrades
|
||||
|
||||
3. Botched key distribution
|
||||
|
||||
4. Overload
|
||||
|
||||
5. Failed external dependencies
|
||||
|
||||
* Discussion:
|
||||
|
||||
1. Where do you draw the "ubernetes" line
|
||||
|
||||
1. Likely at the availability zone, but could be at the rack, or the region
|
||||
|
||||
2. Important to not pigeon hole and prevent other users
|
||||
|
||||
|
||||
|
||||
2. Satnam - Soak Test
|
||||
* Want to measure things that run for a long time to make sure that the cluster is stable over time. Performance doesn't degrade, no memory leaks, etc.
|
||||
* github.com/GoogleCloudPlatform/kubernetes/test/soak/…
|
||||
* Single binary, puts lots of pods on each node, and queries each pod to make sure that it is running.
|
||||
* Pods are being created much, much more quickly (even in the past week) to make things go more quickly.
|
||||
* Once the pods are up running, we hit the pods via the proxy. Decision to hit the proxy was deliberate so that we test the kubernetes apiserver.
|
||||
* Code is already checked in.
|
||||
* Pin pods to each node, exercise every pod, make sure that you get a response for each node.
|
||||
* Single binary, run forever.
|
||||
* Brian - v1beta3 is enabled by default, v1beta1 and v1beta2 deprecated, turned off in June. Should still work with upgrading existing clusters, etc.
|
||||
|
|
@ -0,0 +1,114 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - April 10 2015 "
|
||||
date: Sunday, April 11, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Every week the Kubernetes contributing community meet virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
Agenda:
|
||||
|
||||
* kubectl tooling, rolling update, deployments, imperative commands
|
||||
* Downward API / env. substitution, and maybe preconditions/dependencies
|
||||
|
||||
|
||||
**Notes from meeting:**
|
||||
|
||||
1\. kubectl improvements
|
||||
|
||||
* make it simpler to use, finish rolling update, higher-level deployment concepts
|
||||
* rolling update
|
||||
|
||||
* today
|
||||
* can replace one rc by another rc specified by a file
|
||||
|
||||
* no explicit support for rollback, can sort of do it by doing rolling update to old version
|
||||
|
||||
* we keep annotations on rcs to keep track of desired # instances; won't work for rollback case b/c not symmetric
|
||||
|
||||
* need immutable image ids; currently no uuid that corresponds to image,version so if someone pushes on top you'll re-pull that; in API server we should translate images into uuids (as close to edge as possible)
|
||||
|
||||
* would be nice to auto-gen new rc instead of having user update it (e.g. when change image tag for container, etc.; currently need to change rc name and label value; could automate generating new rc)
|
||||
|
||||
* treating rcs as pets vs. cattle
|
||||
|
||||
* "roll me from v1 to v2" (or v2 to v1) - good enough for most people. don't care about record of what happened in the past.
|
||||
|
||||
* we're providing the module ansible can call to make something happen.
|
||||
|
||||
* how do you keep track of multiple templates; today we use multiple RCs
|
||||
|
||||
* if we had a deployment controller; deployment config spawns pods that run rolling update; trigger is level-based update of image repository
|
||||
|
||||
* alternative short-term proposal: create new rc as clone of old one, futz with counts so new one is old one and vv, bring prev-named one (pet) down to zero and bring it back up with new template (this is very similar to how Borg does job updates)
|
||||
* is it worthwhile if we want to have the deployments anyway? yes b/c we have lots of concepts already; need to simplify
|
||||
|
||||
* deployment controller keeps track of multiple templates which is what you need for rolling updates and canaries
|
||||
|
||||
* only reason for new thing is to move the process into the server instead of the client?
|
||||
|
||||
* may not need to make it an API object; should provide experience where it's not an API object and is just something client side
|
||||
|
||||
* need an experience now so need to do it in client because object won't land before 1.0
|
||||
|
||||
* having simplified experience for people who only want to engage w/ RCs
|
||||
|
||||
* how does rollback work: ctrl-c, rollout v2 v1. rollback pattern can be in person's head. 2 kinds of rollback: i'm at steady state and want to go back, and i've got canary deployment and hit ctrl-c how do i get rid of the canary deployment (e.g. new is failing). ctrl-c might not work. delete canary controller and its pods. wish there was a command to also delete pods (there is -- kubectl stop). argument for not reusing name: when you move fwd you can stop the new thing and you're ok, vs. if you replace the old one and you've created a copy if you hit ctrl-c you don't have anything you can stop. but you could wait to flip the name until the end, use naming convention so can figure out what is going on, etc.
|
||||
|
||||
* two different experiences: (1) i'm using version control, have version history of last week rollout this week, rolling update with two files -> create v2, ??? v1, don't have a pet - moved into world of version control where have cumulative history and; (2) imperative kubectl v1 v2 where sys takes care of details, that's where we use the snapshot pattern
|
||||
|
||||
* other imperative commands
|
||||
|
||||
* run-container (or just run): spec command on command line which makes it more similar to docker run; but not multi-container pods.
|
||||
|
||||
* \--forever vs. not (one shot exec via simple command)
|
||||
|
||||
* would like it go interactive - run -it and runs in cluster but you have interactive terminal to your process.
|
||||
|
||||
* how do command line args work. could say --image multiple times. will cobra support? in openshift we have clever syntax for grouping arguments together. doesn't work for real structured parameters.
|
||||
|
||||
* alternative: create pod; add container add container ...; run pod -- build and don't run object until 'run pod'
|
||||
|
||||
* \-- to separate container args
|
||||
|
||||
* create a pod, mutate it before you run it - like initializer pattern
|
||||
* kind discovery
|
||||
|
||||
* if we have run and sometimes it creates an rc and sometimes it doesn't, how does user know what to delete if they want to delete whatever they created with run
|
||||
|
||||
* bburns has proposal for don't specify kind if you do command like stop, delete; let kubectl figure it out
|
||||
|
||||
* alternative: allow you to define alias from name to set of resource types, eg. delete all which would follow that alias (all could mean everything in some namespace, or unscoped, etc.) - someone explicitly added something to a set vs. accidentally showed up like nodes
|
||||
|
||||
* would like to see extended to allow tools to specify their own aliases (not just users); e.g. resize can say i can handle RCs, delete can say I can handle everything, etc. so we can automatically do these things w/o users having to specify stuff. but right mechanism.
|
||||
|
||||
* resourcebuilder has concept of doing that kind of expansion depending on how we fit in targeted commands. for instance if you want to add a volume to pods and rcs, you need something to go find the pod template and change it. there's the search part of it (delete nginx -> you have to figure out what object they are referring to) and then command can say i got a pod i know what to do with a pod.
|
||||
|
||||
* alternative heuristic: what if default target of all commands was deployments. kubectl run -> deployment. too much work, easier to clean up existing CLI. leave door open for that. macro objects OK but a lot more work to make that work. eventually will want index to make these efficient. could rely more on swagger to tell us types.
|
||||
|
||||
2\. paul/downward api: env substitution
|
||||
|
||||
* create ad-hoc env var like strings, e.g. k8s_pod_name that would get sub'd by system in objects
|
||||
* allow people to create env vars that refer to fields of k8s objects w/o query api from inside their container; in some cases enables query api from their container (e.g. pass obj names, namespaces); e.g. sidecar containers need this for pulling things from api server
|
||||
* another proposal similar: instead of env var like names, have JSON-path-like syntax for referring to object field names; e.g. $.[metadata.name][1] to refer to name of current object, maybe have some syntax for referring to related objects like node that a pod is on. advantage of JSON path-like syntax is that it's less ad hoc. disadvantage is that you can only refer to things that are fields of objects.
|
||||
* for both, if you populate env vars then you have drawback that fields only set when container is created. but least degree of coupling -- off the shelf containers, containers don't need to know how to talk to k8s API. keeps the k8s concepts in the control plane.
|
||||
* we were converging on JSON path like approach. but need prototype or at least deeper proposal to demo.
|
||||
* paul: one variant is for env vars in addition to value field have different sources which is where you would plug in e.g. syntax you use to describe a field of an object; another source would be a source that described info about the host. have partial prototype. clean separation between what's in image vs. control plane. could use source idea for volume plugin.
|
||||
* use case: provide info for sidecar container to contact API server
|
||||
* use case: pass down unique identifiers or things like using UID as unique identifier
|
||||
* clayton: for rocket or gce metadata service being available for every pod for more sophisticated things; most containers want to find endpoint of service,
|
||||
|
||||
3\. preconditions/dependencies
|
||||
|
||||
* when you create pods that talk to services, the service env vars only get populated if you create the objs in the right order. if you use dns it's less of a problem but some apps are fragile. may crash if svc they depend on is not there, may take a long time to restart. proposal to have preconds that block starting pods until objs they depend on exist.
|
||||
* infer automatically if we ask people to declare which env vars they wanted, or have dep mech at pod or rc or obj level to say this obj doesn't become active until this other thing exists.
|
||||
* can use event hook? only app owner knows their dependency or when service is ready to serve.
|
||||
* one proposal is to use pre-start hook. another is precondition probe - pre-start hook could do a probe. does anything respond when i hit this svc address or ip, then probe fails. could be implemented in pre-start hook. more useful than post-start. is part of rkt spec. has stages 0, 1, 2. hard to do in docker today, easy in rocket.
|
||||
* pre-start hook in container: how will affect readiness probe since the container might have a lock until some arbitrary condition is met if you implement with prestart hook. there has to be some compensation on when kubelet runs readiness/liveness probes if you have a hook. systemd has timeouts around the stages of process lifecycle.
|
||||
* if we go to black box model of container pre-start makes sense; if container spec becomes more descriptive of process model like systemd, then does kubelet need to know more about process model to do the right thing
|
||||
* ideally msg from inside the container to say i've done all of my pre-start actions. sdnotify for systemd does this. you tell systemd that you're done, it will communicate to other deps that you're alive.
|
||||
* but... someone could just implement preconds inside their container. makes it easier to adapt an app w/o having to change their image. alternative is just have a pattern how they do it themselves but we don't do it for them.
|
||||
|
||||
[1]: http://metadata.name/
|
||||
|
|
@ -0,0 +1,173 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - April 17 2015 "
|
||||
date: Saturday, April 17, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Every week the Kubernetes contributing community meet virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
Agenda
|
||||
|
||||
* Mesos Integration
|
||||
* High Availability (HA)
|
||||
* Adding performance and profiling details to e2e to track regressions
|
||||
* Versioned clients
|
||||
|
||||
Notes
|
||||
|
||||
|
||||
* Mesos integration
|
||||
|
||||
* Mesos integration proposal:
|
||||
|
||||
* No blockers to integration.
|
||||
|
||||
* Documentation needs to be updated.
|
||||
* HA
|
||||
|
||||
* Proposal should land today.
|
||||
|
||||
* Etcd cluster.
|
||||
|
||||
* Load-balance apiserver.
|
||||
|
||||
* Cold standby for controller manager and other master components.
|
||||
* Adding performance and profiling details to e2e to track regression
|
||||
|
||||
* Want red light for performance regression
|
||||
|
||||
* Need a public DB to post the data
|
||||
|
||||
* See
|
||||
|
||||
* Justin working on multi-platform e2e dashboard
|
||||
* Versioned clients
|
||||
|
||||
*
|
||||
|
||||
*
|
||||
|
||||
* Client library currently uses internal API objects.
|
||||
|
||||
* Nobody reported that frequent changes to types.go have been painful, but we are worried about it.
|
||||
|
||||
* Structured types are useful in the client. Versioned structs would be ok.
|
||||
|
||||
* If start with json/yaml (kubectl), shouldn’t convert to structured types. Use swagger.
|
||||
* Security context
|
||||
|
||||
*
|
||||
|
||||
* Administrators can restrict who can run privileged containers or require specific unix uids
|
||||
|
||||
* Kubelet will be able to get pull credentials from apiserver
|
||||
|
||||
* Policy proposal coming in the next week or so
|
||||
* Discussing upstreaming of users, etc. into Kubernetes, at least as optional
|
||||
* 1.0 Roadmap
|
||||
|
||||
* Focus is performance, stability, cluster upgrades
|
||||
|
||||
* TJ has been making some edits to [roadmap.md][4] but hasn’t sent out a PR yet
|
||||
* Kubernetes UI
|
||||
|
||||
* Dependencies broken out into third-party
|
||||
|
||||
* @lavalamp is reviewer
|
||||
|
||||
|
||||
[1]: http://kubernetes.io/images/nav_logo.svg
|
||||
[2]: http://kubernetes.io/docs/
|
||||
[3]: http://blog.kubernetes.io/
|
||||
[4]: https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/roadmap.md
|
||||
[5]: http://blog.kubernetes.io/2015/04/weekly-kubernetes-community-hangout_17.html "permanent link"
|
||||
[6]: https://resources.blogblog.com/img/icon18_edit_allbkg.gif
|
||||
[7]: https://www.blogger.com/post-edit.g?blogID=112706738355446097&postID=630924463010638300&from=pencil "Edit Post"
|
||||
[8]: https://www.blogger.com/share-post.g?blogID=112706738355446097&postID=630924463010638300&target=email "Email This"
|
||||
[9]: https://www.blogger.com/share-post.g?blogID=112706738355446097&postID=630924463010638300&target=blog "BlogThis!"
|
||||
[10]: https://www.blogger.com/share-post.g?blogID=112706738355446097&postID=630924463010638300&target=twitter "Share to Twitter"
|
||||
[11]: https://www.blogger.com/share-post.g?blogID=112706738355446097&postID=630924463010638300&target=facebook "Share to Facebook"
|
||||
[12]: https://www.blogger.com/share-post.g?blogID=112706738355446097&postID=630924463010638300&target=pinterest "Share to Pinterest"
|
||||
[13]: http://blog.kubernetes.io/search/label/community%20meetings
|
||||
[14]: http://blog.kubernetes.io/search/label/containers
|
||||
[15]: http://blog.kubernetes.io/search/label/docker
|
||||
[16]: http://blog.kubernetes.io/search/label/k8s
|
||||
[17]: http://blog.kubernetes.io/search/label/kubernetes
|
||||
[18]: http://blog.kubernetes.io/search/label/open%20source
|
||||
[19]: http://blog.kubernetes.io/2015/04/kubernetes-and-mesosphere-dcos.html "Newer Post"
|
||||
[20]: http://blog.kubernetes.io/2015/04/introducing-kubernetes-v1beta3.html "Older Post"
|
||||
[21]: http://blog.kubernetes.io/feeds/630924463010638300/comments/default
|
||||
[22]: https://img2.blogblog.com/img/widgets/arrow_dropdown.gif
|
||||
[23]: https://img1.blogblog.com/img/icon_feed12.png
|
||||
[24]: https://img1.blogblog.com/img/widgets/subscribe-netvibes.png
|
||||
[25]: https://www.netvibes.com/subscribe.php?url=http%3A%2F%2Fblog.kubernetes.io%2Ffeeds%2Fposts%2Fdefault
|
||||
[26]: https://img1.blogblog.com/img/widgets/subscribe-yahoo.png
|
||||
[27]: https://add.my.yahoo.com/content?url=http%3A%2F%2Fblog.kubernetes.io%2Ffeeds%2Fposts%2Fdefault
|
||||
[28]: http://blog.kubernetes.io/feeds/posts/default
|
||||
[29]: https://www.netvibes.com/subscribe.php?url=http%3A%2F%2Fblog.kubernetes.io%2Ffeeds%2F630924463010638300%2Fcomments%2Fdefault
|
||||
[30]: https://add.my.yahoo.com/content?url=http%3A%2F%2Fblog.kubernetes.io%2Ffeeds%2F630924463010638300%2Fcomments%2Fdefault
|
||||
[31]: https://resources.blogblog.com/img/icon18_wrench_allbkg.png
|
||||
[32]: //www.blogger.com/rearrange?blogID=112706738355446097&widgetType=Subscribe&widgetId=Subscribe1&action=editWidget§ionId=sidebar-right-1 "Edit"
|
||||
[33]: https://twitter.com/kubernetesio
|
||||
[34]: https://github.com/kubernetes/kubernetes
|
||||
[35]: http://slack.k8s.io/
|
||||
[36]: http://stackoverflow.com/questions/tagged/kubernetes
|
||||
[37]: http://get.k8s.io/
|
||||
[38]: //www.blogger.com/rearrange?blogID=112706738355446097&widgetType=HTML&widgetId=HTML2&action=editWidget§ionId=sidebar-right-1 "Edit"
|
||||
[39]: javascript:void(0)
|
||||
[40]: http://blog.kubernetes.io/2018/
|
||||
[41]: http://blog.kubernetes.io/2018/01/
|
||||
[42]: http://blog.kubernetes.io/2017/
|
||||
[43]: http://blog.kubernetes.io/2017/12/
|
||||
[44]: http://blog.kubernetes.io/2017/11/
|
||||
[45]: http://blog.kubernetes.io/2017/10/
|
||||
[46]: http://blog.kubernetes.io/2017/09/
|
||||
[47]: http://blog.kubernetes.io/2017/08/
|
||||
[48]: http://blog.kubernetes.io/2017/07/
|
||||
[49]: http://blog.kubernetes.io/2017/06/
|
||||
[50]: http://blog.kubernetes.io/2017/05/
|
||||
[51]: http://blog.kubernetes.io/2017/04/
|
||||
[52]: http://blog.kubernetes.io/2017/03/
|
||||
[53]: http://blog.kubernetes.io/2017/02/
|
||||
[54]: http://blog.kubernetes.io/2017/01/
|
||||
[55]: http://blog.kubernetes.io/2016/
|
||||
[56]: http://blog.kubernetes.io/2016/12/
|
||||
[57]: http://blog.kubernetes.io/2016/11/
|
||||
[58]: http://blog.kubernetes.io/2016/10/
|
||||
[59]: http://blog.kubernetes.io/2016/09/
|
||||
[60]: http://blog.kubernetes.io/2016/08/
|
||||
[61]: http://blog.kubernetes.io/2016/07/
|
||||
[62]: http://blog.kubernetes.io/2016/06/
|
||||
[63]: http://blog.kubernetes.io/2016/05/
|
||||
[64]: http://blog.kubernetes.io/2016/04/
|
||||
[65]: http://blog.kubernetes.io/2016/03/
|
||||
[66]: http://blog.kubernetes.io/2016/02/
|
||||
[67]: http://blog.kubernetes.io/2016/01/
|
||||
[68]: http://blog.kubernetes.io/2015/
|
||||
[69]: http://blog.kubernetes.io/2015/12/
|
||||
[70]: http://blog.kubernetes.io/2015/11/
|
||||
[71]: http://blog.kubernetes.io/2015/10/
|
||||
[72]: http://blog.kubernetes.io/2015/09/
|
||||
[73]: http://blog.kubernetes.io/2015/08/
|
||||
[74]: http://blog.kubernetes.io/2015/07/
|
||||
[75]: http://blog.kubernetes.io/2015/06/
|
||||
[76]: http://blog.kubernetes.io/2015/05/
|
||||
[77]: http://blog.kubernetes.io/2015/04/
|
||||
[78]: http://blog.kubernetes.io/2015/04/weekly-kubernetes-community-hangout_29.html
|
||||
[79]: http://blog.kubernetes.io/2015/04/borg-predecessor-to-kubernetes.html
|
||||
[80]: http://blog.kubernetes.io/2015/04/kubernetes-and-mesosphere-dcos.html
|
||||
[81]: http://blog.kubernetes.io/2015/04/weekly-kubernetes-community-hangout_17.html
|
||||
[82]: http://blog.kubernetes.io/2015/04/introducing-kubernetes-v1beta3.html
|
||||
[83]: http://blog.kubernetes.io/2015/04/kubernetes-release-0150.html
|
||||
[84]: http://blog.kubernetes.io/2015/04/weekly-kubernetes-community-hangout_11.html
|
||||
[85]: http://blog.kubernetes.io/2015/04/faster-than-speeding-latte.html
|
||||
[86]: http://blog.kubernetes.io/2015/04/weekly-kubernetes-community-hangout.html
|
||||
[87]: http://blog.kubernetes.io/2015/03/
|
||||
[88]: //www.blogger.com/rearrange?blogID=112706738355446097&widgetType=BlogArchive&widgetId=BlogArchive1&action=editWidget§ionId=sidebar-right-1 "Edit"
|
||||
[89]: //www.blogger.com/rearrange?blogID=112706738355446097&widgetType=HTML&widgetId=HTML1&action=editWidget§ionId=sidebar-right-1 "Edit"
|
||||
[90]: https://www.blogger.com
|
||||
[91]: //www.blogger.com/rearrange?blogID=112706738355446097&widgetType=Attribution&widgetId=Attribution1&action=editWidget§ionId=footer-3 "Edit"
|
||||
|
||||
[*[3:27 PM]: 2015-04-17T15:27:00-07:00
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - April 24 2015 "
|
||||
date: Friday, April 30, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Every week the Kubernetes contributing community meet virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
|
||||
Agenda:
|
||||
|
||||
* Flocker and Kubernetes integration demo
|
||||
|
||||
Notes:
|
||||
|
||||
* flocker and kubernetes integration demo
|
||||
* Flocker Q/A
|
||||
|
||||
* Does the file still exists on node1 after migration?
|
||||
|
||||
* Brendan: Any plan this to make it a volume? So we don't need powerstrip?
|
||||
|
||||
* Luke: Need to figure out interest to decide if we want to make it a first-class persistent disk provider in kube.
|
||||
|
||||
* Brendan: Removing need for powerstrip would make it simple to use. Totally go for it.
|
||||
|
||||
* Tim: Should take no more than 45 minutes to add it to kubernetes:)
|
||||
|
||||
* Derek: Contrast this with persistent volumes and claims?
|
||||
|
||||
* Luke: Not much difference, except for the novel ZFS based backend. Makes workloads really portable.
|
||||
|
||||
* Tim: very different than network-based volumes. Its interesting that it is the only offering that allows upgrading media.
|
||||
|
||||
* Brendan: claims, how does it look for replicated claims? eg Cassandra wants to have replicated data underneath. It would be efficient to scale up and down. Create storage on the fly based on load dynamically. Its step beyond taking snapshots - programmatically creating replicas with preallocation.
|
||||
|
||||
* Tim: helps with auto-provisioning.
|
||||
|
||||
* Brian: Does flocker requires any other component?
|
||||
|
||||
* Kai: Flocker control service co-located with the master. (diagram on blog post). Powerstrip + Powerstrip Flocker. Very interested in persisting state in etcd. It keeps metadata about each volume.
|
||||
|
||||
* Brendan: In future, flocker can be a plugin and we'll take care of persistence. Post v1.0.
|
||||
|
||||
* Brian: Interested in adding generic plugin for services like flocker.
|
||||
|
||||
* Luke: Zfs can become really valuable when scaling to lot of containers on a single node.
|
||||
|
||||
* Alex: Can flocker service can be run as a pod?
|
||||
|
||||
* Kai: Yes, only requirement is the flocker control service should be able to talk to zfs agent. zfs agent needs to be installed on the host and zfs binaries need to be accessible.
|
||||
|
||||
* Brendan: In theory, all zfs bits can be put it into a container with devices.
|
||||
|
||||
* Luke: Yes, still working through cross-container mounting issue.
|
||||
|
||||
* Tim: pmorie is working through it to make kubelet work in a container. Possible re-use.
|
||||
|
||||
* Kai: Cinder support is coming. Few days away.
|
||||
* Bob: What's the process of pushing kube to GKE? Need more visibility for confidence.
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " AppC Support for Kubernetes through RKT "
|
||||
date: Tuesday, May 04, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
We very recently accepted a pull request to the Kubernetes project to add appc support for the Kubernetes community. Appc is a new open container specification that was initiated by CoreOS, and is supported through CoreOS rkt container runtime.
|
||||
|
||||
|
||||
|
||||
This is an important step forward for the Kubernetes project and for the broader containers community. It adds flexibility and choice to the container-verse and brings the promise of compelling new security and performance capabilities to the Kubernetes developer.
|
||||
|
||||
|
||||
|
||||
Container based runtimes (like Docker or rkt) when paired with smart orchestration technologies (like Kubernetes and/or Apache Mesos) are a legitimate disruption to the way that developers build and run their applications. While the supporting technologies are relatively nascent, they do offer the promise of some very powerful new ways to assemble, deploy, update, debug and extend solutions. I believe that the world has not yet felt the full potential of containers and the next few years are going to be particularly exciting! With that in mind it makes sense for several projects to emerge with different properties and different purposes. It also makes sense to be able to plug together different pieces (whether it be the container runtime or the orchestrator) based on the specific needs of a given application.
|
||||
|
||||
|
||||
|
||||
Docker has done an amazing job of democratizing container technologies and making them accessible to the outside world, and we expect Kubernetes to support Docker indefinitely. CoreOS has also started to do interesting work with rkt to create an elegant, clean, simple and open platform that offers some really interesting properties. It looks poised to deliver a secure and performant operating environment for containers. The Kubernetes team has been working with the appc team at CoreOS for a while and in many ways they built rkt with Kubernetes in mind as a simple pluggable runtime component.
|
||||
|
||||
|
||||
|
||||
The really nice thing is that with Kubernetes you can now pick the container runtime that works best for you based on your workloads’ needs, change runtimes without having to replace your cluster environment, or even mix together applications where different parts are running in different container runtimes in the same cluster. Additional choices can’t help but ultimately benefit the end developer.
|
||||
|
||||
-- Craig McLuckie
|
||||
Google Product Manager and Kubernetes co-founder
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Docker and Kubernetes and AppC "
|
||||
date: Tuesday, May 18, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Recently we announced the intent in Kubernetes, our open source cluster manager, to support AppC and RKT, an alternative container format that has been driven by CoreOS with input from many companies (including Google). This announcement has generated a surprising amount of buzz and has been construed as a move from Google to support Appc over Docker. Many have taken it as signal that Google is moving away from supporting Docker. I would like to take a moment to clarify Google’s position in this.
|
||||
|
||||
|
||||
Google has consistently supported the Docker initiative and has invested heavily in Docker. In the early days of containers, we decided to de-emphasize our own open source offering (LMCTFY) and to instead focus on Docker. As a result of that we have two engineers that are active maintainers of LibContainer, a critical piece of the Docker ecosystem and are working closely with Docker to add many additional features and capabilities. Docker is currently the only supported runtime in GKE (Google Container Engine) our commercial containers product, and in GAE (Google App Engine), our Platform-as-a-Service product.
|
||||
|
||||
|
||||
While we may introduce AppC support at some point in the future to GKE based on our customers demand, we intend to continue to support the Docker project and product, and Docker the company indefinitely. To date Docker is by far the most mature and widely used container offering in the market, with over 400 million downloads. It has been production ready for almost a year and seen widespread use in industry, and also here inside Google.
|
||||
|
||||
|
||||
Beyond the obvious traction Docker has in the market, we are heartened by many of Docker’s recent initiatives to open the project and support ‘batteries included, but swappable’ options across the stack and recognize that it offers a great developer experience for engineers new to the containers world. We are encouraged, for example, by the separation of the Docker Machine and Swarm projects from the core runtime, and are glad to see support for Docker Machine emerging for Google Compute Engine.
|
||||
|
||||
|
||||
Our intent with our announcement for AppC and RKT support was to establish Kubernetes (our open source project) as a neutral ground in the world of containers. Customers should be able to pick their container runtime and format based solely on its technical merits, and we do see AppC as offering some legitimate potential merits as the technology matures. Somehow this was misconstrued as an ‘a vs b’ selection which is simply untrue. The world is almost always better for having choice, and it is perfectly natural that different tools should be available for different purposes.
|
||||
|
||||
|
||||
Stepping back a little, one must recognize that Docker has done remarkable work in democratizing container technologies and making them accessible to everyone. We believe that Docker will continue to drive great experiences for developers looking to use containers and plan to support this technology and its burgeoning community indefinitely. We, for one, are looking forward to the upcoming Dockercon where Brendan Burns (a Kubernetes co-founder) will be talking about the role of Docker in modern distributed systems design.
|
||||
|
||||
|
||||
|
||||
-- Craig McLuckie
|
||||
|
||||
Google Group Product Manager, and Kubernetes Project Co-Founder
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes on OpenStack "
|
||||
date: Wednesday, May 19, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
|
||||
|
||||
[](http://3.bp.blogspot.com/-EOrCHChZJZE/VVZzq43g6CI/AAAAAAAAF-E/JUilRHk369E/s1600/Untitled%2Bdrawing.jpg)
|
||||
|
||||
|
||||
|
||||
Today, the [OpenStack foundation](https://www.openstack.org/foundation/) made it even easier for you to deploy and manage clusters of Docker containers on OpenStack clouds by including Kubernetes in its [Community App Catalog](http://apps.openstack.org/). At a keynote today at the OpenStack Summit in Vancouver, Mark Collier, COO of the OpenStack Foundation, and Craig Peters, [Mirantis](https://www.mirantis.com/) product line manager, demonstrated the Community App Catalog workflow by launching a Kubernetes cluster in a matter of seconds by leveraging the compute, storage, networking and identity systems already present in an OpenStack cloud.
|
||||
|
||||
|
||||
|
||||
The entries in the catalog include not just the ability to [start a Kubernetes cluster](http://apps.openstack.org/#tab=murano-apps&asset=Kubernetes%20Cluster), but also a range of applications deployed in Docker containers managed by Kubernetes. These applications include:
|
||||
|
||||
|
||||
|
||||
-
|
||||
Apache web server
|
||||
-
|
||||
Nginx web server
|
||||
-
|
||||
Crate - The Distributed Database for Docker
|
||||
-
|
||||
GlassFish - Java EE 7 Application Server
|
||||
-
|
||||
Tomcat - An open-source web server and servlet container
|
||||
-
|
||||
InfluxDB - An open-source, distributed, time series database
|
||||
-
|
||||
Grafana - Metrics dashboard for InfluxDB
|
||||
-
|
||||
Jenkins - An extensible open source continuous integration server
|
||||
-
|
||||
MariaDB database
|
||||
-
|
||||
MySql database
|
||||
-
|
||||
Redis - Key-value cache and store
|
||||
-
|
||||
PostgreSQL database
|
||||
-
|
||||
MongoDB NoSQL database
|
||||
-
|
||||
Zend Server - The Complete PHP Application Platform
|
||||
|
||||
|
||||
|
||||
This list will grow, and is curated [here](https://github.com/openstack/murano-apps/tree/master/Docker/Kubernetes). You can examine (and contribute to) the YAML file that tells Murano how to install and start the Kubernetes cluster [here](https://github.com/openstack/murano-apps/blob/master/Docker/Kubernetes/KubernetesCluster/package/Classes/KubernetesCluster.yaml).
|
||||
|
||||
|
||||
|
||||
[The Kubernetes open source project](https://github.com/GoogleCloudPlatform/kubernetes) has continued to see fantastic community adoption and increasing momentum, with over 11,000 commits and 7,648 stars on GitHub. With supporters ranging from Red Hat and Intel to CoreOS and Box.net, it has come to represent a range of customer interests ranging from enterprise IT to cutting edge startups. We encourage you to give it a try, give us your feedback, and get involved in our growing community.
|
||||
|
||||
|
||||
- Martin Buhr, Product Manager, Kubernetes Open Source Project
|
||||
|
|
@ -0,0 +1,72 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes Release: 0.16.0 "
|
||||
date: Tuesday, May 11, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Release Notes:
|
||||
|
||||
- Bring up a kubernetes cluster using coreos image as worker nodes [#7445](https://github.com/GoogleCloudPlatform/kubernetes/pull/7445) (dchen1107)
|
||||
- Cloning v1beta3 as v1 and exposing it in the apiserver [#7454](https://github.com/GoogleCloudPlatform/kubernetes/pull/7454) (nikhiljindal)
|
||||
- API Conventions for Late-initializers [#7366](https://github.com/GoogleCloudPlatform/kubernetes/pull/7366) (erictune)
|
||||
- Upgrade Elasticsearch to 1.5.2 for cluster logging [#7455](https://github.com/GoogleCloudPlatform/kubernetes/pull/7455) (satnam6502)
|
||||
- Make delete actually stop resources by default. [#7210](https://github.com/GoogleCloudPlatform/kubernetes/pull/7210) (brendandburns)
|
||||
- Change kube2sky to use token-system-dns secret, point at https endpoint ... [#7154](https://github.com/GoogleCloudPlatform/kubernetes/pull/7154)(cjcullen)
|
||||
- Updated CoreOS bare metal docs for 0.15.0 [#7364](https://github.com/GoogleCloudPlatform/kubernetes/pull/7364) (hvolkmer)
|
||||
- Print named ports in 'describe service' [#7424](https://github.com/GoogleCloudPlatform/kubernetes/pull/7424) (thockin)
|
||||
- AWS
|
||||
- Return public & private addresses in GetNodeAddresses [#7040](https://github.com/GoogleCloudPlatform/kubernetes/pull/7040) (justinsb)
|
||||
- Improving getting existing VPC and subnet [#6606](https://github.com/GoogleCloudPlatform/kubernetes/pull/6606) (gust1n)
|
||||
- Set hostname\_override for minions, back to fully-qualified name [#7182](https://github.com/GoogleCloudPlatform/kubernetes/pull/7182) (justinsb)
|
||||
- Conversion to v1beta3
|
||||
- Convert node level logging agents to v1beta3 [#7274](https://github.com/GoogleCloudPlatform/kubernetes/pull/7274) (satnam6502)
|
||||
- Removing more references to v1beta1 from pkg/ [#7128](https://github.com/GoogleCloudPlatform/kubernetes/pull/7128) (nikhiljindal)
|
||||
- update examples/cassandra to api v1beta3 [#7258](https://github.com/GoogleCloudPlatform/kubernetes/pull/7258) (caesarxuchao)
|
||||
- Convert Elasticsearch logging to v1beta3 and de-salt [#7246](https://github.com/GoogleCloudPlatform/kubernetes/pull/7246) (satnam6502)
|
||||
- Update examples/storm for v1beta3 [#7231](https://github.com/GoogleCloudPlatform/kubernetes/pull/7231) (bcbroussard)
|
||||
- Update examples/spark for v1beta3 [#7230](https://github.com/GoogleCloudPlatform/kubernetes/pull/7230) (bcbroussard)
|
||||
- Update Kibana RC and service to v1beta3 [#7240](https://github.com/GoogleCloudPlatform/kubernetes/pull/7240) (satnam6502)
|
||||
- Updating the guestbook example to v1beta3 [#7194](https://github.com/GoogleCloudPlatform/kubernetes/pull/7194) (nikhiljindal)
|
||||
- Update Phabricator to v1beta3 example [#7232](https://github.com/GoogleCloudPlatform/kubernetes/pull/7232) (bcbroussard)
|
||||
- Update Kibana pod to speak to Elasticsearch using v1beta3 [#7206](https://github.com/GoogleCloudPlatform/kubernetes/pull/7206) (satnam6502)
|
||||
- Validate Node IPs; clean up validation code [#7180](https://github.com/GoogleCloudPlatform/kubernetes/pull/7180) (ddysher)
|
||||
- Add PortForward to runtime API. [#7391](https://github.com/GoogleCloudPlatform/kubernetes/pull/7391) (vmarmol)
|
||||
- kube-proxy uses token to access port 443 of apiserver [#7303](https://github.com/GoogleCloudPlatform/kubernetes/pull/7303) (erictune)
|
||||
- Move the logging-related directories to where I think they belong [#7014](https://github.com/GoogleCloudPlatform/kubernetes/pull/7014) (a-robinson)
|
||||
- Make client service requests use the default timeout now that external load balancers are created asynchronously [#6870](https://github.com/GoogleCloudPlatform/kubernetes/pull/6870) (a-robinson)
|
||||
- Fix bug in kube-proxy of not updating iptables rules if a service's public IPs change [#6123](https://github.com/GoogleCloudPlatform/kubernetes/pull/6123)(a-robinson)
|
||||
- PersistentVolumeClaimBinder [#6105](https://github.com/GoogleCloudPlatform/kubernetes/pull/6105) (markturansky)
|
||||
- Fixed validation message when trying to submit incorrect secret [#7356](https://github.com/GoogleCloudPlatform/kubernetes/pull/7356) (soltysh)
|
||||
- First step to supporting multiple k8s clusters [#6006](https://github.com/GoogleCloudPlatform/kubernetes/pull/6006) (justinsb)
|
||||
- Parity for namespace handling in secrets E2E [#7361](https://github.com/GoogleCloudPlatform/kubernetes/pull/7361) (pmorie)
|
||||
- Add cleanup policy to RollingUpdater [#6996](https://github.com/GoogleCloudPlatform/kubernetes/pull/6996) (ironcladlou)
|
||||
- Use narrowly scoped interfaces for client access [#6871](https://github.com/GoogleCloudPlatform/kubernetes/pull/6871) (ironcladlou)
|
||||
- Warning about Critical bug in the GlusterFS Volume Plugin [#7319](https://github.com/GoogleCloudPlatform/kubernetes/pull/7319) (wattsteve)
|
||||
- Rolling update
|
||||
- First part of improved rolling update, allow dynamic next replication controller generation. [#7268](https://github.com/GoogleCloudPlatform/kubernetes/pull/7268) (brendandburns)
|
||||
- Further implementation of rolling-update, add rename [#7279](https://github.com/GoogleCloudPlatform/kubernetes/pull/7279) (brendandburns)
|
||||
- Added basic apiserver authz tests. [#7293](https://github.com/GoogleCloudPlatform/kubernetes/pull/7293) (ashcrow)
|
||||
- Retry pod update on version conflict error in e2e test. [#7297](https://github.com/GoogleCloudPlatform/kubernetes/pull/7297) (quinton-hoole)
|
||||
- Add "kubectl validate" command to do a cluster health check. [#6597](https://github.com/GoogleCloudPlatform/kubernetes/pull/6597) (fabioy)
|
||||
- coreos/azure: Weave version bump, various other enhancements [#7224](https://github.com/GoogleCloudPlatform/kubernetes/pull/7224) (errordeveloper)
|
||||
- Azure: Wait for salt completion on cluster initialization [#6576](https://github.com/GoogleCloudPlatform/kubernetes/pull/6576) (jeffmendoza)
|
||||
- Tighten label parsing [#6674](https://github.com/GoogleCloudPlatform/kubernetes/pull/6674) (kargakis)
|
||||
- fix watch of single object [#7263](https://github.com/GoogleCloudPlatform/kubernetes/pull/7263) (lavalamp)
|
||||
- Upgrade go-dockerclient dependency to support CgroupParent [#7247](https://github.com/GoogleCloudPlatform/kubernetes/pull/7247) (guenter)
|
||||
- Make secret volume plugin idempotent [#7166](https://github.com/GoogleCloudPlatform/kubernetes/pull/7166) (pmorie)
|
||||
- Salt reconfiguration to get rid of nginx on GCE [#6618](https://github.com/GoogleCloudPlatform/kubernetes/pull/6618) (roberthbailey)
|
||||
- Revert "Change kube2sky to use token-system-dns secret, point at https e... [#7207](https://github.com/GoogleCloudPlatform/kubernetes/pull/7207) (fabioy)
|
||||
- Pod templates as their own type [#5012](https://github.com/GoogleCloudPlatform/kubernetes/pull/5012) (smarterclayton)
|
||||
- iscsi Test: Add explicit check for attach and detach calls. [#7110](https://github.com/GoogleCloudPlatform/kubernetes/pull/7110) (swagiaal)
|
||||
- Added field selector for listing pods [#7067](https://github.com/GoogleCloudPlatform/kubernetes/pull/7067) (ravigadde)
|
||||
- Record an event on node schedulable changes [#7138](https://github.com/GoogleCloudPlatform/kubernetes/pull/7138) (pravisankar)
|
||||
- Resolve [#6812](https://github.com/GoogleCloudPlatform/kubernetes/issues/6812), limit length of load balancer names [#7145](https://github.com/GoogleCloudPlatform/kubernetes/pull/7145) (caesarxuchao)
|
||||
- Convert error strings to proper validation errors. [#7131](https://github.com/GoogleCloudPlatform/kubernetes/pull/7131) (rjnagal)
|
||||
- ResourceQuota add object count support for secret and volume claims [#6593](https://github.com/GoogleCloudPlatform/kubernetes/pull/6593)(derekwaynecarr)
|
||||
- Use Pod.Spec.Host instead of Pod.Status.HostIP for pod subresources [#6985](https://github.com/GoogleCloudPlatform/kubernetes/pull/6985) (csrwng)
|
||||
- Prioritize deleting the non-running pods when reducing replicas [#6992](https://github.com/GoogleCloudPlatform/kubernetes/pull/6992) (yujuhong)
|
||||
- Kubernetes UI with Dashboard component [#7056](https://github.com/GoogleCloudPlatform/kubernetes/pull/7056) (preillyme)
|
||||
|
||||
To download, please visit https://github.com/GoogleCloudPlatform/kubernetes/releases/tag/v0.16.0
|
||||
|
|
@ -0,0 +1,621 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: "Kubernetes Release: 0.17.0"
|
||||
date: Friday, May 15, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Release Notes:
|
||||
|
||||
* Cleanups
|
||||
|
||||
* Remove old salt configs [#8065][4] (roberthbailey)
|
||||
* Kubelet: minor cleanups [#8069][5] (yujuhong)
|
||||
* v1beta3
|
||||
|
||||
* update example/walkthrough to v1beta3 [#7940][6] (caesarxuchao)
|
||||
* update example/rethinkdb to v1beta3 [#7946][7] (caesarxuchao)
|
||||
* verify the v1beta3 yaml files all work; merge the yaml files [#7917][8] (caesarxuchao)
|
||||
* update examples/cassandra to api v1beta3 [#7258][9] (caesarxuchao)
|
||||
* update service.json in persistent-volume example to v1beta3 [#7899][10] (caesarxuchao)
|
||||
* update mysql-wordpress example to use v1beta3 API [#7864][11] (caesarxuchao)
|
||||
* Update examples/meteor to use API v1beta3 [#7848][12] (caesarxuchao)
|
||||
* update node-selector example to API v1beta3 [#7872][13] (caesarxuchao)
|
||||
* update logging-demo to use API v1beta3; modify the way to access Elasticsearch and Kibana services [#7824][14] (caesarxuchao)
|
||||
* Convert the skydns rc to use v1beta3 and add a health check to it [#7619][15] (a-robinson)
|
||||
* update the hazelcast example to API version v1beta3 [#7728][16] (caesarxuchao)
|
||||
* Fix YAML parsing for v1beta3 objects in the kubelet for file/http [#7515][17] (brendandburns)
|
||||
* Updated kubectl cluster-info to show v1beta3 addresses [#7502][18] (piosz)
|
||||
* Kubelet
|
||||
|
||||
* kubelet: Fix racy kubelet tests. [#7980][19] (yifan-gu)
|
||||
* kubelet/container: Move prober.ContainerCommandRunner to container. [#8079][20] (yifan-gu)
|
||||
* Kubelet: set host field in the pending pod status [#6127][21] (yujuhong)
|
||||
* Fix the kubelet node watch [#6442][22] (yujuhong)
|
||||
* Kubelet: recreate mirror pod if the static pod changes [#6607][23] (yujuhong)
|
||||
* Kubelet: record the timestamp correctly in the runtime cache [#7749][24] (yujuhong)
|
||||
* Kubelet: wait until container runtime is up [#7729][25] (yujuhong)
|
||||
* Kubelet: replace DockerManager with the Runtime interface [#7674][26] (yujuhong)
|
||||
* Kubelet: filter out terminated pods in SyncPods [#7301][27] (yujuhong)
|
||||
* Kubelet: parallelize cleaning up containers in unwanted pods [#7048][28] (yujuhong)
|
||||
* kubelet: Add container runtime option for rkt. [#7952][29] (yifan-gu)
|
||||
* kubelet/rkt: Remove build label. [#7916][30] (yifan-gu)
|
||||
* kubelet/metrics: Move instrumented_docker.go to dockertools. [#7327][31] (yifan-gu)
|
||||
* kubelet/rkt: Add GetPods() for rkt. [#7599][32] (yifan-gu)
|
||||
* kubelet/rkt: Add KillPod() and GetPodStatus() for rkt. [#7605][33] (yifan-gu)
|
||||
* pkg/kubelet: Fix logging. [#4755][34] (yifan-gu)
|
||||
* kubelet: Refactor RunInContainer/ExecInContainer/PortForward. [#6491][35] (yifan-gu)
|
||||
* kubelet/DockerManager: Fix returning empty error from GetPodStatus(). [#6609][36] (yifan-gu)
|
||||
* kubelet: Move pod infra container image setting to dockertools. [#6634][37] (yifan-gu)
|
||||
* kubelet/fake_docker_client: Use self's PID instead of 42 in testing. [#6653][38] (yifan-gu)
|
||||
* kubelet/dockertool: Move Getpods() to DockerManager. [#6778][39] (yifan-gu)
|
||||
* kubelet/dockertools: Add puller interfaces in the containerManager. [#6776][40] (yifan-gu)
|
||||
* kubelet: Introduce PodInfraContainerChanged(). [#6608][41] (yifan-gu)
|
||||
* kubelet/container: Replace DockerCache with RuntimeCache. [#6795][42] (yifan-gu)
|
||||
* kubelet: Clean up computePodContainerChanges. [#6844][43] (yifan-gu)
|
||||
* kubelet: Refactor prober. [#7009][44] (yifan-gu)
|
||||
* kubelet/container: Update the runtime interface. [#7466][45] (yifan-gu)
|
||||
* kubelet: Refactor isPodRunning() in runonce.go [#7477][46] (yifan-gu)
|
||||
* kubelet/rkt: Add basic rkt runtime routines. [#7465][47] (yifan-gu)
|
||||
* kubelet/rkt: Add podInfo. [#7555][48] (yifan-gu)
|
||||
* kubelet/container: Add GetContainerLogs to runtime interface. [#7488][49] (yifan-gu)
|
||||
* kubelet/rkt: Add routines for converting kubelet pod to rkt pod. [#7543][50] (yifan-gu)
|
||||
* kubelet/rkt: Add RunPod() for rkt. [#7589][51] (yifan-gu)
|
||||
* kubelet/rkt: Add RunInContainer()/ExecInContainer()/PortForward(). [#7553][52] (yifan-gu)
|
||||
* kubelet/container: Move ShouldContainerBeRestarted() to runtime. [#7613][53] (yifan-gu)
|
||||
* kubelet/rkt: Add SyncPod() to rkt. [#7611][54] (yifan-gu)
|
||||
* Kubelet: persist restart count of a container [#6794][55] (yujuhong)
|
||||
* kubelet/container: Move pty*.go to container runtime package. [#7951][56] (yifan-gu)
|
||||
* kubelet: Add container runtime option for rkt. [#7900][57] (yifan-gu)
|
||||
* kubelet/rkt: Add docker prefix to image string. [#7803][58] (yifan-gu)
|
||||
* kubelet/rkt: Inject dependencies to rkt. [#7849][59] (yifan-gu)
|
||||
* kubelet/rkt: Remove dependencies on rkt.store [#7859][60] (yifan-gu)
|
||||
* Kubelet talks securely to apiserver [#2387][61] (erictune)
|
||||
* Rename EnvVarSource.FieldPath -> FieldRef and add example [#7592][62] (pmorie)
|
||||
* Add containerized option to kubelet binary [#7741][63] (pmorie)
|
||||
* Ease building kubelet image [#7948][64] (pmorie)
|
||||
* Remove unnecessary bind-mount from dockerized kubelet run [#7854][65] (pmorie)
|
||||
* Add ability to dockerize kubelet in local cluster [#7798][66] (pmorie)
|
||||
* Create docker image for kubelet [#7797][67] (pmorie)
|
||||
* Security context - types, kubelet, admission [#7343][68] (pweil-)
|
||||
* Kubelet: Add rkt as a runtime option [#7743][69] (vmarmol)
|
||||
* Fix kubelet's docker RunInContainer implementation [#7746][70] (vishh)
|
||||
* AWS
|
||||
|
||||
* AWS: Don't try to copy gce_keys in jenkins e2e job [#8018][71] (justinsb)
|
||||
* AWS: Copy some new properties from config-default => config.test [#7992][72] (justinsb)
|
||||
* AWS: make it possible to disable minion public ip assignment [#7928][73] (manolitto)
|
||||
* update AWS CloudFormation template and cloud-configs [#7667][74] (antoineco)
|
||||
* AWS: Fix variable naming that meant not all tokens were written [#7736][75] (justinsb)
|
||||
* AWS: Change apiserver to listen on 443 directly, not through nginx [#7678][76] (justinsb)
|
||||
* AWS: Improving getting existing VPC and subnet [#6606][77] (gust1n)
|
||||
* AWS EBS volume support [#5138][78] (justinsb)
|
||||
* Introduce an 'svc' segment for DNS search [#8089][79] (thockin)
|
||||
* Adds ability to define a prefix for etcd paths [#5707][80] (kbeecher)
|
||||
* Add kubectl log --previous support to view last terminated container log [#7973][81] (dchen1107)
|
||||
* Add a flag to disable legacy APIs [#8083][82] (brendandburns)
|
||||
* make the dockerkeyring handle multiple matching credentials [#7971][83] (deads2k)
|
||||
* Convert Fluentd to Cloud Logging pod specs to YAML [#8078][84] (satnam6502)
|
||||
* Use etcd to allocate PortalIPs instead of in-mem [#7704][85] (smarterclayton)
|
||||
* eliminate auth-path [#8064][86] (deads2k)
|
||||
* Record failure reasons for image pulling [#7981][87] (yujuhong)
|
||||
* Rate limit replica creation [#7869][88] (bprashanth)
|
||||
* Upgrade to Kibana 4 for cluster logging [#7995][89] (satnam6502)
|
||||
* Added name to kube-dns service [#8049][90] (piosz)
|
||||
* Fix validation by moving it into the resource builder. [#7919][91] (brendandburns)
|
||||
* Add cache with multiple shards to decrease lock contention [#8050][92] (fgrzadkowski)
|
||||
* Delete status from displayable resources [#8039][93] (nak3)
|
||||
* Refactor volume interfaces to receive pod instead of ObjectReference [#8044][94] (pmorie)
|
||||
* fix kube-down for provider gke [#7565][95] (jlowdermilk)
|
||||
* Service port names are required for multi-port [#7786][96] (thockin)
|
||||
* Increase disk size for kubernetes master. [#8051][97] (fgrzadkowski)
|
||||
* expose: Load input object for increased safety [#7774][98] (kargakis)
|
||||
* Improvements to conversion methods generator [#7896][99] (wojtek-t)
|
||||
* Added displaying external IPs to kubectl cluster-info [#7557][100] (piosz)
|
||||
* Add missing Errorf formatting directives [#8037][101] (shawnps)
|
||||
* Add startup code to apiserver to migrate etcd keys [#7567][102] (kbeecher)
|
||||
* Use error type from docker go-client instead of string [#8021][103] (ddysher)
|
||||
* Accurately get hardware cpu count in Vagrantfile. [#8024][104] (BenTheElder)
|
||||
* Stop setting a GKE specific version of the kubeconfig file [#7921][105] (roberthbailey)
|
||||
* Make the API server deal with HEAD requests via the service proxy [#7950][106] (satnam6502)
|
||||
* GlusterFS Critical Bug Resolved - Removing warning in README [#7983][107] (wattsteve)
|
||||
* Don't use the first token `uname -n` as the hostname [#7967][108] (yujuhong)
|
||||
* Call kube-down in test-teardown for vagrant. [#7982][109] (BenTheElder)
|
||||
* defaults_tests: verify defaults when converting to an API object [#6235][110] (yujuhong)
|
||||
* Use the full hostname for mirror pod name. [#7910][111] (yujuhong)
|
||||
* Removes RunPod in the Runtime interface [#7657][112] (yujuhong)
|
||||
* Clean up dockertools/manager.go and add more unit tests [#7533][113] (yujuhong)
|
||||
* Adapt pod killing and cleanup for generic container runtime [#7525][114] (yujuhong)
|
||||
* Fix pod filtering in replication controller [#7198][115] (yujuhong)
|
||||
* Print container statuses in `kubectl get pods` [#7116][116] (yujuhong)
|
||||
* Prioritize deleting the non-running pods when reducing replicas [#6992][117] (yujuhong)
|
||||
* Fix locking issue in pod manager [#6872][118] (yujuhong)
|
||||
* Limit the number of concurrent tests in integration.go [#6655][119] (yujuhong)
|
||||
* Fix typos in different config comments [#7931][120] (pmorie)
|
||||
* Update cAdvisor dependency. [#7929][121] (vmarmol)
|
||||
* Ubuntu-distro: deprecate & merge ubuntu single node work to ubuntu cluster node stuff [#5498][122] (resouer)
|
||||
* Add control variables to Jenkins E2E script [#7935][123] (saad-ali)
|
||||
* Check node status as part of validate-cluster.sh. [#7932][124] (fabioy)
|
||||
* Add old endpoint cleanup function [#7821][125] (lavalamp)
|
||||
* Support recovery from in the middle of a rename. [#7620][126] (brendandburns)
|
||||
* Update Exec and Portforward client to use pod subresource [#7715][127] (csrwng)
|
||||
* Added NFS to PV structs [#7564][128] (markturansky)
|
||||
* Fix environment variable error in Vagrant docs [#7904][129] (posita)
|
||||
* Adds a simple release-note builder that scrapes the Github API for recent PRs [#7616][130] (brendandburns)
|
||||
* Scheduler ignores nodes that are in a bad state [#7668][131] (bprashanth)
|
||||
* Set GOMAXPROCS for etcd [#7863][132] (fgrzadkowski)
|
||||
* Auto-generated conversion methods calling one another [#7556][133] (wojtek-t)
|
||||
* Bring up a kubernetes cluster using coreos image as worker nodes [#7445][134] (dchen1107)
|
||||
* Godep: Add godep for rkt. [#7410][135] (yifan-gu)
|
||||
* Add volumeGetter to rkt. [#7870][136] (yifan-gu)
|
||||
* Update cAdvisor dependency. [#7897][137] (vmarmol)
|
||||
* DNS: expose 53/TCP [#7822][138] (thockin)
|
||||
* Set NodeReady=False when docker is dead [#7763][139] (wojtek-t)
|
||||
* Ignore latency metrics for events [#7857][140] (fgrzadkowski)
|
||||
* SecurityContext admission clean up [#7792][141] (pweil-)
|
||||
* Support manually-created and generated conversion functions [#7832][142] (wojtek-t)
|
||||
* Add latency metrics for etcd operations [#7833][143] (fgrzadkowski)
|
||||
* Update errors_test.go [#7885][144] (hurf)
|
||||
* Change signature of container runtime PullImage to allow pull w/ secret [#7861][145] (pmorie)
|
||||
* Fix bug in Service documentation: incorrect location of "selector" in JSON [#7873][146] (bkeroackdsc)
|
||||
* Fix controller-manager manifest for providers that don't specify CLUSTER_IP_RANGE [#7876][147] (cjcullen)
|
||||
* Fix controller unittests [#7867][148] (bprashanth)
|
||||
* Enable GCM and GCL instead of InfluxDB on GCE [#7751][149] (saad-ali)
|
||||
* Remove restriction that cluster-cidr be a class-b [#7862][150] (cjcullen)
|
||||
* Fix OpenShift example [#7591][151] (derekwaynecarr)
|
||||
* API Server - pass path name in context of create request for subresource [#7718][152] (csrwng)
|
||||
* Rolling Updates: Add support for --rollback. [#7575][153] (brendandburns)
|
||||
* Update to container-vm-v20150505 (Also updates GCE to Docker 1.6) [#7820][154] (zmerlynn)
|
||||
* Fix metric label [#7830][155] (rhcarvalho)
|
||||
* Fix v1beta1 typos in v1beta2 conversions [#7838][156] (pmorie)
|
||||
* skydns: use the etcd-2.x native syntax, enable IANA attributed ports. [#7764][157] (AntonioMeireles)
|
||||
* Added port 6443 to kube-proxy default IP address for api-server [#7794][158] (markllama)
|
||||
* Added client header info for authentication doc. [#7834][159] (ashcrow)
|
||||
* Clean up safe_format_and_mount spam in the startup logs [#7827][160] (zmerlynn)
|
||||
* Set allocate_node_cidrs to be blank by default. [#7829][161] (roberthbailey)
|
||||
* Fix sync problems in [#5246][162] [#7799][163] (cjcullen)
|
||||
* Fix event doc link [#7823][164] (saad-ali)
|
||||
* Cobra update and bash completions fix [#7776][165] (eparis)
|
||||
* Fix kube2sky flakes. Fix tools.GetEtcdVersion to work with etcd > 2.0.7 [#7675][166] (cjcullen)
|
||||
* Change kube2sky to use token-system-dns secret, point at https endpoint ... [#7154][167] (cjcullen)
|
||||
* replica: serialize created-by reference [#7468][168] (simon3z)
|
||||
* Inject mounter into volume plugins [#7702][169] (pmorie)
|
||||
* bringing CoreOS cloud-configs up-to-date (against 0.15.x and latest OS' alpha) [#6973][170] (AntonioMeireles)
|
||||
* Update kubeconfig-file doc. [#7787][171] (jlowdermilk)
|
||||
* Throw an API error when deleting namespace in termination [#7780][172] (derekwaynecarr)
|
||||
* Fix command field PodExecOptions [#7773][173] (csrwng)
|
||||
* Start ImageManager housekeeping in Run(). [#7785][174] (vmarmol)
|
||||
* fix DeepCopy to properly support runtime.EmbeddedObject [#7769][175] (deads2k)
|
||||
* fix master service endpoint system for multiple masters [#7273][176] (lavalamp)
|
||||
* Add genbashcomp to KUBE_TEST_TARGETS [#7757][177] (nak3)
|
||||
* Change the cloud provider TCPLoadBalancerExists function to GetTCPLoadBalancer...[#7669][178] (a-robinson)
|
||||
* Add containerized option to kubelet binary [#7772][179] (pmorie)
|
||||
* Fix swagger spec [#7779][180] (pmorie)
|
||||
* FIX: Issue [#7750][181] \- Hyperkube docker image needs certificates to connect to cloud-providers [#7755][182] (viklas)
|
||||
* Add build labels to rkt [#7752][183] (vmarmol)
|
||||
* Check license boilerplate for python files [#7672][184] (eparis)
|
||||
* Reliable updates in rollingupdate [#7705][185] (bprashanth)
|
||||
* Don't exit abruptly if there aren't yet any minions right after the cluster is created. [#7650][186] (roberthbailey)
|
||||
* Make changes suggested in [#7675][166] [#7742][187] (cjcullen)
|
||||
* A guide to set up kubernetes multiple nodes cluster with flannel on fedora [#7357][188] (aveshagarwal)
|
||||
* Setup generators in factory [#7760][189] (kargakis)
|
||||
* Reduce usage of time.After [#7737][190] (lavalamp)
|
||||
* Remove node status from "componentstatuses" call. [#7735][191] (fabioy)
|
||||
* React to failure by growing the remaining clusters [#7614][192] (tamsky)
|
||||
* Fix typo in runtime_cache.go [#7725][193] (pmorie)
|
||||
* Update non-GCE Salt distros to 1.6.0, fallback to ContainerVM Docker version on GCE [#7740][194] (zmerlynn)
|
||||
* Skip SaltStack install if it's already installed [#7744][195] (zmerlynn)
|
||||
* Expose pod name as a label on containers. [#7712][196] (rjnagal)
|
||||
* Log which SSH key is used in e2e SSH test [#7732][197] (mbforbes)
|
||||
* Add a central simple getting started guide with kubernetes guide. [#7649][198] (brendandburns)
|
||||
* Explicitly state the lack of support for 'Requests' for the purposes of scheduling [#7443][199] (vishh)
|
||||
* Select IPv4-only from host interfaces [#7721][200] (smarterclayton)
|
||||
* Metrics tests can't run on Mac [#7723][201] (smarterclayton)
|
||||
* Add step to API changes doc for swagger regen [#7727][202] (pmorie)
|
||||
* Add NsenterMounter mount implementation [#7703][203] (pmorie)
|
||||
* add StringSet.HasAny [#7509][204] (deads2k)
|
||||
* Add an integration test that checks for the metrics we expect to be exported from the master [#6941][205] (a-robinson)
|
||||
* Minor bash update found by shellcheck.net [#7722][206] (eparis)
|
||||
* Add --hostport to run-container. [#7536][207] (rjnagal)
|
||||
* Have rkt implement the container Runtime interface [#7659][208] (vmarmol)
|
||||
* Change the order the different versions of API are registered [#7629][209] (caesarxuchao)
|
||||
* expose: Create objects in a generic way [#7699][210] (kargakis)
|
||||
* Requeue rc if a single get/put retry on status.Replicas fails [#7643][211] (bprashanth)
|
||||
* logs for master components [#7316][212] (ArtfulCoder)
|
||||
* cloudproviders: add ovirt getting started guide [#7522][213] (simon3z)
|
||||
* Make rkt-install a oneshot. [#7671][214] (vmarmol)
|
||||
* Provide container_runtime flag to Kubelet in CoreOS. [#7665][215] (vmarmol)
|
||||
* Boilerplate speedup [#7654][216] (eparis)
|
||||
* Log host for failed pod in Density test [#7700][217] (wojtek-t)
|
||||
* Removes spurious quotation mark [#7655][218] (alindeman)
|
||||
* Add kubectl_label to custom functions in bash completion [#7694][219] (nak3)
|
||||
* Enable profiling in kube-controller [#7696][220] (wojtek-t)
|
||||
* Set vagrant test cluster default NUM_MINIONS=2 [#7690][221] (BenTheElder)
|
||||
* Add metrics to measure cache hit ratio [#7695][222] (fgrzadkowski)
|
||||
* Change IP to IP(S) in service columns for kubectl get [#7662][223] (jlowdermilk)
|
||||
* annotate required flags for bash_completions [#7076][224] (eparis)
|
||||
* (minor) Add pgrep debugging to etcd error [#7685][225] (jayunit100)
|
||||
* Fixed nil pointer issue in describe when volume is unbound [#7676][226] (markturansky)
|
||||
* Removed unnecessary closing bracket [#7691][227] (piosz)
|
||||
* Added TerminationGracePeriod field to PodSpec and grace-period flag to kubectl stop[#7432][228] (piosz)
|
||||
* Fix boilerplate in test/e2e/scale.go [#7689][229] (wojtek-t)
|
||||
* Update expiration timeout based on observed latencies [#7628][230] (bprashanth)
|
||||
* Output generated conversion functions/names [#7644][231] (liggitt)
|
||||
* Moved the Scale tests into a scale file. [#7645][232] [#7646][233] (rrati)
|
||||
* Truncate GCE load balancer names to 63 chars [#7609][234] (brendandburns)
|
||||
* Add SyncPod() and remove Kill/Run InContainer(). [#7603][235] (vmarmol)
|
||||
* Merge release 0.16 to master [#7663][236] (brendandburns)
|
||||
* Update license boilerplate for examples/rethinkdb [#7637][237] (eparis)
|
||||
* First part of improved rolling update, allow dynamic next replication controller generation. [#7268][238] (brendandburns)
|
||||
* Add license boilerplate to examples/phabricator [#7638][239] (eparis)
|
||||
* Use generic copyright holder name in license boilerplate [#7597][240] (eparis)
|
||||
* Retry incrementing quota if there is a conflict [#7633][241] (derekwaynecarr)
|
||||
* Remove GetContainers from Runtime interface [#7568][242] (yujuhong)
|
||||
* Add image-related methods to DockerManager [#7578][243] (yujuhong)
|
||||
* Remove more docker references in kubelet [#7586][244] (yujuhong)
|
||||
* Add KillContainerInPod in DockerManager [#7601][245] (yujuhong)
|
||||
* Kubelet: Add container runtime option. [#7652][246] (vmarmol)
|
||||
* bump heapster to v0.11.0 and grafana to v0.7.0 [#7626][247] (idosh)
|
||||
* Build github.com/onsi/ginkgo/ginkgo as a part of the release [#7593][248] (ixdy)
|
||||
* Do not automatically decode runtime.RawExtension [#7490][249] (smarterclayton)
|
||||
* Update changelog. [#7500][250] (brendandburns)
|
||||
* Add SyncPod() to DockerManager and use it in Kubelet [#7610][251] (vmarmol)
|
||||
* Build: Push .md5 and .sha1 files for every file we push to GCS [#7602][252] (zmerlynn)
|
||||
* Fix rolling update --image [#7540][253] (bprashanth)
|
||||
* Update license boilerplate for docs/man/md2man-all.sh [#7636][254] (eparis)
|
||||
* Include shell license boilerplate in examples/k8petstore [#7632][255] (eparis)
|
||||
* Add --cgroup_parent flag to Kubelet to set the parent cgroup for pods [#7277][256] (guenter)
|
||||
* change the current dir to the config dir [#7209][257] (you-n-g)
|
||||
* Set Weave To 0.9.0 And Update Etcd Configuration For Azure [#7158][258] (idosh)
|
||||
* Augment describe to search for matching things if it doesn't match the original resource. [#7467][259] (brendandburns)
|
||||
* Add a simple cache for objects stored in etcd. [#7559][260] (fgrzadkowski)
|
||||
* Rkt gc [#7549][261] (yifan-gu)
|
||||
* Rkt pull [#7550][262] (yifan-gu)
|
||||
* Implement Mount interface using mount(8) and umount(8) [#6400][263] (ddysher)
|
||||
* Trim Fluentd tag for Cloud Logging [#7588][264] (satnam6502)
|
||||
* GCE CoreOS cluster - set master name based on variable [#7569][265] (bakins)
|
||||
* Capitalization of KubeProxyVersion wrong in JSON [#7535][266] (smarterclayton)
|
||||
* Make nodes report their external IP rather than the master's. [#7530][267] (mbforbes)
|
||||
* Trim cluster log tags to pod name and container name [#7539][268] (satnam6502)
|
||||
* Handle conversion of boolean query parameters with a value of "false" [#7541][269] (csrwng)
|
||||
* Add image-related methods to Runtime interface. [#7532][270] (vmarmol)
|
||||
* Test whether auto-generated conversions weren't manually edited [#7560][271] (wojtek-t)
|
||||
* Mention :latest behavior for image version tag [#7484][272] (colemickens)
|
||||
* readinessProbe calls livenessProbe.Exec.Command which cause "invalid memory address or nil pointer dereference". [#7487][273] (njuicsgz)
|
||||
* Add RuntimeHooks to abstract Kubelet logic [#7520][274] (vmarmol)
|
||||
* Expose URL() on Request to allow building URLs [#7546][275] (smarterclayton)
|
||||
* Add a simple cache for objects stored in etcd [#7288][276] (fgrzadkowski)
|
||||
* Prepare for chaining autogenerated conversion methods [#7431][277] (wojtek-t)
|
||||
* Increase maxIdleConnection limit when creating etcd client in apiserver. [#7353][278] (wojtek-t)
|
||||
* Improvements to generator of conversion methods. [#7354][279] (wojtek-t)
|
||||
* Code to automatically generate conversion methods [#7107][280] (wojtek-t)
|
||||
* Support recovery for anonymous roll outs [#7407][281] (brendandburns)
|
||||
* Bump kube2sky to 1.2. Point it at https endpoint (3rd try). [#7527][282] (cjcullen)
|
||||
* cluster/gce/coreos: Add metadata-service in node.yaml [#7526][283] (yifan-gu)
|
||||
* Move ComputePodChanges to the Docker runtime [#7480][284] (vmarmol)
|
||||
* Cobra rebase [#7510][285] (eparis)
|
||||
* Adding system oom events from kubelet [#6718][286] (vishh)
|
||||
* Move Prober to its own subpackage [#7479][287] (vmarmol)
|
||||
* Fix parallel-e2e.sh to work on my macbook (bash v3.2) [#7513][288] (cjcullen)
|
||||
* Move network plugin TearDown to DockerManager [#7449][289] (vmarmol)
|
||||
* Fixes [#7498][290] \- CoreOS Getting Started Guide had invalid cloud config [#7499][291] (elsonrodriguez)
|
||||
* Fix invalid character '"' after object key:value pair [#7504][292] (resouer)
|
||||
* Fixed kubelet deleting data from volumes on stop ([#7317][293]). [#7503][294] (jsafrane)
|
||||
* Fixing hooks/description to catch API fields without description tags [#7482][295] (nikhiljindal)
|
||||
* cadvisor is obsoleted so kubelet service does not require it. [#7457][296] (aveshagarwal)
|
||||
* Set the default namespace for events to be "default" [#7408][297] (vishh)
|
||||
* Fix typo in namespace conversion [#7446][298] (liggitt)
|
||||
* Convert Secret registry to use update/create strategy, allow filtering by Type [#7419][299] (liggitt)
|
||||
* Use pod namespace when looking for its GlusterFS endpoints. [#7102][300] (jsafrane)
|
||||
* Fixed name of kube-proxy path in deployment scripts. [#7427][301] (jsafrane)
|
||||
|
||||
To download, please visit https://github.com/GoogleCloudPlatform/kubernetes/releases/tag/v0.17.0
|
||||
|
||||
|
||||
Simple theme. Powered by [Blogger][385].
|
||||
|
||||
[ ![][327] ][386]
|
||||
|
||||
[1]: http://kubernetes.io/images/nav_logo.svg
|
||||
[2]: http://kubernetes.io/docs/
|
||||
[3]: http://blog.kubernetes.io/
|
||||
[4]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8065 "Remove old salt configs"
|
||||
[5]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8069 "Kubelet: minor cleanups"
|
||||
[6]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7940 "update example/walkthrough to v1beta3"
|
||||
[7]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7946 "update example/rethinkdb to v1beta3"
|
||||
[8]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7917 "verify the v1beta3 yaml files all work; merge the yaml files"
|
||||
[9]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7258 "update examples/cassandra to api v1beta3"
|
||||
[10]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7899 "update service.json in persistent-volume example to v1beta3"
|
||||
[11]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7864 "update mysql-wordpress example to use v1beta3 API"
|
||||
[12]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7848 "Update examples/meteor to use API v1beta3"
|
||||
[13]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7872 "update node-selector example to API v1beta3"
|
||||
[14]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7824 "update logging-demo to use API v1beta3; modify the way to access Elasticsearch and Kibana services"
|
||||
[15]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7619 "Convert the skydns rc to use v1beta3 and add a health check to it"
|
||||
[16]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7728 "update the hazelcast example to API version v1beta3"
|
||||
[17]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7515 "Fix YAML parsing for v1beta3 objects in the kubelet for file/http"
|
||||
[18]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7502 "Updated kubectl cluster-info to show v1beta3 addresses"
|
||||
[19]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7980 "kubelet: Fix racy kubelet tests."
|
||||
[20]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8079 "kubelet/container: Move prober.ContainerCommandRunner to container."
|
||||
[21]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6127 "Kubelet: set host field in the pending pod status"
|
||||
[22]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6442 "Fix the kubelet node watch"
|
||||
[23]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6607 "Kubelet: recreate mirror pod if the static pod changes"
|
||||
[24]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7749 "Kubelet: record the timestamp correctly in the runtime cache"
|
||||
[25]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7729 "Kubelet: wait until container runtime is up"
|
||||
[26]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7674 "Kubelet: replace DockerManager with the Runtime interface"
|
||||
[27]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7301 "Kubelet: filter out terminated pods in SyncPods"
|
||||
[28]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7048 "Kubelet: parallelize cleaning up containers in unwanted pods"
|
||||
[29]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7952 "kubelet: Add container runtime option for rkt."
|
||||
[30]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7916 "kubelet/rkt: Remove build label."
|
||||
[31]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7327 "kubelet/metrics: Move instrumented_docker.go to dockertools."
|
||||
[32]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7599 "kubelet/rkt: Add GetPods() for rkt."
|
||||
[33]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7605 "kubelet/rkt: Add KillPod() and GetPodStatus() for rkt."
|
||||
[34]: https://github.com/GoogleCloudPlatform/kubernetes/pull/4755 "pkg/kubelet: Fix logging."
|
||||
[35]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6491 "kubelet: Refactor RunInContainer/ExecInContainer/PortForward."
|
||||
[36]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6609 "kubelet/DockerManager: Fix returning empty error from GetPodStatus()."
|
||||
[37]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6634 "kubelet: Move pod infra container image setting to dockertools."
|
||||
[38]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6653 "kubelet/fake_docker_client: Use self's PID instead of 42 in testing."
|
||||
[39]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6778 "kubelet/dockertool: Move Getpods() to DockerManager."
|
||||
[40]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6776 "kubelet/dockertools: Add puller interfaces in the containerManager."
|
||||
[41]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6608 "kubelet: Introduce PodInfraContainerChanged()."
|
||||
[42]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6795 "kubelet/container: Replace DockerCache with RuntimeCache."
|
||||
[43]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6844 "kubelet: Clean up computePodContainerChanges."
|
||||
[44]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7009 "kubelet: Refactor prober."
|
||||
[45]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7466 "kubelet/container: Update the runtime interface."
|
||||
[46]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7477 "kubelet: Refactor isPodRunning() in runonce.go"
|
||||
[47]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7465 "kubelet/rkt: Add basic rkt runtime routines."
|
||||
[48]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7555 "kubelet/rkt: Add podInfo."
|
||||
[49]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7488 "kubelet/container: Add GetContainerLogs to runtime interface."
|
||||
[50]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7543 "kubelet/rkt: Add routines for converting kubelet pod to rkt pod."
|
||||
[51]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7589 "kubelet/rkt: Add RunPod() for rkt."
|
||||
[52]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7553 "kubelet/rkt: Add RunInContainer()/ExecInContainer()/PortForward()."
|
||||
[53]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7613 "kubelet/container: Move ShouldContainerBeRestarted() to runtime."
|
||||
[54]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7611 "kubelet/rkt: Add SyncPod() to rkt."
|
||||
[55]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6794 "Kubelet: persist restart count of a container"
|
||||
[56]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7951 "kubelet/container: Move pty*.go to container runtime package."
|
||||
[57]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7900 "kubelet: Add container runtime option for rkt."
|
||||
[58]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7803 "kubelet/rkt: Add docker prefix to image string."
|
||||
[59]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7849 "kubelet/rkt: Inject dependencies to rkt."
|
||||
[60]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7859 "kubelet/rkt: Remove dependencies on rkt.store"
|
||||
[61]: https://github.com/GoogleCloudPlatform/kubernetes/pull/2387 "Kubelet talks securely to apiserver"
|
||||
[62]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7592 "Rename EnvVarSource.FieldPath -> FieldRef and add example"
|
||||
[63]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7741 "Add containerized option to kubelet binary"
|
||||
[64]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7948 "Ease building kubelet image"
|
||||
[65]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7854 "Remove unnecessary bind-mount from dockerized kubelet run"
|
||||
[66]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7798 "Add ability to dockerize kubelet in local cluster"
|
||||
[67]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7797 "Create docker image for kubelet"
|
||||
[68]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7343 "Security context - types, kubelet, admission"
|
||||
[69]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7743 "Kubelet: Add rkt as a runtime option"
|
||||
[70]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7746 "Fix kubelet's docker RunInContainer implementation "
|
||||
[71]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8018 "AWS: Don't try to copy gce_keys in jenkins e2e job"
|
||||
[72]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7992 "AWS: Copy some new properties from config-default => config.test"
|
||||
[73]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7928 "AWS: make it possible to disable minion public ip assignment"
|
||||
[74]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7667 "update AWS CloudFormation template and cloud-configs"
|
||||
[75]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7736 "AWS: Fix variable naming that meant not all tokens were written"
|
||||
[76]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7678 "AWS: Change apiserver to listen on 443 directly, not through nginx"
|
||||
[77]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6606 "AWS: Improving getting existing VPC and subnet"
|
||||
[78]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5138 "AWS EBS volume support"
|
||||
[79]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8089 "Introduce an 'svc' segment for DNS search"
|
||||
[80]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5707 "Adds ability to define a prefix for etcd paths"
|
||||
[81]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7973 "Add kubectl log --previous support to view last terminated container log"
|
||||
[82]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8083 "Add a flag to disable legacy APIs"
|
||||
[83]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7971 "make the dockerkeyring handle multiple matching credentials"
|
||||
[84]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8078 "Convert Fluentd to Cloud Logging pod specs to YAML"
|
||||
[85]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7704 "Use etcd to allocate PortalIPs instead of in-mem"
|
||||
[86]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8064 "eliminate auth-path"
|
||||
[87]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7981 "Record failure reasons for image pulling"
|
||||
[88]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7869 "Rate limit replica creation"
|
||||
[89]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7995 "Upgrade to Kibana 4 for cluster logging"
|
||||
[90]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8049 "Added name to kube-dns service"
|
||||
[91]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7919 "Fix validation by moving it into the resource builder."
|
||||
[92]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8050 "Add cache with multiple shards to decrease lock contention"
|
||||
[93]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8039 "Delete status from displayable resources"
|
||||
[94]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8044 "Refactor volume interfaces to receive pod instead of ObjectReference"
|
||||
[95]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7565 "fix kube-down for provider gke"
|
||||
[96]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7786 "Service port names are required for multi-port"
|
||||
[97]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8051 "Increase disk size for kubernetes master."
|
||||
[98]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7774 "expose: Load input object for increased safety"
|
||||
[99]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7896 "Improvements to conversion methods generator"
|
||||
[100]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7557 "Added displaying external IPs to kubectl cluster-info"
|
||||
[101]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8037 "Add missing Errorf formatting directives"
|
||||
[102]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7567 "WIP: Add startup code to apiserver to migrate etcd keys"
|
||||
[103]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8021 "Use error type from docker go-client instead of string"
|
||||
[104]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8024 "Accurately get hardware cpu count in Vagrantfile."
|
||||
[105]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7921 "Stop setting a GKE specific version of the kubeconfig file"
|
||||
[106]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7950 "Make the API server deal with HEAD requests via the service proxy"
|
||||
[107]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7983 "GlusterFS Critical Bug Resolved - Removing warning in README"
|
||||
[108]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7967 "Don't use the first token `uname -n` as the hostname"
|
||||
[109]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7982 "Call kube-down in test-teardown for vagrant."
|
||||
[110]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6235 "defaults_tests: verify defaults when converting to an API object"
|
||||
[111]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7910 "Use the full hostname for mirror pod name."
|
||||
[112]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7657 "Removes RunPod in the Runtime interface"
|
||||
[113]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7533 "Clean up dockertools/manager.go and add more unit tests"
|
||||
[114]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7525 "Adapt pod killing and cleanup for generic container runtime"
|
||||
[115]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7198 "Fix pod filtering in replication controller"
|
||||
[116]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7116 "Print container statuses in `kubectl get pods`"
|
||||
[117]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6992 "Prioritize deleting the non-running pods when reducing replicas"
|
||||
[118]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6872 "Fix locking issue in pod manager"
|
||||
[119]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6655 "Limit the number of concurrent tests in integration.go"
|
||||
[120]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7931 "Fix typos in different config comments"
|
||||
[121]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7929 "Update cAdvisor dependency."
|
||||
[122]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5498 "Ubuntu-distro: deprecate & merge ubuntu single node work to ubuntu cluster node stuff"
|
||||
[123]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7935 "Add control variables to Jenkins E2E script"
|
||||
[124]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7932 "Check node status as part of validate-cluster.sh."
|
||||
[125]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7821 "Add old endpoint cleanup function"
|
||||
[126]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7620 "Support recovery from in the middle of a rename."
|
||||
[127]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7715 "Update Exec and Portforward client to use pod subresource"
|
||||
[128]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7564 "Added NFS to PV structs"
|
||||
[129]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7904 "Fix environment variable error in Vagrant docs"
|
||||
[130]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7616 "Adds a simple release-note builder that scrapes the Github API for recent PRs"
|
||||
[131]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7668 "Scheduler ignores nodes that are in a bad state"
|
||||
[132]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7863 "Set GOMAXPROCS for etcd"
|
||||
[133]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7556 "Auto-generated conversion methods calling one another"
|
||||
[134]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7445 "Bring up a kubernetes cluster using coreos image as worker nodes"
|
||||
[135]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7410 "Godep: Add godep for rkt."
|
||||
[136]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7870 "Add volumeGetter to rkt."
|
||||
[137]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7897 "Update cAdvisor dependency."
|
||||
[138]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7822 "DNS: expose 53/TCP"
|
||||
[139]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7763 "Set NodeReady=False when docker is dead"
|
||||
[140]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7857 "Ignore latency metrics for events"
|
||||
[141]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7792 "SecurityContext admission clean up"
|
||||
[142]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7832 "Support manually-created and generated conversion functions"
|
||||
[143]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7833 "Add latency metrics for etcd operations"
|
||||
[144]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7885 "Update errors_test.go"
|
||||
[145]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7861 "Change signature of container runtime PullImage to allow pull w/ secret"
|
||||
[146]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7873 "Fix bug in Service documentation: incorrect location of 'selector' in JSON"
|
||||
[147]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7876 "Fix controller-manager manifest for providers that don't specify CLUSTER_IP_RANGE"
|
||||
[148]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7867 "Fix controller unittests"
|
||||
[149]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7751 "Enable GCM and GCL instead of InfluxDB on GCE"
|
||||
[150]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7862 "Remove restriction that cluster-cidr be a class-b"
|
||||
[151]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7591 "Fix OpenShift example"
|
||||
[152]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7718 "API Server - pass path name in context of create request for subresource"
|
||||
[153]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7575 "Rolling Updates: Add support for --rollback."
|
||||
[154]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7820 "Update to container-vm-v20150505 (Also updates GCE to Docker 1.6)"
|
||||
[155]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7830 "Fix metric label"
|
||||
[156]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7838 "Fix v1beta1 typos in v1beta2 conversions"
|
||||
[157]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7764 "skydns: use the etcd-2.x native syntax, enable IANA attributed ports."
|
||||
[158]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7794 "Added port 6443 to kube-proxy default IP address for api-server"
|
||||
[159]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7834 "Added client header info for authentication doc."
|
||||
[160]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7827 "Clean up safe_format_and_mount spam in the startup logs"
|
||||
[161]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7829 "Set allocate_node_cidrs to be blank by default."
|
||||
[162]: https://github.com/GoogleCloudPlatform/kubernetes/pull/5246 "Make nodecontroller configure nodes' pod IP ranges"
|
||||
[163]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7799 "Fix sync problems in #5246"
|
||||
[164]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7823 "Fix event doc link"
|
||||
[165]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7776 "Cobra update and bash completions fix"
|
||||
[166]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7675 "Fix kube2sky flakes. Fix tools.GetEtcdVersion to work with etcd > 2.0.7"
|
||||
[167]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7154 "Change kube2sky to use token-system-dns secret, point at https endpoint ..."
|
||||
[168]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7468 "replica: serialize created-by reference"
|
||||
[169]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7702 "Inject mounter into volume plugins"
|
||||
[170]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6973 "bringing CoreOS cloud-configs up-to-date (against 0.15.x and latest OS' alpha) "
|
||||
[171]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7787 "Update kubeconfig-file doc."
|
||||
[172]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7780 "Throw an API error when deleting namespace in termination"
|
||||
[173]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7773 "Fix command field PodExecOptions"
|
||||
[174]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7785 "Start ImageManager housekeeping in Run()."
|
||||
[175]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7769 "fix DeepCopy to properly support runtime.EmbeddedObject"
|
||||
[176]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7273 "fix master service endpoint system for multiple masters"
|
||||
[177]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7757 "Add genbashcomp to KUBE_TEST_TARGETS"
|
||||
[178]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7669 "Change the cloud provider TCPLoadBalancerExists function to GetTCPLoadBalancer..."
|
||||
[179]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7772 "Add containerized option to kubelet binary"
|
||||
[180]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7779 "Fix swagger spec"
|
||||
[181]: https://github.com/GoogleCloudPlatform/kubernetes/issues/7750 "Hyperkube image requires root certificates to work with cloud-providers (at least AWS)"
|
||||
[182]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7755 "FIX: Issue #7750 - Hyperkube docker image needs certificates to connect to cloud-providers"
|
||||
[183]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7752 "Add build labels to rkt"
|
||||
[184]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7672 "Check license boilerplate for python files"
|
||||
[185]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7705 "Reliable updates in rollingupdate"
|
||||
[186]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7650 "Don't exit abruptly if there aren't yet any minions right after the cluster is created."
|
||||
[187]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7742 "Make changes suggested in #7675"
|
||||
[188]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7357 "A guide to set up kubernetes multiple nodes cluster with flannel on fedora"
|
||||
[189]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7760 "Setup generators in factory"
|
||||
[190]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7737 "Reduce usage of time.After"
|
||||
[191]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7735 "Remove node status from 'componentstatuses' call."
|
||||
[192]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7614 "React to failure by growing the remaining clusters"
|
||||
[193]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7725 "Fix typo in runtime_cache.go"
|
||||
[194]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7740 "Update non-GCE Salt distros to 1.6.0, fallback to ContainerVM Docker version on GCE"
|
||||
[195]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7744 "Skip SaltStack install if it's already installed"
|
||||
[196]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7712 "Expose pod name as a label on containers."
|
||||
[197]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7732 "Log which SSH key is used in e2e SSH test"
|
||||
[198]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7649 "Add a central simple getting started guide with kubernetes guide."
|
||||
[199]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7443 "Explicitly state the lack of support for 'Requests' for the purposes of scheduling"
|
||||
[200]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7721 "Select IPv4-only from host interfaces"
|
||||
[201]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7723 "Metrics tests can't run on Mac"
|
||||
[202]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7727 "Add step to API changes doc for swagger regen"
|
||||
[203]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7703 "Add NsenterMounter mount implementation"
|
||||
[204]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7509 "add StringSet.HasAny"
|
||||
[205]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6941 "Add an integration test that checks for the metrics we expect to be exported from the master"
|
||||
[206]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7722 "Minor bash update found by shellcheck.net"
|
||||
[207]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7536 "Add --hostport to run-container."
|
||||
[208]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7659 "Have rkt implement the container Runtime interface"
|
||||
[209]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7629 "Change the order the different versions of API are registered "
|
||||
[210]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7699 "expose: Create objects in a generic way"
|
||||
[211]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7643 "Requeue rc if a single get/put retry on status.Replicas fails"
|
||||
[212]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7316 "logs for master components"
|
||||
[213]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7522 "cloudproviders: add ovirt getting started guide"
|
||||
[214]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7671 "Make rkt-install a oneshot."
|
||||
[215]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7665 "Provide container_runtime flag to Kubelet in CoreOS."
|
||||
[216]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7654 "Boilerplate speedup"
|
||||
[217]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7700 "Log host for failed pod in Density test"
|
||||
[218]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7655 "Removes spurious quotation mark"
|
||||
[219]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7694 "Add kubectl_label to custom functions in bash completion"
|
||||
[220]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7696 "Enable profiling in kube-controller"
|
||||
[221]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7690 "Set vagrant test cluster default NUM_MINIONS=2"
|
||||
[222]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7695 "Add metrics to measure cache hit ratio"
|
||||
[223]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7662 "Change IP to IP(S) in service columns for kubectl get"
|
||||
[224]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7076 "annotate required flags for bash_completions"
|
||||
[225]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7685 "(minor) Add pgrep debugging to etcd error"
|
||||
[226]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7676 "Fixed nil pointer issue in describe when volume is unbound"
|
||||
[227]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7691 "Removed unnecessary closing bracket"
|
||||
[228]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7432 "Added TerminationGracePeriod field to PodSpec and grace-period flag to kubectl stop"
|
||||
[229]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7689 "Fix boilerplate in test/e2e/scale.go"
|
||||
[230]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7628 "Update expiration timeout based on observed latencies"
|
||||
[231]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7644 "Output generated conversion functions/names"
|
||||
[232]: https://github.com/GoogleCloudPlatform/kubernetes/issues/7645 "Move the scale tests into a separate file"
|
||||
[233]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7646 "Moved the Scale tests into a scale file. #7645"
|
||||
[234]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7609 "Truncate GCE load balancer names to 63 chars"
|
||||
[235]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7603 "Add SyncPod() and remove Kill/Run InContainer()."
|
||||
[236]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7663 "Merge release 0.16 to master"
|
||||
[237]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7637 "Update license boilerplate for examples/rethinkdb"
|
||||
[238]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7268 "First part of improved rolling update, allow dynamic next replication controller generation."
|
||||
[239]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7638 "Add license boilerplate to examples/phabricator"
|
||||
[240]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7597 "Use generic copyright holder name in license boilerplate"
|
||||
[241]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7633 "Retry incrementing quota if there is a conflict"
|
||||
[242]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7568 "Remove GetContainers from Runtime interface"
|
||||
[243]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7578 "Add image-related methods to DockerManager"
|
||||
[244]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7586 "Remove more docker references in kubelet"
|
||||
[245]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7601 "Add KillContainerInPod in DockerManager"
|
||||
[246]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7652 "Kubelet: Add container runtime option."
|
||||
[247]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7626 "bump heapster to v0.11.0 and grafana to v0.7.0"
|
||||
[248]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7593 "Build github.com/onsi/ginkgo/ginkgo as a part of the release"
|
||||
[249]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7490 "Do not automatically decode runtime.RawExtension"
|
||||
[250]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7500 "Update changelog."
|
||||
[251]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7610 "Add SyncPod() to DockerManager and use it in Kubelet"
|
||||
[252]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7602 "Build: Push .md5 and .sha1 files for every file we push to GCS"
|
||||
[253]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7540 "Fix rolling update --image "
|
||||
[254]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7636 "Update license boilerplate for docs/man/md2man-all.sh"
|
||||
[255]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7632 "Include shell license boilerplate in examples/k8petstore"
|
||||
[256]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7277 "Add --cgroup_parent flag to Kubelet to set the parent cgroup for pods"
|
||||
[257]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7209 "change the current dir to the config dir"
|
||||
[258]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7158 "Set Weave To 0.9.0 And Update Etcd Configuration For Azure"
|
||||
[259]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7467 "Augment describe to search for matching things if it doesn't match the original resource."
|
||||
[260]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7559 "Add a simple cache for objects stored in etcd."
|
||||
[261]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7549 "Rkt gc"
|
||||
[262]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7550 "Rkt pull"
|
||||
[263]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6400 "Implement Mount interface using mount(8) and umount(8)"
|
||||
[264]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7588 "Trim Fluentd tag for Cloud Logging"
|
||||
[265]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7569 "GCE CoreOS cluster - set master name based on variable"
|
||||
[266]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7535 "Capitalization of KubeProxyVersion wrong in JSON"
|
||||
[267]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7530 "Make nodes report their external IP rather than the master's."
|
||||
[268]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7539 "Trim cluster log tags to pod name and container name"
|
||||
[269]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7541 "Handle conversion of boolean query parameters with a value of 'false'"
|
||||
[270]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7532 "Add image-related methods to Runtime interface."
|
||||
[271]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7560 "Test whether auto-generated conversions weren't manually edited"
|
||||
[272]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7484 "Mention :latest behavior for image version tag"
|
||||
[273]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7487 "readinessProbe calls livenessProbe.Exec.Command which cause 'invalid memory address or nil pointer dereference'."
|
||||
[274]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7520 "Add RuntimeHooks to abstract Kubelet logic"
|
||||
[275]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7546 "Expose URL() on Request to allow building URLs"
|
||||
[276]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7288 "Add a simple cache for objects stored in etcd"
|
||||
[277]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7431 "Prepare for chaining autogenerated conversion methods "
|
||||
[278]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7353 "Increase maxIdleConnection limit when creating etcd client in apiserver."
|
||||
[279]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7354 "Improvements to generator of conversion methods."
|
||||
[280]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7107 "Code to automatically generate conversion methods"
|
||||
[281]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7407 "Support recovery for anonymous roll outs"
|
||||
[282]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7527 "Bump kube2sky to 1.2. Point it at https endpoint (3rd try)."
|
||||
[283]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7526 "cluster/gce/coreos: Add metadata-service in node.yaml"
|
||||
[284]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7480 "Move ComputePodChanges to the Docker runtime"
|
||||
[285]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7510 "Cobra rebase"
|
||||
[286]: https://github.com/GoogleCloudPlatform/kubernetes/pull/6718 "Adding system oom events from kubelet"
|
||||
[287]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7479 "Move Prober to its own subpackage"
|
||||
[288]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7513 "Fix parallel-e2e.sh to work on my macbook (bash v3.2)"
|
||||
[289]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7449 "Move network plugin TearDown to DockerManager"
|
||||
[290]: https://github.com/GoogleCloudPlatform/kubernetes/issues/7498 "CoreOS Getting Started Guide not working"
|
||||
[291]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7499 "Fixes #7498 - CoreOS Getting Started Guide had invalid cloud config"
|
||||
[292]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7504 "Fix invalid character '"' after object key:value pair"
|
||||
[293]: https://github.com/GoogleCloudPlatform/kubernetes/issues/7317 "GlusterFS Volume Plugin deletes the contents of the mounted volume upon Pod deletion"
|
||||
[294]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7503 "Fixed kubelet deleting data from volumes on stop (#7317)."
|
||||
[295]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7482 "Fixing hooks/description to catch API fields without description tags"
|
||||
[296]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7457 "cadvisor is obsoleted so kubelet service does not require it."
|
||||
[297]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7408 "Set the default namespace for events to be 'default'"
|
||||
[298]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7446 "Fix typo in namespace conversion"
|
||||
[299]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7419 "Convert Secret registry to use update/create strategy, allow filtering by Type"
|
||||
[300]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7102 "Use pod namespace when looking for its GlusterFS endpoints."
|
||||
[301]: https://github.com/GoogleCloudPlatform/kubernetes/pull/7427 "Fixed name of kube-proxy path in deployment scripts."
|
||||
[
|
||||
|
|
@ -0,0 +1,104 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: Resource Usage Monitoring in Kubernetes
|
||||
date: Wednesday, May 12, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
Understanding how an application behaves when deployed is crucial to scaling the application and providing a reliable service. In a Kubernetes cluster, application performance can be examined at many different levels: containers, [pods](http://kubernetes.io/docs/user-guide/pods), [services](http://kubernetes.io/docs/user-guide/services), and whole clusters. As part of Kubernetes we want to provide users with detailed resource usage information about their running applications at all these levels. This will give users deep insights into how their applications are performing and where possible application bottlenecks may be found. In comes [Heapster](https://github.com/kubernetes/heapster), a project meant to provide a base monitoring platform on Kubernetes.
|
||||
|
||||
|
||||
**Overview**
|
||||
|
||||
|
||||
Heapster is a cluster-wide aggregator of monitoring and event data. It currently supports Kubernetes natively and works on all Kubernetes setups. Heapster runs as a pod in the cluster, similar to how any Kubernetes application would run. The Heapster pod discovers all nodes in the cluster and queries usage information from the nodes’ [Kubelets](https://github.com/kubernetes/kubernetes/blob/master/DESIGN.md#kubelet), the on-machine Kubernetes agent. The Kubelet itself fetches the data from [cAdvisor](https://github.com/google/cadvisor). Heapster groups the information by pod along with the relevant labels. This data is then pushed to a configurable backend for storage and visualization. Currently supported backends include [InfluxDB](http://influxdb.com/) (with [Grafana](http://grafana.org/) for visualization), [Google Cloud Monitoring](https://cloud.google.com/monitoring/) and many others described in more details here. The overall architecture of the service can be seen below:
|
||||
|
||||
|
||||
[](https://2.bp.blogspot.com/-6Bu15356Zqk/V4mGINP8eOI/AAAAAAAAAmk/-RwvkJUt4rY2cmjqYFBmRo25FQQPRb27ACEw/s1600/monitoring-architecture.png)
|
||||
|
||||
Let’s look at some of the other components in more detail.
|
||||
|
||||
|
||||
|
||||
**cAdvisor**
|
||||
|
||||
|
||||
|
||||
cAdvisor is an open source container resource usage and performance analysis agent. It is purpose built for containers and supports Docker containers natively. In Kubernetes, cadvisor is integrated into the Kubelet binary. cAdvisor auto-discovers all containers in the machine and collects CPU, memory, filesystem, and network usage statistics. cAdvisor also provides the overall machine usage by analyzing the ‘root’ container on the machine.
|
||||
|
||||
|
||||
|
||||
On most Kubernetes clusters, cAdvisor exposes a simple UI for on-machine containers on port 4194. Here is a snapshot of part of cAdvisor’s UI that shows the overall machine usage:
|
||||
|
||||
|
||||
[](https://3.bp.blogspot.com/-V5KAfomW7Cg/V4mGH6OTKSI/AAAAAAAAAmo/EZHcG0afrs0606eTDMCryT6j6SoNzu3PgCEw/s1600/cadvisor.png)
|
||||
|
||||
**Kubelet**
|
||||
|
||||
The Kubelet acts as a bridge between the Kubernetes master and the nodes. It manages the pods and containers running on a machine. Kubelet translates each pod into its constituent containers and fetches individual container usage statistics from cAdvisor. It then exposes the aggregated pod resource usage statistics via a REST API.
|
||||
|
||||
|
||||
|
||||
**STORAGE BACKENDS**
|
||||
|
||||
|
||||
|
||||
**InfluxDB and Grafana**
|
||||
|
||||
|
||||
|
||||
A Grafana setup with InfluxDB is a very popular combination for monitoring in the open source world. InfluxDB exposes an easy to use API to write and fetch time series data. Heapster is setup to use this storage backend by default on most Kubernetes clusters. A detailed setup guide can be found [here](https://github.com/kubernetes/heapster/blob/master/docs/influxdb.md). InfluxDB and Grafana run in Pods. The pod exposes itself as a Kubernetes service which is how Heapster discovers it.
|
||||
|
||||
|
||||
|
||||
The Grafana container serves Grafana’s UI which provides an easy to configure dashboard interface. The default dashboard for Kubernetes contains an example dashboard that monitors resource usage of the cluster and the pods inside of it. This dashboard can easily be customized and expanded. Take a look at the storage schema for InfluxDB [here](https://github.com/kubernetes/heapster/blob/master/docs/storage-schema.md#metrics).
|
||||
|
||||
|
||||
|
||||
Here is a video showing how to monitor a Kubernetes cluster using heapster, InfluxDB and Grafana:
|
||||
|
||||
|
||||
[](https://www.youtube.com/watch?v=SZgqjMrxo3g)
|
||||
|
||||
|
||||
|
||||
|
||||
Here is a snapshot of the default Kubernetes Grafana dashboard that shows the CPU and Memory usage of the entire cluster, individual pods and containers:
|
||||
|
||||
|
||||
|
||||
[](https://1.bp.blogspot.com/-lHMeU_4UnAk/V4mGHyrWkBI/AAAAAAAAAms/SvnncgJ7ieAduBqQzpI86oaboIkAKEpEQCEw/s1600/influx.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
**Google Cloud Monitoring**
|
||||
|
||||
|
||||
|
||||
Google Cloud Monitoring is a hosted monitoring service that allows you to visualize and alert on important metrics in your application. Heapster can be setup to automatically push all collected metrics to Google Cloud Monitoring. These metrics are then available in the [Cloud Monitoring Console](https://app.google.stackdriver.com/). This storage backend is the easiest to setup and maintain. The monitoring console allows you to easily create and customize dashboards using the exported data.
|
||||
|
||||
|
||||
|
||||
Here is a video showing how to setup and run a Google Cloud Monitoring backed Heapster:
|
||||
{% youtube "https://youtube.com/embed/xSMNR2fcoLs" %}
|
||||
Here is a snapshot of a Google Cloud Monitoring dashboard showing cluster-wide resource usage.
|
||||
|
||||
|
||||
|
||||
[](https://2.bp.blogspot.com/-F2j3kYn3IoA/V4mGH3M-0gI/AAAAAAAAAmg/aoml93zPeKsKbTX1tN5sTtRRTw7dAKsxwCEw/s1600/gcm.png)
|
||||
|
||||
|
||||
|
||||
**Try it out!**
|
||||
|
||||
|
||||
|
||||
Now that you’ve learned a bit about Heapster, feel free to try it out on your own clusters! The [Heapster repository](https://github.com/kubernetes/heapster) is available on GitHub. It contains detailed instructions to setup Heapster and its storage backends. Heapster runs by default on most Kubernetes clusters, so you may already have it! Feedback is always welcome. Please let us know if you run into any issues via the troubleshooting channels.
|
||||
|
||||
|
||||
|
||||
_-- Vishnu Kannan and Victor Marmol, Google Software Engineers_
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - May 1 2015 "
|
||||
date: Tuesday, May 11, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Every week the Kubernetes contributing community meets virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
* Simple rolling update - Brendan
|
||||
|
||||
* Rolling update = nice example of why RCs and Pods are good.
|
||||
|
||||
* ...pause… (Brendan needs demo recovery tips from Kelsey)
|
||||
|
||||
* Rolling update has recovery: Cancel update and restart, update continues from where it stopped.
|
||||
|
||||
* New controller gets name of old controller, so appearance is pure update.
|
||||
|
||||
* Can also name versions in update (won't do rename at the end).
|
||||
* Rocket demo - CoreOS folks
|
||||
|
||||
* 2 major differences between rocket & docker: Rocket is daemonless & pod-centric.
|
||||
|
||||
* Rocket has AppContainer format as native, but also supports docker image format.
|
||||
|
||||
* Can run AppContainer and docker containers in same pod.
|
||||
|
||||
* Changes are close to merged.
|
||||
* demo service accounts and secrets being added to pods - Jordan
|
||||
|
||||
* Problem: It's hard to get a token to talk to the API.
|
||||
|
||||
* New API object: "ServiceAccount"
|
||||
|
||||
* ServiceAccount is namespaced, controller makes sure that at least 1 default service account exists in a namespace.
|
||||
|
||||
* Typed secret "ServiceAccountToken", controller makes sure there is at least 1 default token.
|
||||
|
||||
* DEMO
|
||||
|
||||
  * Can create new service account with ServiceAccountToken. Controller will create token for it.
|
||||
|
||||
* Can create a pod with service account, pods will have service account secret mounted at /var/run/secrets/kubernetes.io/…
|
||||
* Kubelet running in a container - Paul
|
||||
|
||||
* Kubelet successfully ran pod w/ mounted secret.
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - May 15 2015 "
|
||||
date: Tuesday, May 18, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Every week the Kubernetes contributing community meets virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
|
||||
* [v1 API][1] \- what's in, what's out
|
||||
* We're trying to fix critical issues we discover with v1beta3
|
||||
* Would like to make a number of minor cleanups that will be expensive to do later
|
||||
* defaulting replication controller spec default to 1
|
||||
* deduplicating security context
|
||||
* change id field to name
|
||||
* rename host
|
||||
* inconsistent times
|
||||
* typo in container states terminated (termination vs. terminated)
|
||||
* flatten structure (requested by heavy API user)
|
||||
* pod templates - could be added after V1, field is not implemented, remove template ref field
|
||||
* in general remove any fields not implemented (can be added later)
|
||||
* if we want to change any of the identifier validation rules, should do it now
|
||||
* recently changed label validation rules to be more precise
|
||||
* Bigger changes
|
||||
* generalized label selectors
|
||||
* service - change the fields in a way that we can add features in a forward compatible manner if possible
|
||||
* public IPs - what to do from a security perspective
|
||||
* Support aci format - there is an image field - add properties to signify the image, or include it in a string
|
||||
* inconsistent on object use / cross reference - needs design discussion
|
||||
* Things to do later
|
||||
* volume source cleanup
|
||||
* multiple API prefixes
|
||||
* watch changes - watch client is not notified of progress
|
||||
* A few other proposals
|
||||
* swagger spec fixes - ongoing
|
||||
* additional field selectors - additive, backward compatible
|
||||
* additional status - additive, backward compatible
|
||||
* elimination of phase - won't make it for v1
|
||||
* Service discussion - Public IPs
|
||||
* with public ips as it exists we can't go to v1
|
||||
* Tim has been developing a mitigation if we can't get Justin's overhaul in (but hopefully we will)
|
||||
* Justin's fix will describe public IPs in a much better way
|
||||
* The general problem is it's too flexible and you can do things that are scary, the mitigation is to restrict public ip usage to specific use cases -- validated public ips would be copied to status, which is what kube-proxy would use
|
||||
* public ips used for -
|
||||
* binding to nodes / node
|
||||
* request a specific load balancer IP (GCE only)
|
||||
* emulate multi-port services -- now we support multi-port services, so no longer necessary
|
||||
* This is a large change, 70% code complete, Tim & Justin working together, parallel code review and updates, need to reconcile and test
|
||||
* Do we want to allow people to request host ports - is there any value in letting people ask for a public port? or should we assign you one?
|
||||
* Tim: we should assign one
|
||||
* discussion of what to do with status - if users set to empty then probably their intention
|
||||
* general answer to the pattern is binding
|
||||
* post v1: if we can make portal ip a non-user settable field, then we need to figure out the transition plan. need to have a fixed ip for dns.
|
||||
* we should be able to just randomly assign services a new port and everything should adjust, but this is not feasible for v1
|
||||
* next iteration of the proposal: PR is being iterated on, testing over the weekend, so PR hopefully ready early next week - gonna be a doozie!
|
||||
* API transition
|
||||
* actively removing all dependencies on v1beta1 and v1beta2, announced their going away
|
||||
* working on a script that will touch everything in the system and will force everything to flip to v1beta3
|
||||
* a release with both APIs supported and with this script can make sure clusters are moved over and we can move the API
|
||||
* Should be gone by 0.19
|
||||
* Help is welcome, especially for trivial things and will try to get as much done as possible in next few weeks
|
||||
* Release candidate targeting mid june
|
||||
* The new kubectl will not work for old APIs, will be a problem for GKE for clusters pinned to old version. Will be a problem for k8s users as well if they update kubectl
|
||||
* Since there's no way to upgrade a GKE cluster, users are going to have to tear down and upgrade their cluster
|
||||
* we're going to stop testing v1beta1 very soon, trying to streamline the testing paths in our CI pipelines
|
||||
* Did we decide we are not going to do namespace autoprovisioning?
|
||||
* Brian would like to turn it off - no objections
|
||||
* Documentation should include creating namespaces
|
||||
* Would like to impose a default CPU for the default namespace
|
||||
* would cap the number of pods, would reduce the resource exhaustion issue
|
||||
* would eliminate need to explicitly cap the number of pods on a node due to IP exhaustion
|
||||
* could add resources as arguments to the porcelain commands
|
||||
* kubectl run is a simplified command, but it could include some common things (image, command, ports). but could add resources
|
||||
* Kubernetes 1.0 Launch Event
|
||||
* Save the date: July 21st in Portland, OR - a part of OSCON
|
||||
* Blog posts, whitepapers, etc. welcome to be published
|
||||
* Event will be live streamed, mostly demos & customer talks, keynote
|
||||
* Big launch party in the evening
|
||||
* Kit to send more info in next couple weeks
|
||||
|
||||
[1]: https://github.com/GoogleCloudPlatform/kubernetes/issues/7018
|
||||
|
|
@ -0,0 +1,316 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Cluster Level Logging with Kubernetes "
|
||||
date: Friday, June 11, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
|
||||
A Kubernetes cluster will typically be humming along running many system and application pods. How does the system administrator collect, manage and query the logs of the system pods? How does a user query the logs of their application which is composed of many pods which may be restarted or automatically generated by the Kubernetes system? These questions are addressed by the Kubernetes cluster level logging services.
|
||||
|
||||
|
||||
Cluster level logging for Kubernetes allows us to collect logs which persist beyond the lifetime of the pod’s container images or the lifetime of the pod or even cluster. In this article we assume that a Kubernetes cluster has been created with cluster level logging support for sending logs to [Google Cloud Logging](https://cloud.google.com/logging/docs/). This is an option when creating a [Google Container Engine](https://cloud.google.com/container-engine/) (GKE) cluster, and is enabled by default for the open source [Google Compute Engine](https://cloud.google.com/compute/) (GCE) Kubernetes distribution. After a cluster has been created you will have a collection of system [pods](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/pods.md) running that support monitoring, logging and DNS resolution for names of Kubernetes services:
|
||||
|
||||
```
|
||||
$ kubectl get pods
|
||||
```
|
||||
```
|
||||
NAME READY REASON RESTARTS AGE
|
||||
|
||||
fluentd-cloud-logging-kubernetes-minion-0f64 1/1 Running 0 32m
|
||||
|
||||
fluentd-cloud-logging-kubernetes-minion-27gf 1/1 Running 0 32m
|
||||
|
||||
fluentd-cloud-logging-kubernetes-minion-pk22 1/1 Running 0 31m
|
||||
|
||||
fluentd-cloud-logging-kubernetes-minion-20ej 1/1 Running 0 31m
|
||||
|
||||
kube-dns-v3-pk22 3/3 Running 0 32m
|
||||
|
||||
|
||||
monitoring-heapster-v1-20ej 0/1 Running 9 32m
|
||||
```
|
||||
Here is the same information in a picture which shows how the pods might be placed on specific nodes.
|
||||
|
||||
|
||||
[](https://1.bp.blogspot.com/-FSXnrHLDMJs/Vxfzx2rsreI/AAAAAAAAAbk/PaDTpksKEZk4e8YQff5-JhGPoEpgyWaHgCLcB/s1600/cloud-logging.png)
|
||||
|
||||
|
||||
|
||||
|
||||
Here is a close up of what is running on each node.
|
||||
|
||||
|
||||
[](https://4.bp.blogspot.com/-T7kPtjq8O9A/Vxfz6k7XogI/AAAAAAAAAbo/-59dO6F58sERDOQGJ7872ex_KkEKFpArwCLcB/s1600/0f64.png)
|
||||
|
||||
|
||||
|
||||
[](https://3.bp.blogspot.com/-5VRLexsSJwA/Vxf0F0ccVDI/AAAAAAAAAbs/rh4KGFc95-cIdrTxAujYH2LMrCQ8vrdzQCLcB/s1600/27gf.png)
|
||||
|
||||
|
||||
|
||||
[](https://4.bp.blogspot.com/-UXOxauNy8FQ/Vxf0SaGujNI/AAAAAAAAAb0/Pnf6e_iiUfoKkooGyrF3Gmd8wh0vPrteQCLcB/s1600/pk22.png)
|
||||
|
||||
[](https://2.bp.blogspot.com/-UgpwCx4BNwQ/Vxf0Wc8-HwI/AAAAAAAAAb4/g3D1bE74FQA2k9uwc9ZbZuB1N7MTU7swgCLcB/s1600/20ej.png)
|
||||
|
||||
|
||||
|
||||
|
||||
The first diagram shows four nodes created on a GCE cluster with the name of each VM node on a purple background. The internal and public IPs of each node are shown on gray boxes and the pods running in each node are shown in green boxes. Each pod box shows the name of the pod and the namespace it runs in, the IP address of the pod and the images which are run as part of the pod’s execution. Here we see that every node is running a fluentd-cloud-logging pod which is collecting the log output of the containers running on the same node and sending them to Google Cloud Logging. A pod which provides a [cluster DNS service](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/dns.md) runs on one of the nodes and a pod which provides monitoring support runs on another node.
|
||||
|
||||
|
||||
To help explain how cluster level logging works let’s start off with a synthetic log generator pod specification [counter-pod.yaml](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/examples/blog-logging/counter-pod.yaml):
|
||||
|
||||
|
||||
|
||||
```
|
||||
apiVersion : v1
|
||||
kind : Pod
|
||||
metadata :
|
||||
name : counter
|
||||
spec :
|
||||
containers :
|
||||
- name : count
|
||||
image : ubuntu:14.04
|
||||
args : [bash, -c,
|
||||
'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done']
|
||||
```
|
||||
|
||||
|
||||
This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let’s create the pod.
|
||||
|
||||
```
|
||||
|
||||
$ kubectl create -f counter-pod.yaml
|
||||
|
||||
|
||||
pods/counter
|
||||
|
||||
```
|
||||
We can observe the running pod:
|
||||
```
|
||||
$ kubectl get pods
|
||||
```
|
||||
```
|
||||
NAME READY REASON RESTARTS AGE
|
||||
|
||||
counter 1/1 Running 0 5m
|
||||
|
||||
fluentd-cloud-logging-kubernetes-minion-0f64 1/1 Running 0 55m
|
||||
|
||||
fluentd-cloud-logging-kubernetes-minion-27gf 1/1 Running 0 55m
|
||||
|
||||
fluentd-cloud-logging-kubernetes-minion-pk22 1/1 Running 0 55m
|
||||
|
||||
fluentd-cloud-logging-kubernetes-minion-20ej 1/1 Running 0 55m
|
||||
|
||||
kube-dns-v3-pk22 3/3 Running 0 55m
|
||||
|
||||
monitoring-heapster-v1-20ej 0/1 Running 9 56m
|
||||
```
|
||||
|
||||
|
||||
This step may take a few minutes to download the ubuntu:14.04 image during which the pod status will be shown as Pending.
|
||||
|
||||
|
||||
One of the nodes is now running the counter pod:
|
||||
|
||||
|
||||
[](https://4.bp.blogspot.com/-BI3zOVlrHwA/Vxf0KwcqtCI/AAAAAAAAAbw/vzv8X8vQrso9Iycx4qQHuOslE8So7smLgCLcB/s1600/27gf-counter.png)
|
||||
|
||||
|
||||
|
||||
|
||||
When the pod status changes to Running we can use the kubectl logs command to view the output of this counter pod.
|
||||
|
||||
```
|
||||
$ kubectl logs counter
|
||||
|
||||
0: Tue Jun 2 21:37:31 UTC 2015
|
||||
|
||||
1: Tue Jun 2 21:37:32 UTC 2015
|
||||
|
||||
2: Tue Jun 2 21:37:33 UTC 2015
|
||||
|
||||
3: Tue Jun 2 21:37:34 UTC 2015
|
||||
|
||||
4: Tue Jun 2 21:37:35 UTC 2015
|
||||
|
||||
5: Tue Jun 2 21:37:36 UTC 2015
|
||||
|
||||
```
|
||||
|
||||
|
||||
This command fetches the log text from the Docker log file for the image that is running in this container. We can connect to the running container and observe the running counter bash script.
|
||||
|
||||
``` bash
|
||||
$ kubectl exec -i counter bash
|
||||
|
||||
ps aux
|
||||
|
||||
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
|
||||
|
||||
root 1 0.0 0.0 17976 2888 ? Ss 00:02 0:00 bash -c for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done
|
||||
|
||||
root 468 0.0 0.0 17968 2904 ? Ss 00:05 0:00 bash
|
||||
|
||||
root 479 0.0 0.0 4348 812 ? S 00:05 0:00 sleep 1
|
||||
|
||||
root 480 0.0 0.0 15572 2212 ? R 00:05 0:00 ps aux
|
||||
```
|
||||
|
||||
What happens if for any reason the image in this pod is killed off and then restarted by Kubernetes? Will we still see the log lines from the previous invocation of the container followed by the log lines for the started container? Or will we lose the log lines from the original container’s execution and only see the log lines for the new container? Let’s find out. First let’s stop the currently running counter.
|
||||
|
||||
```
|
||||
$ kubectl stop pod counter
|
||||
|
||||
pods/counter
|
||||
|
||||
|
||||
```

Now let’s restart the counter.

```
|
||||
|
||||
|
||||
$ kubectl create -f counter-pod.yaml
|
||||
|
||||
pods/counter
|
||||
```
|
||||
|
||||
Let’s wait for the container to restart and get the log lines again.
|
||||
|
||||
```
|
||||
$ kubectl logs counter
|
||||
|
||||
0: Tue Jun 2 21:51:40 UTC 2015
|
||||
|
||||
1: Tue Jun 2 21:51:41 UTC 2015
|
||||
|
||||
2: Tue Jun 2 21:51:42 UTC 2015
|
||||
|
||||
3: Tue Jun 2 21:51:43 UTC 2015
|
||||
|
||||
4: Tue Jun 2 21:51:44 UTC 2015
|
||||
|
||||
5: Tue Jun 2 21:51:45 UTC 2015
|
||||
|
||||
6: Tue Jun 2 21:51:46 UTC 2015
|
||||
|
||||
7: Tue Jun 2 21:51:47 UTC 2015
|
||||
|
||||
8: Tue Jun 2 21:51:48 UTC 2015
|
||||
|
||||
```
|
||||
Oh no! We’ve lost the log lines from the first invocation of the container in this pod! Ideally, we want to preserve all the log lines from each invocation of each container in the pod. Furthermore, even if the pod is restarted we would still like to preserve all the log lines that were ever emitted by the containers in the pod. But don’t fear, this is the functionality provided by cluster level logging in Kubernetes. When a cluster is created, the standard output and standard error output of each container can be ingested using a [Fluentd](http://www.fluentd.org/) agent running on each node into either [Google Cloud Logging](https://cloud.google.com/logging/docs/) or into Elasticsearch and viewed with Kibana. This blog article focuses on Google Cloud Logging.
|
||||
|
||||
|
||||
|
||||
When a Kubernetes cluster is created with logging to Google Cloud Logging enabled, the system creates a pod called fluentd-cloud-logging on each node of the cluster to collect Docker container logs. These pods were shown at the start of this blog article in the response to the first get pods command.
|
||||
|
||||
|
||||
This log collection pod has a specification which looks something like this [fluentd-gcp.yaml](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml):
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
|
||||
kind: Pod
|
||||
|
||||
metadata:
|
||||
|
||||
name: fluentd-cloud-logging
|
||||
|
||||
spec:
|
||||
|
||||
containers:
|
||||
|
||||
- name: fluentd-cloud-logging
|
||||
|
||||
image: gcr.io/google\_containers/fluentd-gcp:1.6
|
||||
|
||||
env:
|
||||
|
||||
- name: FLUENTD\_ARGS
|
||||
|
||||
value: -qq
|
||||
|
||||
volumeMounts:
|
||||
|
||||
- name: containers
|
||||
|
||||
mountPath: /var/lib/docker/containers
|
||||
|
||||
volumes:
|
||||
|
||||
- name: containers
|
||||
|
||||
hostPath:
|
||||
|
||||
path: /var/lib/docker/containers
|
||||
```
|
||||
|
||||
This pod specification maps the directory on the host containing the Docker log files, /var/lib/docker/containers, to a directory inside the container which has the same path. The pod runs one image, gcr.io/google\_containers/fluentd-gcp:1.6, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it.
|
||||
|
||||
|
||||
We can click on the Logs item under the Monitoring section of the Google Developer Console and select the logs for the counter container, which will be called kubernetes.counter\_default\_count. This identifies the name of the pod (counter), the namespace (default) and the name of the container (count) for which the log collection occurred. Using this name we can select just the logs for our counter container from the drop down menu:
|
||||
|
||||
|
||||
|
||||
_(image-counter-new-logs.png)_
|
||||
|
||||
When we view the logs in the Developer Console we observe the logs for both invocations of the container.
|
||||
|
||||
_(image-screenshot-2015-06-02)_
|
||||
|
||||
|
||||
Note the first container counted to 108 and then it was terminated. When the next container image restarted the counting process resumed from 0. Similarly if we deleted the pod and restarted it we would capture the logs for all instances of the containers in the pod whenever the pod was running.
|
||||
|
||||
|
||||
|
||||
Logs ingested into Google Cloud Logging may be exported to various other destinations including [Google Cloud Storage](https://cloud.google.com/storage/) buckets and [BigQuery](https://cloud.google.com/bigquery/). Use the Exports tab in the Cloud Logging console to specify where logs should be streamed to (or follow this link to the [settings tab](https://pantheon.corp.google.com/project/_/logs/settings)).
|
||||
|
||||
|
||||
|
||||
We could query the ingested logs from BigQuery using the SQL query which reports the counter log lines showing the newest lines first.
|
||||
|
||||
|
||||
|
||||
SELECT metadata.timestamp, structPayload.log FROM [mylogs.kubernetes\_counter\_default\_count\_20150611] ORDER BY metadata.timestamp DESC
|
||||
|
||||
|
||||
|
||||
Here is some sample output:
|
||||
|
||||
|
||||
**_(image-bigquery-log-new.png)_**
|
||||
|
||||
|
||||
We could also fetch the logs from Google Cloud Storage buckets to our desktop or laptop and then search them locally. The following command fetches logs for the counter pod running in a cluster which is itself in a GCE project called myproject. Only logs for the date 2015-06-11 are fetched.
|
||||
|
||||
|
||||
```
|
||||
$ gsutil -m cp -r gs://myproject/kubernetes.counter\_default\_count/2015/06/11 .
|
||||
```
|
||||
Now we can run queries over the ingested logs. The example below uses the [jq](http://stedolan.github.io/jq/) program to extract just the log lines.
|
||||
|
||||
|
||||
```
|
||||
$ cat 21\:00\:00\_21\:59\:59\_S0.json | jq '.structPayload.log'
|
||||
|
||||
"0: Thu Jun 11 21:39:38 UTC 2015\n"
|
||||
|
||||
"1: Thu Jun 11 21:39:39 UTC 2015\n"
|
||||
|
||||
"2: Thu Jun 11 21:39:40 UTC 2015\n"
|
||||
|
||||
"3: Thu Jun 11 21:39:41 UTC 2015\n"
|
||||
|
||||
"4: Thu Jun 11 21:39:42 UTC 2015\n"
|
||||
|
||||
"5: Thu Jun 11 21:39:43 UTC 2015\n"
|
||||
|
||||
"6: Thu Jun 11 21:39:44 UTC 2015\n"
|
||||
|
||||
"7: Thu Jun 11 21:39:45 UTC 2015\n"
|
||||
```
|
||||
|
||||
This article has touched briefly on the underlying mechanisms that support gathering cluster level logs on a Kubernetes deployment. The approach here only works for gathering the standard output and standard error output of the processes running in the pod’s containers. To gather other logs that are stored in files one can use a sidecar container to gather the required files as described at the page [Collecting log files within containers with Fluentd and sending them to the Google Cloud Logging service](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/contrib/logging/fluentd-sidecar-gcp).
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Slides: Cluster Management with Kubernetes, talk given at the University of Edinburgh "
|
||||
date: Saturday, June 26, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
On Friday 5 June 2015 I gave a talk called [Cluster Management with Kubernetes](https://docs.google.com/presentation/d/1H4ywDb4vAJeg8KEjpYfhNqFSig0Q8e_X5I36kM9S6q0/pub?start=false&loop=false&delayms=3000) to a general audience at the University of Edinburgh. The talk includes an example of a music store system with a Kibana front end UI and an Elasticsearch based back end which helps to make concrete concepts like pods, replication controllers and services.
|
||||
|
||||
[Cluster Management with Kubernetes](https://docs.google.com/presentation/d/1H4ywDb4vAJeg8KEjpYfhNqFSig0Q8e_X5I36kM9S6q0/pub?start=false&loop=false&delayms=3000).
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " The Distributed System ToolKit: Patterns for Composite Containers "
|
||||
date: Tuesday, June 29, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Having had the privilege of presenting some ideas from Kubernetes at DockerCon 2015, I thought I would make a blog post to share some of these ideas for those of you who couldn’t be there.
|
||||
|
||||
Over the past two years containers have become an increasingly popular way to package and deploy code. Container images solve many real-world problems with existing packaging and deployment tools, but in addition to these significant benefits, containers offer us an opportunity to fundamentally re-think the way we build distributed applications. Just as service oriented architectures (SOA) encouraged the decomposition of applications into modular, focused services, containers should encourage the further decomposition of these services into closely cooperating modular containers. By virtue of establishing a boundary, containers enable users to build their services using modular, reusable components, and this in turn leads to services that are more reliable, more scalable and faster to build than applications built from monolithic containers.
|
||||
|
||||
In many ways the switch from VMs to containers is like the switch from monolithic programs of the 1970s and early 80s to modular object-oriented programs of the late 1980s and onward. The abstraction layer provided by the container image has a great deal in common with the abstraction boundary of the class in object-oriented programming, and it allows the same opportunities to improve developer productivity and application quality. Just like the right way to code is the separation of concerns into modular objects, the right way to package applications in containers is the separation of concerns into modular containers. Fundamentally this means breaking up not just the overall application, but also the pieces within any one server into multiple modular containers that are easy to parameterize and re-use. In this way, just like the standard libraries that are ubiquitous in modern languages, most application developers can compose together modular containers that are written by others, and build their applications more quickly and with higher quality components.
|
||||
|
||||
The benefits of thinking in terms of modular containers are enormous, in particular, modular containers provide the following:
|
||||
|
||||
-
|
||||
Speed application development, since containers can be re-used between teams and even larger communities
|
||||
-
|
||||
Codify expert knowledge, since everyone collaborates on a single containerized implementation that reflects best-practices rather than a myriad of different home-grown containers with roughly the same functionality
|
||||
-
|
||||
Enable agile teams, since the container boundary is a natural boundary and contract for team responsibilities
|
||||
-
|
||||
Provide separation of concerns and focus on specific functionality that reduces spaghetti dependencies and un-testable components
|
||||
|
||||
Building an application from modular containers means thinking about symbiotic groups of containers that cooperate to provide a service, not one container per service. In Kubernetes, the embodiment of this modular container service is a Pod. A Pod is a group of containers that share resources like file systems, kernel namespaces and an IP address. The Pod is the atomic unit of scheduling in a Kubernetes cluster, precisely because the symbiotic nature of the containers in the Pod require that they be co-scheduled onto the same machine, and the only way to reliably achieve this is by making container groups atomic scheduling units.
|
||||
|
||||
|
||||
When you start thinking in terms of Pods, there are naturally some general patterns of modular application development that re-occur multiple times. I’m confident that as we move forward in the development of Kubernetes more of these patterns will be identified, but here are three that we see commonly:
|
||||
|
||||
## Example #1: Sidecar containers
|
||||
|
||||
Sidecar containers extend and enhance the "main" container, they take existing containers and make them better. As an example, consider a container that runs the Nginx web server. Add a different container that syncs the file system with a git repository, share the file system between the containers and you have built Git push-to-deploy. But you’ve done it in a modular manner where the git synchronizer can be built by a different team, and can be reused across many different web servers (Apache, Python, Tomcat, etc). Because of this modularity, you only have to write and test your git synchronizer once and reuse it across numerous apps. And if someone else writes it, you don’t even need to do that.
|
||||
|
||||
[](https://3.bp.blogspot.com/-IVsNKDqS0jE/WRnPX21pxEI/AAAAAAAABJg/lAj3NIFwhPwvJYrmCdVbq1bqNq3E4AkhwCLcB/s1600/Example%2B%25231-%2BSidecar%2Bcontainers%2B.png)
|
||||
|
||||
## Example #2: Ambassador containers
|
||||
|
||||
Ambassador containers proxy a local connection to the world. As an example, consider a Redis cluster with read-replicas and a single write master. You can create a Pod that groups your main application with a Redis ambassador container. The ambassador is a proxy that is responsible for splitting reads and writes and sending them on to the appropriate servers. Because these two containers share a network namespace, they share an IP address and your application can open a connection on “localhost” and find the proxy without any service discovery. As far as your main application is concerned, it is simply connecting to a Redis server on localhost. This is powerful, not just because of separation of concerns and the fact that different teams can easily own the components, but also because in the development environment, you can simply skip the proxy and connect directly to a Redis server that is running on localhost.
|
||||
|
||||
[](https://4.bp.blogspot.com/-yEmqGZ86mNQ/WRnPYG1m3jI/AAAAAAAABJo/94DlN54LA-oTsORjEBHfHS_UQTIbNPvcgCEw/s1600/Example%2B%25232-%2BAmbassador%2Bcontainers.png)
|
||||
|
||||
## Example #3: Adapter containers
|
||||
|
||||
Adapter containers standardize and normalize output. Consider the task of monitoring N different applications. Each application may be built with a different way of exporting monitoring data. (e.g. JMX, StatsD, application specific statistics) but every monitoring system expects a consistent and uniform data model for the monitoring data it collects. By using the adapter pattern of composite containers, you can transform the heterogeneous monitoring data from different systems into a single unified representation by creating Pods that groups the application containers with adapters that know how to do the transformation. Again because these Pods share namespaces and file systems, the coordination of these two containers is simple and straightforward.
|
||||
|
||||
[](https://4.bp.blogspot.com/-4rfSCMwvSwo/WRnPYLLQZqI/AAAAAAAABJk/c29uQgM2lSMHaUL013scJo_z4O8w38mJgCEw/s1600/Example%2B%25233-%2BAdapter%2Bcontainers%2B.png)
|
||||
|
||||
|
||||
In all of these cases, we've used the container boundary as an encapsulation/abstraction boundary that allows us to build modular, reusable components that we combine to build out applications. This reuse enables us to more effectively share containers between different developers, reuse our code across multiple applications, and generally build more reliable, robust distributed systems more quickly. I hope you’ve seen how Pods and composite container patterns can enable you to build robust distributed systems more quickly, and achieve container code re-use. To try these patterns out yourself in your own applications. I encourage you to go check out open source Kubernetes or Google Container Engine.
|
||||
|
||||
- Brendan Burns, Software Engineer at Google
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - May 22 2015 "
|
||||
date: Wednesday, June 02, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Every week the Kubernetes contributing community meet virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
|
||||
Discussion / Topics
|
||||
|
||||
* Code Freeze
|
||||
* Upgrades of cluster
|
||||
* E2E test issues
|
||||
|
||||
Code Freeze process starts EOD 22-May, including
|
||||
|
||||
* Code Slush -- draining PRs that are active. If there are issues for v1 to raise, please do so today.
|
||||
* Community PRs -- plan is to reopen in ~6 weeks.
|
||||
* Key areas for fixes in v1 -- docs, the experience.
|
||||
|
||||
E2E issues and LGTM process
|
||||
|
||||
* Seen end-to-end tests go red.
|
||||
* Plan is to limit merging to on-call. Quinton to communicate.
|
||||
* Can we expose Jenkins runs to community? (Paul)
|
||||
|
||||
* Question/concern to work out is securing Jenkins. Short term conclusion: Will look at pushing Jenkins logs into GCS bucket. Lavalamp will follow up with Jeff Grafton.
|
||||
|
||||
* Longer term solution may be a merge queue, where e2e runs for each merge (as opposed to multiple merges). This exists in Openshift today.
|
||||
|
||||
Cluster Upgrades for Kubernetes as final v1 feature
|
||||
|
||||
* GCE will use Persistent Disk (PD) to mount new image.
|
||||
* OpenShift will follow a traditional update model, with "yum update".
|
||||
* A strawman approach is to have an analog of "kube-push" to update the master, in-place. Feedback in the meeting was
|
||||
|
||||
* Upgrading Docker daemon on the master will kill the master's pods. Agreed. May consider an 'upgrade' phase or explicit step.
|
||||
|
||||
* How is this different than HA master upgrade? See HA case as a superset. The work to do an upgrade would be a prerequisite for HA master upgrade.
|
||||
* Mesos scheduler implements a rolling node upgrade.
|
||||
|
||||
Attention requested for v1 in the Hangout
|
||||
|
||||
* * Discussed that it's an eventually consistent design.*
|
||||
|
||||
* In the meeting, the outcome was: seeking a pattern for atomicity of update across multiple pieces. Paul to ping Tim when ready to review.
|
||||
* Regression in e2e [#8499][1] (Eric Paris)
|
||||
* Asking for review of direction, if not review. [#8334][2] (Mark)
|
||||
* Handling graceful termination (e.g. sigterm to postgres) is not implemented. [#2789][3] (Clayton)
|
||||
|
||||
* Need is to bump up grace period or finish plumbing. In API, client tools, missing is kubelet does use and we don't set the timeout (>0) value.
|
||||
|
||||
* Brendan will look into this graceful term issue.
|
||||
* Load balancer almost ready by JustinSB.
|
||||
|
||||
[1]: https://github.com/GoogleCloudPlatform/kubernetes/issues/8499
|
||||
[2]: https://github.com/GoogleCloudPlatform/kubernetes/pull/8334
|
||||
[3]: https://github.com/GoogleCloudPlatform/kubernetes/issues/2789
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
layout: blog
|
||||
permalink: /blog/:year/:month/:title
|
||||
title: " Announcing the First Kubernetes Enterprise Training Course "
|
||||
date: Thursday, July 08, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
At Google we rely on Linux application containers to run our core infrastructure. Everything from Search to Gmail runs in containers. In fact, we like containers so much that even our Google Compute Engine VMs run in containers! Because containers are critical to our business, we have been working with the community on many of the basic container technologies (from cgroups to Docker’s LibContainer) and even decided to build the next generation of Google’s container scheduling technology, Kubernetes, in the open.
|
||||
|
||||
|
||||
|
||||
One year into the Kubernetes project, and on the eve of our planned V1 release at OSCON, we are pleased to announce the first-ever formal Kubernetes enterprise-focused training session organized by a key Kubernetes contributor, Mesosphere. The inaugural session will be taught by Zed Shaw and Michael Hausenblas from Mesosphere, and will take place on July 20 at OSCON in Portland. [Pre-registration](https://mesosphere.com/training/kubernetes/) is free for early registrants, but space is limited so act soon!
|
||||
|
||||
|
||||
|
||||
This one-day course will cover the basics of building and deploying containerized applications using Kubernetes. It will walk attendees through the end-to-end process of creating a Kubernetes application architecture, building and configuring Docker images, and deploying them on a Kubernetes cluster. Users will also learn the fundamentals of deploying Kubernetes applications and services on our Google Container Engine and Mesosphere’s Datacenter Operating System.
|
||||
|
||||
|
||||
The upcoming Kubernetes bootcamp will be a great way to learn how to apply Kubernetes to solve long-standing deployment and application management problems. This is just the first of what we hope are many, and from a broad set of contributors.
|
||||
|
|
@ -0,0 +1,198 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " How did the Quake demo from DockerCon Work? "
|
||||
date: Friday, July 02, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Shortly after its release in 2013, Docker became a very popular open source container management tool for Linux. Docker has a rich set of commands to control the execution of a container. Commands such as start, stop, restart, kill, pause, and unpause. However, what is still missing is the ability to Checkpoint and Restore (C/R) a container natively via Docker itself.
|
||||
|
||||
|
||||
|
||||
We’ve been actively working with upstream and community developers to add support in Docker for native C/R and hope that checkpoint and restore commands will be introduced in Docker 1.8. As of this writing, it’s possible to C/R a container externally because this functionality was recently merged in libcontainer.
|
||||
|
||||
|
||||
|
||||
External container C/R was demo’d at DockerCon 2015:
|
||||
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
Container C/R offers many benefits including the following:
|
||||
|
||||
- Stop and restart the Docker daemon (say for an upgrade) without having to kill the running containers and restarting them from scratch, losing precious work they had done when they were stopped
|
||||
- Reboot the system without having to restart the containers from scratch. Same benefits as use case 1 above
|
||||
- Speed up the start time of slow-start applications
|
||||
- “Forensic debugging" of processes running in a container by examining their checkpoint images (open files, memory segments, etc.)
|
||||
- Migrate containers by restoring them on a different machine
|
||||
|
||||
CRIU
|
||||
|
||||
Implementing C/R functionality from scratch is a major undertaking and a daunting task. Fortunately, there is a powerful open source tool written in C that has been used in production for checkpointing and restoring entire process trees in Linux. The tool is called CRIU which stands for Checkpoint Restore In Userspace (http://criu.org). CRIU works by:
|
||||
|
||||
- Freezing a running application.
|
||||
- Checkpointing the address space and state of the entire process tree to a collection of “image” files.
|
||||
- Restoring the process tree from checkpoint image files.
|
||||
- Resuming application from the point it was frozen.
|
||||
|
||||
In April 2014, we decided to find out if CRIU could checkpoint and restore Docker containers to facilitate container migration.
|
||||
|
||||
|
||||
#### Phase 1 - External C/R
|
||||
|
||||
The first phase of this effort involved invoking CRIU directly to dump a process tree running inside a container and determining why the checkpoint or restore operation failed. There were quite a few issues that caused CRIU failure. The following three issues were among the more challenging ones.
|
||||
|
||||
#### External Bind Mounts
|
||||
|
||||
Docker sets up /etc/{hostname,hosts,resolv.conf} as targets with source files outside the container's mount namespace.
|
||||
|
||||
The --ext-mount-map command line option was added to CRIU to specify the path of the external bind mounts. For example, assuming default Docker configuration, /etc/hostname in the container's mount namespace is bind mounted from the source at /var/lib/docker/containers/\<container-id\>/hostname. When checkpointing, we tell CRIU to record /etc/hostname's "map" as, say, etc\_hostname. When restoring, we tell CRIU that that the file previously recorded as etc\_hostname should be mapped from the external bind mount at /var/lib/docker/containers/\<container-id\>/hostname.
|
||||
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
#### AUFS Pathnames
|
||||
|
||||
Docker initially used AUFS as its preferred filesystem which is still in wide usage (the preferred filesystem is now OverlayFS). Due to a bug, the AUFS symbolic link paths of /proc/\<pid\>/map\_files point inside AUFS branches instead of their pathnames relative to the container's root. This problem has been fixed in AUFS source code but hasn't made it to all the distros yet. CRIU would get confused seeing the same file in its physical location (in the branch) and its logical location (from the root of mount namespace).
|
||||
|
||||
The --root command line option that was used only during restore was generalized to understand the root of the mount namespace during checkpoint and automatically "fix" the exposed AUFS pathnames.
|
||||
|
||||
|
||||
#### Cgroups
|
||||
|
||||
|
||||
After checkpointing, the Docker daemon removes the container’s cgroups subdirectories (because the container has “exited”). This causes restore to fail.
|
||||
|
||||
The --manage-cgroups command line option was added to CRIU to dump and restore the process's cgroups along with their properties.
|
||||
|
||||
|
||||
The CRIU command lines for a simple container are shown below:
|
||||
```
|
||||
$ docker run -d busybox:latest /bin/sh -c 'i=0; while true; do echo $i \>\> /foo; i=$(expr $i + 1); sleep 3; done'
|
||||
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS
|
||||
168aefb8881b busybox:latest "/bin/sh -c 'i=0; 6 seconds ago Up 4 seconds
|
||||
|
||||
$ sudo criu dump -o dump.log -v4 -t 17810 \
|
||||
-D /tmp/img/\<container\_id\> \
|
||||
--root /var/lib/docker/aufs/mnt/\<container\_id\> \
|
||||
--ext-mount-map /etc/resolv.conf:/etc/resolv.conf \
|
||||
--ext-mount-map /etc/hosts:/etc/hosts \
|
||||
--ext-mount-map /etc/hostname:/etc/hostname \
|
||||
--ext-mount-map /.dockerinit:/.dockerinit \
|
||||
--manage-cgroups \
|
||||
--evasive-devices
|
||||
|
||||
$ docker ps -a
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS
|
||||
168aefb8881b busybox:latest "/bin/sh -c 'i=0; 6 minutes ago Exited (-1) 4 minutes ago
|
||||
|
||||
$ sudo mount -t aufs -o br=\
|
||||
/var/lib/docker/aufs/diff/\<container\_id\>:\
|
||||
/var/lib/docker/aufs/diff/\<container\_id\>-init:\
|
||||
/var/lib/docker/aufs/diff/a9eb172552348a9a49180694790b33a1097f546456d041b6e82e4d7716ddb721:\
|
||||
/var/lib/docker/aufs/diff/120e218dd395ec314e7b6249f39d2853911b3d6def6ea164ae05722649f34b16:\
|
||||
/var/lib/docker/aufs/diff/42eed7f1bf2ac3f1610c5e616d2ab1ee9c7290234240388d6297bc0f32c34229:\
|
||||
/var/lib/docker/aufs/diff/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158:\
|
||||
none /var/lib/docker/aufs/mnt/\<container\_id\>
|
||||
|
||||
$ sudo criu restore -o restore.log -v4 -d
|
||||
-D /tmp/img/\<container\_id\> \
|
||||
--root /var/lib/docker/aufs/mnt/\<container\_id\> \
|
||||
--ext-mount-map /etc/resolv.conf:/var/lib/docker/containers/\<container\_id\>/resolv.conf \
|
||||
--ext-mount-map /etc/hosts:/var/lib/docker/containers/\<container\_id\>/hosts \
|
||||
--ext-mount-map /etc/hostname:/var/lib/docker/containers/\<container\_id\>/hostname \
|
||||
--ext-mount-map /.dockerinit:/var/lib/docker/init/dockerinit-1.0.0 \
|
||||
--manage-cgroups \
|
||||
--evasive-devices
|
||||
|
||||
$ ps -ef | grep /bin/sh
|
||||
root 18580 1 0 12:38 ? 00:00:00 /bin/sh -c i=0; while true; do echo $i \>\> /foo; i=$(expr $i + 1); sleep 3; done
|
||||
|
||||
$ docker ps -a
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS
|
||||
168aefb8881b busybox:latest "/bin/sh -c 'i=0; 7 minutes ago Exited (-1) 5 minutes ago
|
||||
|
||||
docker\_cr.sh
|
||||
```
|
||||
|
||||
Since the command line arguments to CRIU were long, a helper script called docker\_cr.sh was provided in the CRIU source tree to simplify the process. So, for the above container, one would simply C/R the container as follows (for details see [http://criu.org/Docker](http://criu.org/Docker)):
|
||||
|
||||
```
|
||||
$ sudo docker\_cr.sh -c 4397
|
||||
dump successful
|
||||
|
||||
$ sudo docker\_cr.sh -r 4397
|
||||
restore successful
|
||||
```
|
||||
At the end of Phase 1, it was possible to externally checkpoint and restore a Docker 1.0 container using either VFS, AUFS, or UnionFS storage drivers with CRIU v1.3.
|
||||
|
||||
#### Phase 2 - Native C/R
|
||||
|
||||
While external C/R served as a successful proof of concept for container C/R, it had the following limitations:
|
||||
|
||||
|
||||
1. State of a checkpointed container would show as "Exited".
|
||||
2. Docker commands such as logs, kill, etc. will not work on a restored container.
|
||||
3. The restored process tree will be a child of /etc/init instead of the Docker daemon.
|
||||
|
||||
Therefore, the second phase of the effort concentrated on adding native checkpoint and restore commands to Docker.
|
||||
|
||||
|
||||
#### libcontainer, nsinit
|
||||
|
||||
Libcontainer is Docker’s native execution driver. It provides a set of APIs to create and manage containers. The first step of adding native support was the introduction of two methods, checkpoint() and restore(), to libcontainer and the corresponding checkpoint and restore subcommands to nsinit. Nsinit is a simple utility that is used to test and debug libcontainer.
|
||||
|
||||
#### docker checkpoint, docker restore
|
||||
|
||||
With C/R support in libcontainer, the next step was adding checkpoint and restore subcommands to Docker itself. A big challenge in this step was to rebuild the “plumbing” between the container and the daemon. When the daemon initially starts a container, it sets up individual pipes between itself (parent) and the standard input, output, and error file descriptors of the container (child). This is how docker logs can show the output of a container.
|
||||
|
||||
When a container exits after being checkpointed, the pipes between it and the daemon are deleted. During container restore, it’s actually CRIU that is the parent. Therefore, setting up a pipe between the child (container) and an unrelated process (Docker daemon) required a bit of work.
|
||||
|
||||
To address this issue, the --inherit-fd command line option was added to CRIU. Using this option, the Docker daemon tells CRIU to let the restored container “inherit” certain file descriptors passed from the daemon to CRIU.
|
||||
|
||||
The first version of native C/R was demo'ed at the Linux Plumbers Conference (LPC) in October 2014 ([http://linuxplumbersconf.org/2014/ocw/proposals/1899](http://linuxplumbersconf.org/2014/ocw/proposals/1899)).
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
The LPC demo was done with a simple container that did not require network connectivity. Support for restoring network connections was done in early 2015 and demonstrated in this 2-minute [video clip](https://www.youtube.com/watch?v=HFt9v6yqsXo).
|
||||
|
||||
#### Current Status of Container C/R
|
||||
|
||||
In May 2015, the criu branch of libcontainer was merged into master. Using the newly-introduced lightweight [runC](https://blog.docker.com/2015/06/runc/) container runtime, container migration was demo’ed at DockerCon15. In this
|
||||
[](https://www.youtube.com/watch?v=mL9AFkJJAq0) (minute 23:00), a container running Quake was checkpointed and restored on a different machine, effectively implementing container migration.
|
||||
|
||||
At the time of this writing, there are two repos on Github that have native C/R support in Docker:
|
||||
- [Docker 1.5](https://github.com/SaiedKazemi/docker/tree/cr) (old libcontainer, relatively stable)
|
||||
- [Docker 1.7](https://github.com/boucher/docker/tree/cr-combined) (newer, less stable)
|
||||
|
||||
Work is underway to merge C/R functionality into Docker. You can use either of the above repositories to experiment with Docker C/R. If you are using OverlayFS or your container workload uses AIO, please note the following:
|
||||
|
||||
|
||||
|
||||
#### OverlayFS
|
||||
When OverlayFS support was officially merged into the Linux kernel version 3.18, it became the preferred storage driver (instead of AUFS). However, OverlayFS in 3.18 has the following issues:
|
||||
- /proc/\<pid\>/fdinfo/\<fd\> contains mnt\_id which isn’t in /proc/\<pid\>/mountinfo
|
||||
- /proc/\<pid\>/fd/\<fd\> does not contain an absolute path to the opened file
|
||||
|
||||
Both issues are fixed in this [patch](https://lkml.org/lkml/2015/3/20/372) but the patch has not been merged upstream yet.
|
||||
|
||||
#### AIO
|
||||
If you are using a kernel older than 3.19 and your container uses AIO, you need the following kernel patches from 3.19:
|
||||
|
||||
|
||||
- [torvalds: bd9b51e7](https://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=bd9b51e7) by Al Viro
|
||||
- [torvalds: e4a0d3e72](https://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=e4a0d3e72) by Pavel Emelyanov
|
||||
|
||||
|
||||
|
||||
- Saied Kazemi, Software Engineer at Google
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes 1.0 Launch Event at OSCON "
|
||||
date: Friday, July 02, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
In case you haven't heard, the Kubernetes project team & community have some awesome stuff lined up for our release event at OSCON in a few weeks.
|
||||
|
||||
If you haven't already registered for in person or live stream, please do it now! Check out [kuberneteslaunch.com](http://kuberneteslaunch.com/) for all the details. You can also find out there how to get a free expo pass for OSCON which you'll need to attend in person.
|
||||
|
||||
We'll have talks from Google executives Brian Stevens, VP of Cloud Product, and Eric Brewer, VP of Google Infrastructure. They will share their perspective on where Kubernetes is and where it's going that you won't want to miss.
|
||||
|
||||
Several of our community partners will be there including CoreOS, Redapt, Intel, Mesosphere, Mirantis, the OpenStack Foundation, CloudBees, Kismatic and Bitnami.
|
||||
|
||||
And real life users of Kubernetes will be there too. We've announced that zulily Principal Engineer Steve Reed is speaking, and we will let you know about others over the next few days. Let's just say it's a pretty cool list.
|
||||
|
||||
Check it out now - kuberneteslaunch.com
|
||||
|
|
@ -0,0 +1,276 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Strong, Simple SSL for Kubernetes Services "
|
||||
date: Wednesday, July 14, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Hi, I’m Evan Brown [(@evandbrown](http://twitter.com/evandbrown)) and I work on the solutions architecture team for Google Cloud Platform. I recently wrote an [article](https://cloud.google.com/solutions/automated-build-images-with-jenkins-kubernetes) and [tutorial](https://github.com/GoogleCloudPlatform/kube-jenkins-imager) about using Jenkins on Kubernetes to automate the Docker and GCE image build process. Today I’m going to discuss how I used Kubernetes services and secrets to add SSL to the Jenkins web UI. After reading this, you’ll be able to add SSL termination (and HTTP-\>HTTPS redirects + basic auth) to your public HTTP Kubernetes services.
|
||||
|
||||
### In the beginning
|
||||
|
||||
In the spirit of minimum viability, the first version of Jenkins-on-Kubernetes I built was very basic but functional:
|
||||
|
||||
- The Jenkins leader was just a single container in one pod, but it was managed by a replication controller, so if it failed it would automatically respawn.
|
||||
- The Jenkins leader exposes two ports - TCP 8080 for the web UI and TCP 50000 for build agents to register - and those ports are made available as a Kubernetes service with a public load balancer.
|
||||
|
||||
|
||||
|
||||
Here’s a visual of that first version:
|
||||
|
||||
[](http://1.bp.blogspot.com/-ccmpTmulrng/VaVxOs7gysI/AAAAAAAAAU8/bCEzgGGm-pE/s1600/0.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
This works, but I have a few problems with it. First, authentication isn’t configured in a default Jenkins installation. The leader is sitting on the public Internet, accessible to anyone, until you connect and configure authentication. And since there’s no encryption, configuring authentication is kind of a symbolic gesture. We need SSL, and we need it now!
|
||||
|
||||
### Do what you know
|
||||
|
||||
For a few milliseconds I considered trying to get SSL working directly on Jenkins. I’d never done it before, and I caught myself wondering if it would be as straightforward as working with SSL on [Nginx](http://nginx.org/), something I do have experience with. I’m all for learning new things, but this seemed like a great place to not invent a new wheel: SSL on Nginx is straightforward and well documented (as are its reverse-proxy capabilities), and Kubernetes is all about building functionality by orchestrating and composing containers. Let’s use Nginx, and add a few bonus features that Nginx makes simple: HTTP-\>HTTPS redirection, and basic access authentication.
|
||||
|
||||
### SSL termination proxy as an nginx service
|
||||
|
||||
I started by putting together a [Dockerfile](https://github.com/GoogleCloudPlatform/nginx-ssl-proxy/blob/master/Dockerfile) that inherited from the standard nginx image, copied a few Nginx config files, and added a custom entrypoint (start.sh). The entrypoint script checks an environment variable (ENABLE\_SSL) and activates the correct Nginx config accordingly (meaning that unencrypted HTTP reverse proxy is possible, but that defeats the purpose). The script also configures basic access authentication if it’s enabled (the ENABLE\_BASIC\_AUTH env var).
|
||||
|
||||
|
||||
|
||||
Finally, start.sh evaluates the SERVICE\_HOST\_ENV\_NAME and SERVICE\_PORT\_ENV\_NAME env vars. These variables should be set to the names of the environment variables for the Kubernetes service you want to proxy to. In this example, the service for our Jenkins leader is cleverly named jenkins, which means pods in the cluster will see an environment variable named JENKINS\_SERVICE\_HOST and JENKINS\_SERVICE\_PORT\_UI (the port that 8080 is mapped to on the Jenkins leader). SERVICE\_HOST\_ENV\_NAME and SERVICE\_PORT\_ENV\_NAME simply reference the correct service to use for a particular scenario, allowing the image to be used generically across deployments.
|
||||
|
||||
### Defining the Controller and Service
|
||||
|
||||
Like every other pod in this example, we’ll deploy Nginx with a replication controller, allowing us to scale out or in, and recover automatically from container failures. This excerpt from a [complete descriptor in the sample app](https://github.com/GoogleCloudPlatform/kube-jenkins-imager/blob/master/ssl_proxy.yaml#L20-L48) shows some relevant bits of the pod spec:
|
||||
|
||||
|
||||
|
||||
```
|
||||
spec:
|
||||
|
||||
containers:
|
||||
|
||||
-
|
||||
|
||||
name: "nginx-ssl-proxy"
|
||||
|
||||
image: "gcr.io/cloud-solutions-images/nginx-ssl-proxy:latest"
|
||||
|
||||
env:
|
||||
|
||||
-
|
||||
|
||||
name: "SERVICE\_HOST\_ENV\_NAME"
|
||||
|
||||
value: "JENKINS\_SERVICE\_HOST"
|
||||
|
||||
-
|
||||
|
||||
name: "SERVICE\_PORT\_ENV\_NAME"
|
||||
|
||||
value: "JENKINS\_SERVICE\_PORT\_UI"
|
||||
|
||||
-
|
||||
|
||||
name: "ENABLE\_SSL"
|
||||
|
||||
value: "true"
|
||||
|
||||
-
|
||||
|
||||
name: "ENABLE\_BASIC\_AUTH"
|
||||
|
||||
value: "true"
|
||||
|
||||
ports:
|
||||
|
||||
-
|
||||
|
||||
name: "nginx-ssl-proxy-http"
|
||||
|
||||
containerPort: 80
|
||||
|
||||
-
|
||||
|
||||
name: "nginx-ssl-proxy-https"
|
||||
|
||||
containerPort: 443
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
The pod will have a service exposing TCP 80 and 443 to a public load balancer. Here’s the service descriptor [(also available in the sample app](https://github.com/GoogleCloudPlatform/kube-jenkins-imager/blob/master/service_ssl_proxy.yaml)):
|
||||
|
||||
|
||||
|
||||
```
|
||||
kind: "Service"
|
||||
|
||||
apiVersion: "v1"
|
||||
|
||||
metadata:
|
||||
|
||||
name: "nginx-ssl-proxy"
|
||||
|
||||
labels:
|
||||
|
||||
name: "nginx"
|
||||
|
||||
role: "ssl-proxy"
|
||||
|
||||
spec:
|
||||
|
||||
ports:
|
||||
|
||||
-
|
||||
|
||||
name: "https"
|
||||
|
||||
port: 443
|
||||
|
||||
targetPort: "nginx-ssl-proxy-https"
|
||||
|
||||
protocol: "TCP"
|
||||
|
||||
-
|
||||
|
||||
name: "http"
|
||||
|
||||
port: 80
|
||||
|
||||
targetPort: "nginx-ssl-proxy-http"
|
||||
|
||||
protocol: "TCP"
|
||||
|
||||
selector:
|
||||
|
||||
name: "nginx"
|
||||
|
||||
role: "ssl-proxy"
|
||||
|
||||
type: "LoadBalancer"
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
And here’s an overview with the SSL termination proxy in place. Notice that Jenkins is no longer directly exposed to the public Internet:
|
||||
|
||||
[](http://3.bp.blogspot.com/-0B1BEQo_fWc/VaVxVUBkf3I/AAAAAAAAAVE/5yCCnA29C88/s1600/0%2B%25281%2529.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Now, how did the Nginx pods get ahold of the super-secret SSL key/cert and htpasswd file (for basic access auth)?
|
||||
|
||||
### Keep it secret, keep it safe
|
||||
|
||||
Kubernetes has an [API and resource for Secrets](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/secrets.md). Secrets “are intended to hold sensitive information, such as passwords, OAuth tokens, and ssh keys. Putting this information in a secret is safer and more flexible than putting it verbatim in a pod definition or in a docker image.”
|
||||
|
||||
|
||||
|
||||
You can create secrets in your cluster in 3 simple steps:
|
||||
|
||||
|
||||
|
||||
1.
|
||||
Base64-encode your secret data (i.e., SSL key pair or htpasswd file)
|
||||
|
||||
```
|
||||
$ cat ssl.key | base64
|
||||
LS0tLS1CRUdJTiBDRVJUS...
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
1.
|
||||
Create a json document describing your secret, and add the base64-encoded values:
|
||||
|
||||
```
|
||||
apiVersion: "v1"
|
||||
|
||||
kind: "Secret"
|
||||
|
||||
metadata:
|
||||
|
||||
name: "ssl-proxy-secret"
|
||||
|
||||
namespace: "default"
|
||||
|
||||
data:
|
||||
|
||||
proxycert: "LS0tLS1CRUd..."
|
||||
|
||||
proxykey: "LS0tLS1CR..."
|
||||
|
||||
htpasswd: "ZXZhb..."
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
1.
|
||||
Create the secrets resource:
|
||||
|
||||
```
|
||||
$ kubectl create -f secrets.json
|
||||
```
|
||||
|
||||
|
||||
To access the secrets from a container, specify them as a volume mount in your pod spec. Here’s the relevant excerpt from the [Nginx proxy template](https://github.com/GoogleCloudPlatform/kube-jenkins-imager/blob/master/ssl_proxy.yaml#L41-L48) we saw earlier:
|
||||
|
||||
|
||||
|
||||
```
|
||||
spec:
|
||||
|
||||
containers:
|
||||
|
||||
-
|
||||
|
||||
name: "nginx-ssl-proxy"
|
||||
|
||||
image: "gcr.io/cloud-solutions-images/nginx-ssl-proxy:latest"
|
||||
|
||||
env: [...]
|
||||
|
||||
ports: [...]
|
||||
|
||||
volumeMounts:
|
||||
|
||||
-
|
||||
|
||||
name: "secrets"
|
||||
|
||||
mountPath: "/etc/secrets"
|
||||
|
||||
readOnly: true
|
||||
|
||||
volumes:
|
||||
|
||||
-
|
||||
|
||||
name: "secrets"
|
||||
|
||||
secret:
|
||||
|
||||
secretName: "ssl-proxy-secret"
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
A volume of type secret that points to the ssl-proxy-secret secret resource is defined, and then mounted into /etc/secrets in the container. The secrets spec in the earlier example defined data.proxycert, data.proxykey, and data.htpasswd, so we would see those files appear (base64-decoded) in /etc/secrets/proxycert, /etc/secrets/proxykey, and /etc/secrets/htpasswd for the Nginx process to access.
|
||||
|
||||
|
||||
|
||||
All together now
|
||||
|
||||
I have “containers and Kubernetes are fun and cool!” moments all the time, like probably every day. I’m beginning to have “containers and Kubernetes are extremely useful and powerful and are adding value to what I do by helping me do important things with ease” more frequently. This SSL termination proxy with Nginx example is definitely one of the latter. I didn’t have to waste time learning a new way to use SSL. I was able to solve my problem using well-known tools, in a reusable way, and quickly (from idea to working took about 2 hours).
|
||||
|
||||
|
||||
Check out the complete [Automated Image Builds with Jenkins, Packer, and Kubernetes](https://github.com/GoogleCloudPlatform/kube-jenkins-imager) repo to see how the SSL termination proxy is used in a real cluster, or dig into the details of the proxy image in the [nginx-ssl-proxy repo](https://github.com/GoogleCloudPlatform/nginx-ssl-proxy) (complete with a Dockerfile and Packer template so you can build the image yourself).
|
||||
|
|
@ -0,0 +1,234 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " The Growing Kubernetes Ecosystem "
|
||||
date: Saturday, July 24, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Over the past year, we’ve seen fantastic momentum in the Kubernetes project, culminating with the release of [Kubernetes v1][4] earlier this week. We’ve also witnessed the ecosystem around Kubernetes blossom, and wanted to draw attention to some of the cooler offerings we’ve seen.
|
||||
|
||||
|
||||
| ----- |
|
||||
|
|
||||
|
||||
![][5]
|
||||
|
||||
|
|
||||
|
||||
[CloudBees][6] and the Jenkins community have created a Kubernetes plugin, allowing Jenkins slaves to be built as Docker images and run in Docker hosts managed by Kubernetes, either on the Google Cloud Platform or on a more local Kubernetes instance. These elastic slaves are then brought online as Jenkins schedules jobs for them and destroyed after their builds are complete, ensuring masters have steady access to clean workspaces and minimizing builds’ resource footprint.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][7]
|
||||
|
||||
|
|
||||
|
||||
[CoreOS][8] has launched Tectonic, an opinionated enterprise distribution of Kubernetes, CoreOS and Docker. Tectonic includes a management console for workflows and dashboards, an integrated registry to build and share containers, and additional tools to automate deployment and customize rolling updates. At KuberCon, CoreOS launched Tectonic Preview, giving users easy access to Kubernetes 1.0, 24x7 enterprise ready support, Kubernetes guides and Kubernetes training to help enterprises begin experiencing the power of Kubernetes, CoreOS and Docker.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][9]
|
||||
|
||||
|
|
||||
|
||||
[Hitachi Data Systems][10] has announced that Kubernetes now joins the list of solutions validated to run on their enterprise Unified Computing Platform. With this announcement Hitachi has validated Kubernetes and VMware running side-by-side on the UCP platform, providing an enterprise solution for container-based applications and traditional virtualized workloads.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][11]
|
||||
|
||||
|
|
||||
|
||||
[Kismatic][12] is providing enterprise support for pure play open source Kubernetes. They have announced open source and commercially supported Kubernetes plug-ins specifically built for production-grade enterprise environments. Any Kubernetes deployment can now benefit from modular role-based access controls (RBAC), Kerberos for bedrock authentication, LDAP/AD integration, rich auditing and platform-agnostic Linux distro packages.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][13]
|
||||
|
||||
|
|
||||
|
||||
[Meteor Development Group][14], creators of Meteor, a JavaScript App Platform, are using Kubernetes to build [Galaxy][14] to run Meteor apps in production. Galaxy will scale from free test apps to production-suitable high-availability hosting.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][15]
|
||||
|
||||
|
|
||||
|
||||
Mesosphere has incorporated Kubernetes into its Data Center Operating System (DCOS) platform as a first class citizen. Using DCOS, enterprises can deploy Kubernetes across thousands of nodes, both bare-metal and virtualized machines that can run on-premise and in the cloud. Mesosphere also launched a beta of their [Kubernetes Training Bootcamp][16] and will be offering more in the future.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][17]
|
||||
|
||||
|
|
||||
|
||||
[Mirantis][18] is enabling hybrid cloud applications across OpenStack and other clouds supporting Kubernetes. An OpenStack Murano app package supports full application lifecycle actions such as deploy, create cluster, create pod, add containers to pods, scale up and scale down.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][19]
|
||||
|
||||
|
|
||||
|
||||
[OpenContrail][20] is creating a kubernetes-contrail plugin designed to stitch the cluster management capabilities of Kubernetes with the network service automation capabilities of OpenContrail. Given the event-driven abstractions of pods and services inherent in Kubernetes, it is a simple extension to address network service enforcement by leveraging OpenContrail’s Virtual Network policy approach and programmatic API’s.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![logo.png][21]
|
||||
|
||||
|
|
||||
|
||||
[Pachyderm][22] is a containerized data analytics engine which provides the broad functionality of Hadoop with the ease of use of Docker. Users simply provide containers with their data analysis logic and Pachyderm will distribute that computation over the data. They have just released full deployment on Kubernetes for on premise deployments, and on Google Container Engine, eliminating all the operational overhead of running a cluster yourself.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][23]
|
||||
|
||||
|
|
||||
|
||||
[Platalytics, Inc][24]. has announced the release of a one-touch deploy-anywhere feature for its Spark Application Platform. Based on Kubernetes, Docker, and CoreOS, it allows simple and automated deployment of Apache Hadoop, Spark, and the Platalytics platform, with a single click, to all major public clouds, including Google, Amazon, Azure, Digital Ocean, and private on-premise clouds. It also enables hybrid cloud scenarios, where resources on public and private clouds can be mixed.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][25]
|
||||
|
||||
|
|
||||
|
||||
[Rackspace][26] has created Corekube as a simple, quick way to deploy Kubernetes on OpenStack. By using a decoupled infrastructure that is coordinated by etcd, fleet and flannel, it enables users to try Kubernetes and CoreOS without all the fuss of setting things up by hand.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][27]
|
||||
|
||||
|
|
||||
|
||||
[Red Hat][28] is a long time proponent of Kubernetes, and a significant contributor to the project. In their own words, “From Red Hat Enterprise Linux 7 and Red Hat Enterprise Linux Atomic Host to OpenShift Enterprise 3 and the forthcoming Red Hat Atomic Enterprise Platform, we are well-suited to bring container innovations into the enterprise, leveraging Kubernetes as the common backbone for orchestration.”
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][29]
|
||||
|
||||
|
|
||||
|
||||
[Redapt][30] is launching a variety of turnkey, on-premises Kubernetes solutions co-engineered with other partners in the Kubernetes partner ecosystem. These include appliances built to leverage the CoreOS/Tectonic, Mirantis OpenStack, and Mesosphere platforms for management and provisioning. Redapt also offers private, public, and multi-cloud solutions that help customers accelerate their Kubernetes deployments successfully into production.
|
||||
|
||||
|
|
||||
|
||||
| ----- |
|
||||
|
|
||||
|
||||
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
We’ve also seen a community of services partners spring up to assist in adopting Kubernetes and containers:
|
||||
|
||||
|
||||
|
||||
| ----- |
|
||||
|
|
||||
|
||||
![Screen Shot 2015-07-21 at 1.12.16 PM.png][31]
|
||||
|
||||
|
|
||||
|
||||
|
||||
|
||||
[Biarca][32] is using Kubernetes to ease application deployment and scale on demand across available hybrid and multi-cloud clusters through strategically managed policy. A video on their website illustrates how to use Kubernetes to deploy applications in a private cloud infrastructure based on OpenStack and use a public cloud like GCE to address bursting demand for applications.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][33]
|
||||
|
||||
|
|
||||
|
||||
[Cloud Technology Partners][34] has developed a Container Services Offering featuring Kubernetes to assist enterprises with container best practices, adoption and implementation. This offering helps organizations understand how containers deliver competitive edge.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][35]
|
||||
|
||||
|
|
||||
|
||||
[DoIT International][36] is offering a Kubernetes Bootcamp which consists of a series of hands-on exercises interleaved with mini-lectures covering hands on topics such as Container Basics, Using Docker, Kubernetes and Google Container Engine.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][37]
|
||||
|
||||
|
|
||||
|
||||
[OpenCredo][38] provides a practical, lab style container and scheduler course in addition to consulting and solution delivery. The three-day course allows development teams to quickly ramp up and make effective use of containers in real world scenarios, covering containers in general along with Docker and Kubernetes.
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
![][39]
|
||||
|
||||
|
|
||||
|
||||
[Pythian][40] focuses on helping clients design, implement, and manage systems that directly contribute to revenue and business success. They provide small, [dedicated teams of highly trained and experienced data experts][41] who have the deep Kubernetes and container experience necessary to help companies solve Big Data problems with containers.
|
||||
|
||||
|
|
||||
|
||||
\- Martin Buhr, Product Manager at Google
|
||||
|
||||
[1]: https://lh4.googleusercontent.com/2dJvY1Cl9i6SQ8apKARcisvFZPDYY5LltIsmz3W-jmon7DFE4p7cz3gsBPuz9KM_LSiuwx1xIPYr9Ygm5DTQ2f-DUyWsg7zs7YL7O3JMCHQ8Ji4B3EGpx26fbF_glQPPPp4RQTE
|
||||
[2]: http://blog.cloudbees.com/2015/07/on-demand-jenkins-slaves-with.html
|
||||
[3]: https://lh4.googleusercontent.com/vC0B6UWRaxOq9ar-7naIX9HNs9ANfq8f5VTP-MpIOpRTxHeE7kMDAcmswsDF6SVsd_xtRa7Kr2z3wJCXbGj2Lp6fp7pfhaWd5bHuA9_cYHhvY1WmQEjXHdPZxYzwBqExAmtTdiA
|
||||
[4]: https://tectonic.com/
|
||||
[5]: https://lh6.googleusercontent.com/Y6MY5k_Eq6CddNzfRrRo14kLuJwe1KYtJq_7KcIGy1bRf65KwoX1uAuCBwEL0P_FGSomZPQZ-hs7CG8Vze7qDKsISZrLEyRZkm5OSHngjjXfCItCiMXI3FtnD9iyDvYurd5sRXQ
|
||||
[6]: https://www.hds.com/corporate/press-analyst-center/press-releases/2015/gl150721.html
|
||||
[7]: https://lh4.googleusercontent.com/iHZAfjvGPHYsIwUgevTTPN74fBU53Y1qdwq9hUsIixLWIbbv7P_02CQR6V5LPi4n4BCeg1LK3g5Iaizpkm5dXCmI7TdYKEaC7H2wLa9tzSkp8TyR93U1SilcGvpLDlzPLWhY664
|
||||
[8]: https://www.kismatic.com/
|
||||
[9]: https://lh5.googleusercontent.com/kTu3RRmc1LH1vgdHQeCibALfJJCxE9JR5ZRE30xAn_bphO_uk-2n3RRolw3Yrb1uheyXMQRsH8ps7v3mrvhjkJo0f2ye7unVd1PT0trv8cE5VP1Pnq5P4oUx6m7DWKANZyyBnsg
|
||||
[10]: http://info.meteor.com/blog/meteor-and-a-galaxy-of-containers-with-kubernetes
|
||||
[11]: https://lh5.googleusercontent.com/H1r-80pX8-ixDHCJDBKLWkNA1keMUvjv058e87-B80Wr8LSxP7SjSXc-5ru3MT4k18zYxl0L8aqJv3aylx8UYNGAXEmCCuHKwjZ4Z5tbG-LFCTiyRVdrlVUukHhi8QtsbuR1u3c
|
||||
[12]: https://mesosphere.com/training/kubernetes/
|
||||
[13]: https://lh6.googleusercontent.com/7BkcAAf9SoEDyzjgGsNg_YVi8cRb1mdPHsc4FtK7JQkl2iVR_zIy9wkDPT7bls-z7FhgTIekAj1Z7q6Y_4oaZ2OLygkHxPmxZ3MNnkI4f8C78cjyk2gvt40Yk-m3_VSt8sIXz2Q
|
||||
[14]: https://www.mirantis.com/blog/kubernetes-docker-mirantis-openstack-6-1/
|
||||
[15]: https://lh6.googleusercontent.com/Zi_nKEcB6uWZYXMOBStKPFLkHIXQn2FsnFP4ab2BFeBbUWv-d1oEBLQos-OpYpfwO3mao6xGusvX9O1JiyL4357XJBsmTXmcSnTnrBXCBOxJkB1uhOjntfAv8fN2YjZ6ITK53YU
|
||||
[16]: http://www.opencontrail.org/opencontrail-kubernetes-integration/
|
||||
[17]: https://lh5.googleusercontent.com/F9dS-UFz8L50xoj8jCjgUvOo-r3pNLs4cEGRczHu5mD8YdMgnJctyzBuWQ0LmZeBB3cDHc1LB_4kHZDmjuP6KGr_n3W8Q0fGbBHxinRZggdMC0NDDWl-xDwy68GO6qotJr2JcOA
|
||||
[18]: http://pachyderm.io/
|
||||
[19]: https://lh6.googleusercontent.com/dXhnvnlWtL9-oTd_irtLYTu8g78l9-LKj9PwjV5v4mpvGcPh4GQlHeQZpnIMJGwEyBxagut94Onagb0GsVJuVx10VVp-GHZ0vG_Z-jbxthLHhuzhQaBSFfA9pfoOI3cl6Rh7Hk4
|
||||
[20]: http://www.platalytics.com/
|
||||
[21]: https://lh3.googleusercontent.com/0EQQc3sjVbw1cEYVeT0S5rT1iPLEMHteiKlSMDNqw8lNVOf4vG5qE6pVfvmZlRcg-NoOABC-mMcMSdD8ayrmpok0T91N15QqqmH378ydxK1843dcuJdtEsCnr1Y_RQQo-hWrBfI
|
||||
[22]: https://github.com/metral/corekube
|
||||
[23]: https://lh4.googleusercontent.com/qxQciTVBkyYDWeSgoxtg7InxQuuXsGSLBDfdxJB9Czo71BzQN5bUugLZhQKkERHqWAnkqHIY2VWi2J7g-pGn4V4AzPE0alBksedou78r0KMZm4QqYTN8QYHIMo4RtVmdw90azYw
|
||||
[24]: http://www.redhat.com/en/about/blog/welcoming-kubernetes-officially-enterprise-open-source-world
|
||||
[25]: https://lh3.googleusercontent.com/0EQQc3sjVbw1cEYVeT0S5rT1iPLEMHteiKlSMDNqw8lNVOf4vG5qE6pVfvmZlRcg-NoOABC-mMcMSdD8ayrmpok0T91N15QqqmH378ydxK1843dcuJdtEsCnr1Y_RQQo-hWrBfI
|
||||
[26]: https://github.com/metral/corekube
|
||||
[27]: https://lh4.googleusercontent.com/qxQciTVBkyYDWeSgoxtg7InxQuuXsGSLBDfdxJB9Czo71BzQN5bUugLZhQKkERHqWAnkqHIY2VWi2J7g-pGn4V4AzPE0alBksedou78r0KMZm4QqYTN8QYHIMo4RtVmdw90azYw
|
||||
[28]: http://www.redhat.com/en/about/blog/welcoming-kubernetes-officially-enterprise-open-source-world
|
||||
[29]: https://lh5.googleusercontent.com/8FfYhnwb__NUuoXEC-tNzuAuA6rFGz6IgQnVYh-fQ89i685-3t_2UjN291S-VZAAkyrPJ-MaAPMr36uV0PLWlv_GE1aE99shx_XzrEi4c8OKcEkiRs3z_tsB20w5ZiZ7UeZgzT8
|
||||
[30]: http://www.redapt.com/kubernetes/%20%E2%80%8E
|
||||
[31]: https://lh3.googleusercontent.com/dOHU9NjLGrG6UgGuNjvhuR5oDkrR5z1AZ0sM8BkLgaMuXY7pfDev8ukVbD1nrBeRj9LKryJcoGEvhZSo_dHIP8ahHIkAWqsT_QSOoiu7rfM9WX3lubCI4N1WKmE7yrRquaL7nAc
|
||||
[32]: http://biarca.io/building-distributed-multi-cloud-applications-using-kubernetes-and-containers/
|
||||
[33]: https://lh3.googleusercontent.com/Ac0FiR1FJ4tfp90zBVX7fr36BAVxUqRW7VIOFw12Rp6BzHRR0x_BwTfbaheXLYSYMuPZouf4huql04Uu9fVEn956b7BWIUcTzUgWuB5JYSFawwrP_AA6uzdOHZAQ2aROo1vhm1s
|
||||
[34]: http://www.cloudtp.com/container-adoption-services/
|
||||
[35]: https://lh6.googleusercontent.com/tBtFRPzI6OAPKvaak9X3QWcrzGuBsrk1szFGi-Bq3EQweBo6nZ0Qmwxk9EwLZ9ItP9-1Zip4rxtwtFa0ILylO1CySuOa1qLcO2ab0yJCN1SCe-r_BNPX8hD5Qigxb7sqqXgx09A
|
||||
[36]: http://doit-intl.com/kubernetes
|
||||
[37]: https://lh3.googleusercontent.com/qO2YK7IxIVPpIsdN0Ry7B5zc_cdzfZb6DlgAJWpy-VJajL84m3u2nyo3-6QRZ_wFCY0-r4ryltiT4j1D_y_BeguxGXWap2YlSfdqyYAIbi2__p0uLXymtYkAu5VFVfA___eMbUY
|
||||
[38]: https://www.opencredo.com/2015/04/20/kubernetes/
|
||||
[39]: https://lh5.googleusercontent.com/XgMDUbRt_UKn4v4D7roz4mpE4qqUYpLI2c9460vt65yXrLxhcrM3rmH9Xcg-C0RMylhRxTWIMFInHYLN1O9v9FZ1NoUVI6ynsmoAQUGMN1Nc27jhXzIRiRXwWzx_HOH5TtX3NaE
|
||||
[40]: http://www.pythian.com/google-kubernetes/
|
||||
[41]: http://www.pythian.com/blog/lessons-learned-kubernetes/
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - July 10 2015 "
|
||||
date: Tuesday, July 13, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
Every week the Kubernetes contributing community meet virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
Here are the notes from today's meeting:
|
||||
|
||||
* Eric Paris: replacing salt with ansible (if we want)
|
||||
* In contrib, there is a provisioning tool written in ansible
|
||||
* The goal in the rewrite was to eliminate as much of the cloud provider stuff as possible
|
||||
* The salt setup does a bunch of setup in scripts and then the environment is setup with salt
|
||||
* This means that things like generating certs is done differently on GCE/AWS/Vagrant
|
||||
* For ansible, everything must be done within ansible
|
||||
* Background on ansible
|
||||
* Does not have clients
|
||||
* Provisioner ssh into the machine and runs scripts on the machine
|
||||
* You define what you want your cluster to look like, run the script, and it sets up everything at once
|
||||
* If you make one change in a config file, ansible re-runs everything (which isn’t always desirable)
|
||||
* Uses a jinja2 template
|
||||
* Create machines with minimal software, then use ansible to get that machine into a runnable state
|
||||
* Sets up all of the add-ons
|
||||
* Eliminates the provisioner shell scripts
|
||||
* Full cluster setup currently takes about 6 minutes
|
||||
* CentOS with some packages
|
||||
* Redeploy to the cluster takes 25 seconds
|
||||
* Questions for Eric
|
||||
* Where does the provider-specific configuration go?
|
||||
* The only network setup that the ansible config does is flannel; you can turn it off
|
||||
* What about init vs. systemd?
|
||||
* Should be able to support in the code w/o any trouble (not yet implemented)
|
||||
* Discussion
|
||||
* Why not push the setup work into containers or kubernetes config?
|
||||
* To bootstrap a cluster drop a kubelet and a manifest
|
||||
* Running a kubelet and configuring the network should be the only things required. We can cut a machine image that is preconfigured minus the data package (certs, etc)
|
||||
* The ansible scripts install kubelet & docker if they aren’t already installed
|
||||
* Each OS (RedHat, Debian, Ubuntu) could have a different image. We could view this as part of the build process instead of the install process.
|
||||
* There needs to be a solution for bare metal as well.
|
||||
* In favor of the overall goal -- reducing the special configuration in the salt configuration
|
||||
* Everything except the kubelet should run inside a container (eventually the kubelet should as well)
|
||||
* Running in a container doesn’t cut down on the complexity that we currently have
|
||||
* But it does more clearly define the interface about what the code expects
|
||||
* These tools (Chef, Puppet, Ansible) conflate binary distribution with configuration
|
||||
* Containers more clearly separate these problems
|
||||
* The mesos deployment is not completely automated yet, but the mesos deployment is completely different: kubelets get put on top on an existing mesos cluster
|
||||
* The bash scripts allow the mesos devs to see what each cloud provider is doing and re-use the relevant bits
|
||||
* There was a large reverse engineering curve, but the bash is at least readable as opposed to the salt
|
||||
* Openstack uses a different deployment as well
|
||||
* We need a well documented list of steps (e.g. create certs) that are necessary to stand up a cluster
|
||||
* This would allow us to compare across cloud providers
|
||||
* We should reduce the number of steps as much as possible
|
||||
* Ansible has 241 steps to launch a cluster
|
||||
* 1.0 Code freeze
|
||||
* How are we getting out of code freeze?
|
||||
* This is a topic for next week, but the preview is that we will move slowly rather than totally opening the firehose
|
||||
* We want to clear the backlog as fast as possible while maintaining stability both on HEAD and on the 1.0 branch
|
||||
* The backlog is almost 300 PRs, but there are also various parallel feature branches that have been developed during the freeze
|
||||
* Cutting a cherry pick release today (1.0.1) that fixes a few issues
|
||||
* Next week we will discuss the cadence for patch releases
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - July 17 2015 "
|
||||
date: Friday, July 23, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
|
||||
|
||||
|
||||
Every week the Kubernetes contributing community meet virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
Here are the notes from today's meeting:
|
||||
|
||||
|
||||
|
||||
-
|
||||
Eric Paris: replacing salt with ansible (if we want)
|
||||
|
||||
-
|
||||
In contrib, there is a provisioning tool written in ansible
|
||||
-
|
||||
The goal in the rewrite was to eliminate as much of the cloud provider stuff as possible
|
||||
-
|
||||
The salt setup does a bunch of setup in scripts and then the environment is setup with salt
|
||||
|
||||
-
|
||||
This means that things like generating certs is done differently on GCE/AWS/Vagrant
|
||||
-
|
||||
For ansible, everything must be done within ansible
|
||||
-
|
||||
Background on ansible
|
||||
|
||||
-
|
||||
Does not have clients
|
||||
-
|
||||
Provisioner ssh into the machine and runs scripts on the machine
|
||||
-
|
||||
You define what you want your cluster to look like, run the script, and it sets up everything at once
|
||||
-
|
||||
If you make one change in a config file, ansible re-runs everything (which isn’t always desirable)
|
||||
-
|
||||
Uses a jinja2 template
|
||||
-
|
||||
Create machines with minimal software, then use ansible to get that machine into a runnable state
|
||||
|
||||
-
|
||||
Sets up all of the add-ons
|
||||
-
|
||||
Eliminates the provisioner shell scripts
|
||||
-
|
||||
Full cluster setup currently takes about 6 minutes
|
||||
|
||||
-
|
||||
CentOS with some packages
|
||||
-
|
||||
Redeploy to the cluster takes 25 seconds
|
||||
-
|
||||
Questions for Eric
|
||||
|
||||
-
|
||||
Where does the provider-specific configuration go?
|
||||
|
||||
-
|
||||
The only network setup that the ansible config does is flannel; you can turn it off
|
||||
-
|
||||
What about init vs. systemd?
|
||||
|
||||
-
|
||||
Should be able to support in the code w/o any trouble (not yet implemented)
|
||||
-
|
||||
Discussion
|
||||
|
||||
-
|
||||
Why not push the setup work into containers or kubernetes config?
|
||||
|
||||
-
|
||||
To bootstrap a cluster drop a kubelet and a manifest
|
||||
-
|
||||
Running a kubelet and configuring the network should be the only things required. We can cut a machine image that is preconfigured minus the data package (certs, etc)
|
||||
|
||||
-
|
||||
The ansible scripts install kubelet & docker if they aren’t already installed
|
||||
-
|
||||
Each OS (RedHat, Debian, Ubuntu) could have a different image. We could view this as part of the build process instead of the install process.
|
||||
-
|
||||
There needs to be a solution for bare metal as well.
|
||||
-
|
||||
In favor of the overall goal -- reducing the special configuration in the salt configuration
|
||||
-
|
||||
Everything except the kubelet should run inside a container (eventually the kubelet should as well)
|
||||
|
||||
-
|
||||
Running in a container doesn’t cut down on the complexity that we currently have
|
||||
-
|
||||
But it does more clearly define the interface about what the code expects
|
||||
-
|
||||
These tools (Chef, Puppet, Ansible) conflate binary distribution with configuration
|
||||
|
||||
-
|
||||
Containers more clearly separate these problems
|
||||
-
|
||||
The mesos deployment is not completely automated yet, but the mesos deployment is completely different: kubelets get put on top on an existing mesos cluster
|
||||
|
||||
-
|
||||
The bash scripts allow the mesos devs to see what each cloud provider is doing and re-use the relevant bits
|
||||
-
|
||||
There was a large reverse engineering curve, but the bash is at least readable as opposed to the salt
|
||||
-
|
||||
Openstack uses a different deployment as well
|
||||
-
|
||||
We need a well documented list of steps (e.g. create certs) that are necessary to stand up a cluster
|
||||
|
||||
-
|
||||
This would allow us to compare across cloud providers
|
||||
-
|
||||
We should reduce the number of steps as much as possible
|
||||
-
|
||||
Ansible has 241 steps to launch a cluster
|
||||
-
|
||||
1.0 Code freeze
|
||||
|
||||
-
|
||||
How are we getting out of code freeze?
|
||||
-
|
||||
This is a topic for next week, but the preview is that we will move slowly rather than totally opening the firehose
|
||||
|
||||
-
|
||||
We want to clear the backlog as fast as possible while maintaining stability both on HEAD and on the 1.0 branch
|
||||
-
|
||||
The backlog is almost 300 PRs, but there are also various parallel feature branches that have been developed during the freeze
|
||||
-
|
||||
Cutting a cherry pick release today (1.0.1) that fixes a few issues
|
||||
- Next week we will discuss the cadence for patch releases
|
||||
|
|
@ -0,0 +1,92 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Using Kubernetes Namespaces to Manage Environments "
|
||||
date: Saturday, August 28, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
##### One of the advantages that Kubernetes provides is the ability to manage various environments easier and better than traditional deployment strategies. For most nontrivial applications, you have test, staging, and production environments. You can spin up a separate cluster of resources, such as VMs, with the same configuration in staging and production, but that can be costly and managing the differences between the environments can be difficult.
|
||||
|
||||
##### Kubernetes includes a cool feature called [namespaces][4], which enable you to manage different environments within the same cluster. For example, you can have different test and staging environments in the same cluster of machines, potentially saving resources. You can also run different types of server, batch, or other jobs in the same cluster without worrying about them affecting each other.
|
||||
|
||||
|
||||
|
||||
### The Default Namespace
|
||||
|
||||
Specifying the namespace is optional in Kubernetes because by default Kubernetes uses the "default" namespace. If you've just created a cluster, you can check that the default namespace exists using this command:
|
||||
```
|
||||
$ kubectl get namespaces
|
||||
NAME LABELS STATUS
|
||||
default Active
|
||||
kube-system Active
|
||||
```
|
||||
|
||||
Here you can see that the default namespace exists and is active. The status of the namespace is used later when turning down and deleting the namespace.
|
||||
|
||||
#### Creating a New Namespace
|
||||
|
||||
You create a namespace in the same way you would any other resource. Create a my-namespace.yaml file and add these contents:
|
||||
|
||||
```
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: my-namespace
|
||||
labels:
|
||||
name: my-namespace
|
||||
```
|
||||
|
||||
Then you can run this command to create it:
|
||||
```
|
||||
$ kubectl create -f my-namespace.yaml
|
||||
```
|
||||
#### Service Names
|
||||
|
||||
With namespaces you can have your apps point to static service endpoints that don't change based on the environment. For instance, your MySQL database service could be named mysql in production and staging even though it runs on the same infrastructure.
|
||||
|
||||
This works because each of the resources in the cluster will by default only "see" the other resources in the same namespace. This means that you can avoid naming collisions by creating pods, services, and replication controllers with the same names provided they are in separate namespaces. Within a namespace, short DNS names of services resolve to the IP of the service within that namespace. So for example, you might have an Elasticsearch service that can be accessed via the DNS name elasticsearch as long as the containers accessing it are located in the same namespace.
|
||||
|
||||
You can still access services in other namespaces by looking it up via the full DNS name which takes the form of SERVICE-NAME.NAMESPACE-NAME. So for example, elasticsearch.prod or elasticsearch.canary for the production and canary environments respectively.
|
||||
|
||||
#### An Example
|
||||
|
||||
Let's look at an example application. Let’s say you want to deploy your music store service MyTunes in Kubernetes. You can run the application in production and staging environments, as well as some one-off apps, in the same cluster. You can get a better idea of what’s going on by running some commands:
|
||||
|
||||
|
||||
|
||||
```
|
||||
~$ kubectl get namespaces
|
||||
NAME LABELS STATUS
|
||||
default Active
|
||||
mytunes-prod Active
|
||||
mytunes-staging Active
|
||||
my-other-app Active
|
||||
```
|
||||
|
||||
Here you can see a few namespaces running. Next let’s list the services in staging:
|
||||
|
||||
```
|
||||
~$ kubectl get services --namespace=mytunes-staging
|
||||
NAME LABELS SELECTOR IP(S) PORT(S)
|
||||
mytunes name=mytunes,version=1 name=mytunes 10.43.250.14 80/TCP
|
||||
104.185.824.125
|
||||
mysql name=mysql name=mysql 10.43.250.63 3306/TCP
|
||||
```
|
||||
Next check production:
|
||||
```
|
||||
~$ kubectl get services --namespace=mytunes-prod
|
||||
NAME LABELS SELECTOR IP(S) PORT(S)
|
||||
mytunes name=mytunes,version=1 name=mytunes 10.43.241.145 80/TCP
|
||||
104.199.132.213
|
||||
mysql name=mysql name=mysql 10.43.245.77 3306/TCP
|
||||
```
|
||||
Notice that the IP addresses are different depending on which namespace is used even though the names of the services themselves are the same. This capability makes configuring your app extremely easy—since you only have to point your app at the service name—and has the potential to allow you to configure your app exactly the same in your staging or test environments as you do in production.
|
||||
|
||||
#### Caveats
|
||||
|
||||
While you can run staging and production environments in the same cluster and save resources and money by doing so, you will need to be careful to set up resource limits so that your staging environment doesn't starve production for CPU, memory, or disk resources. Setting resource limits properly, and testing that they are working takes a lot of time and effort so unless you can measurably save money by running production in the same cluster as staging or test, you may not really want to do that.
|
||||
|
||||
Whether or not you run staging and production in the same cluster, namespaces are a great way to partition different apps within the same cluster. Namespaces will also serve as a level where you can apply resource limits so look for more resource management features at the namespace level in the future.
|
||||
|
||||
\- Posted by Ian Lewis, Developer Advocate at Google
|
||||
|
|
@ -0,0 +1,90 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Weekly Kubernetes Community Hangout Notes - July 31 2015 "
|
||||
date: Wednesday, August 04, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
Every week the Kubernetes contributing community meet virtually over Google Hangouts. We want anyone who's interested to know what's discussed in this forum.
|
||||
|
||||
Here are the notes from today's meeting:
|
||||
|
||||
|
||||
|
||||
* Private Registry Demo - Muhammed
|
||||
|
||||
* Run docker-registry as an RC/Pod/Service
|
||||
|
||||
* Run a proxy on every node
|
||||
|
||||
* Access as localhost:5000
|
||||
|
||||
* Discussion:
|
||||
|
||||
* Should we back it by GCS or S3 when possible?
|
||||
|
||||
* Run real registry backed by $object_store on each node
|
||||
|
||||
* DNS instead of localhost?
|
||||
|
||||
* disassemble image strings?
|
||||
|
||||
* more like DNS policy?
|
||||
* Running Large Clusters - Joe
|
||||
|
||||
* Samsung keen to see large scale O(1000)
|
||||
|
||||
* Starting on AWS
|
||||
|
||||
* RH also interested - test plan needed
|
||||
|
||||
* Plan for next week: discuss working-groups
|
||||
|
||||
* If you are interested in joining conversation on cluster scalability send mail to [joe@0xBEDA.com][4]
|
||||
* Resource API Proposal - Clayton
|
||||
|
||||
* New stuff wants more info on resources
|
||||
|
||||
* Proposal for resources API - ask apiserver for info on pods
|
||||
|
||||
* Send feedback to: #11951
|
||||
|
||||
* Discussion on snapshot vs time-series vs aggregates
|
||||
* Containerized kubelet - Clayton
|
||||
|
||||
* Open pull
|
||||
|
||||
* Docker mount propagation - RH carries patches
|
||||
|
||||
* Big issues around whole bootstrap of the system
|
||||
|
||||
* dual: boot-docker/system-docker
|
||||
|
||||
* Kube-in-docker is really nice, but maybe not critical
|
||||
|
||||
* Do the small stuff to make progress
|
||||
|
||||
* Keep pressure on docker
|
||||
* Web UI (preilly)
|
||||
|
||||
* Where does web UI stand?
|
||||
|
||||
* OK to split it back out
|
||||
|
||||
* Use it as a container image
|
||||
|
||||
* Build image as part of kube release process
|
||||
|
||||
* Vendor it back in? Maybe, maybe not.
|
||||
|
||||
* Will DNS be split out?
|
||||
|
||||
* Probably more tightly integrated, instead
|
||||
|
||||
* Other potential spin-outs:
|
||||
|
||||
* apiserver
|
||||
|
||||
* clients
|
||||
|
|
@ -0,0 +1,163 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes Performance Measurements and Roadmap "
|
||||
date: Friday, September 10, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
No matter how flexible and reliable your container orchestration system is, ultimately, you have some work to be done, and you want it completed quickly. For big problems, a common answer is to just throw more machines at the problem. After all, more compute = faster, right?
|
||||
|
||||
|
||||
Interestingly, adding more nodes is a little like the [tyranny of the rocket equation][4] \- in some systems, adding more machines can actually make your processing slower. However, unlike the rocket equation, we can do better. Kubernetes in v1.0 version supports clusters with up to 100 nodes. However, we have a goal to 10x the number of nodes we will support by the end of 2015. This blog post will cover where we are and how we intend to achieve the next level of performance.
|
||||
|
||||
|
||||
##### What do we measure?
|
||||
|
||||
The first question we need to answer is: “what does it mean that Kubernetes can manage an N-node cluster?” Users expect that it will handle all operations “reasonably quickly,” but we need a precise definition of that. We decided to define performance and scalability goals based on the following two metrics:
|
||||
|
||||
1. *“API-responsiveness”*: 99% of all our API calls return in less than 1 second
|
||||
|
||||
2. *“Pod startup time”*: 99% of pods (with pre-pulled images) start within 5 seconds
|
||||
|
||||
|
||||
Note that for “pod startup time” we explicitly assume that all images necessary to run a pod are already pre-pulled on the machine where it will be running. In our experiments, there is a high degree of variability (network throughput, size of image, etc) between images, and these variations have little to do with Kubernetes’ overall performance.
|
||||
|
||||
|
||||
The decision to choose those metrics was made based on our experience spinning up 2 billion containers a week at Google. We explicitly want to measure the latency of user-facing flows since that’s what customers will actually care about.
|
||||
|
||||
|
||||
##### How do we measure?
|
||||
|
||||
To monitor performance improvements and detect regressions we set up a continuous testing infrastructure. Every 2-3 hours we create a 100-node cluster from [HEAD][5] and run our scalability tests on it. We use a GCE n1-standard-4 (4 cores, 15GB of RAM) machine as a master and n1-standard-1 (1 core, 3.75GB of RAM) machines for nodes.
|
||||
|
||||
|
||||
In scalability tests, we explicitly focus only on the full-cluster case (full N-node cluster is a cluster with 30 * N pods running in it) which is the most demanding scenario from a performance point of view. To reproduce what a customer might actually do, we run through the following steps:
|
||||
|
||||
* Populate pods and replication controllers to fill the cluster
|
||||
|
||||
* Generate some load (create/delete additional pods and/or replication controllers, scale the existing ones, etc.) and record performance metrics
|
||||
|
||||
* Stop all running pods and replication controllers
|
||||
|
||||
* Scrape the metrics and check whether they match our expectations
|
||||
|
||||
|
||||
It is worth emphasizing that the main parts of the test are done on full clusters (30 pods per node, 100 nodes) - starting a pod in an empty cluster, even one with 100 nodes, will be much faster.
|
||||
|
||||
|
||||
To measure pod startup latency we are using very simple pods with just a single container running the “gcr.io/google_containers/pause:go” image, which starts and then sleeps forever. The container is guaranteed to be already pre-pulled on nodes (we use it as the so-called pod-infra-container).
|
||||
|
||||
|
||||
##### Performance data
|
||||
|
||||
The following table contains percentiles (50th, 90th and 99th) of pod startup time in 100-node clusters which are 10%, 25%, 50% and 100% full.
|
||||
|
||||
|
||||
| | 10%-full |25%-full | 50%-full | 100%-full |
|
||||
| ------------ | ------------ | ------------ | ------------ | ------------ |
|
||||
|50th percentile | .90s | 1.08s | 1.33s | 1.94s |
|
||||
|90th percentile | 1.29s | 1.49s | 1.72s | 2.50s |
|
||||
| 99th percentile | 1.59s | 1.86s | 2.56s | 4.32s |
|
||||
{: .post-table}
|
||||
|
||||
As for api-responsiveness, the following graphs present 50th, 90th and 99th percentiles of latencies of API calls grouped by kind of operation and resource type. However, note that this also includes internal system API calls, not just those issued by users (in this case issued by the test itself).
|
||||
|
||||
|
||||
|
||||
![get.png][6]![put.png][7]
|
||||
|
||||
|
||||
|
||||
![delete.png][8]![post.png][9]
|
||||
|
||||
![list.png][10]
|
||||
|
||||
|
||||
Some resources only appear on certain graphs, based on what was running during that operation (e.g. no namespace was put at that time).
|
||||
|
||||
|
||||
As you can see in the results, we are ahead of target for our 100-node cluster: even in a fully-packed cluster, the 99th-percentile pod startup time is 14% faster than the 5-second goal. It’s interesting to point out that LISTing pods is significantly slower than any other operation. This makes sense: in a full cluster there are 3000 pods and each pod is roughly a few kilobytes of data, meaning megabytes of data that need to be processed for each LIST.
|
||||
|
||||
|
||||
##### Work done and some future plans
|
||||
|
||||
The initial performance work to make 100-node clusters stable enough to run any tests on them involved a lot of small fixes and tuning, including increasing the limit for file descriptors in the apiserver and reusing tcp connections between different requests to etcd.
|
||||
|
||||
|
||||
However, building a stable performance test was just step one to increasing the number of nodes our cluster supports by tenfold. As a result of this work, we have already taken on significant effort to remove future bottlenecks, including:
|
||||
|
||||
* Rewriting controllers to be watch-based: Previously they were relisting objects of a given type every few seconds, which generated a huge load on the apiserver.
|
||||
|
||||
* Using code generators to produce conversions and deep-copy functions: Although the default implementation using Go reflection is very convenient, it proved to be extremely slow, as much as 10X slower in comparison to the generated code.
|
||||
|
||||
* Adding a cache to apiserver to avoid deserialization of the same data read from etcd multiple times
|
||||
|
||||
* Reducing frequency of updating statuses: Given the slow-changing nature of statuses, it only makes sense to update pod status only on change and node status only every 10 seconds.
|
||||
|
||||
* Implemented watch at the apiserver instead of redirecting the requests to etcd: We would prefer to avoid watching for the same data from etcd multiple times, since, in many cases, it was filtered out in apiserver anyway.
|
||||
|
||||
|
||||
Looking further out to our 1000-node cluster goal, proposed improvements include:
|
||||
|
||||
|
||||
* Moving events out from etcd: They are more like system logs and are neither part of system state nor are crucial for Kubernetes to work correctly.
|
||||
|
||||
* Using better json parsers: The default parser implemented in Go is very slow as it is based on reflection.
|
||||
|
||||
* Rewriting the scheduler to make it more efficient and concurrent
|
||||
|
||||
* Improving efficiency of communication between apiserver and Kubelets: In particular, we plan to reduce the size of data being sent on every update of node status.
|
||||
|
||||
|
||||
This is by no means an exhaustive list. We will be adding new elements (or removing existing ones) based on the observed bottlenecks while running the existing scalability tests and newly-created ones. If there are particular use cases or scenarios that you’d like to see us address, please join in!
|
||||
|
||||
- We have weekly meetings for our Kubernetes Scale Special Interest Group on Thursdays 11am PST where we discuss ongoing issues and plans for performance tracking and improvements.
|
||||
- If you have specific performance or scalability questions before then, please join our scalability special interest group on Slack: https://kubernetes.slack.com/messages/sig-scale
|
||||
- General questions? Feel free to join our Kubernetes community on Slack: https://kubernetes.slack.com/messages/kubernetes-users/
|
||||
- Submit a pull request or file an issue! You can do this in our GitHub repository. Everyone is also enthusiastically encouraged to contribute with their own experiments (and their result) or PR contributions improving Kubernetes.
|
||||
\- Wojciech Tyczynski, Google Software Engineer
|
||||
|
||||
[1]: http://kubernetes.io/images/nav_logo.svg
|
||||
[2]: http://kubernetes.io/docs/
|
||||
[3]: http://blog.kubernetes.io/
|
||||
[4]: http://www.nasa.gov/mission_pages/station/expeditions/expedition30/tryanny.html
|
||||
[5]: https://github.com/kubernetes/kubernetes
|
||||
[6]: https://lh4.googleusercontent.com/NrKLoz2iB-TNdOxISL7OcqquCKL-MijDBCokf-u4ASAqgmo6zT7ZU24mXDvIwUUlRsFSsL3KF17dEAfUT41TSgNPvId5HN5ELQTXJSSBF0dp9EOccx4Y4WZ9fC9v9B_kCA=s1600
|
||||
[7]: https://lh4.googleusercontent.com/53AtIdoGQ477Ju0FD4S76xbZs490JnmibhSZh67aq1-MU4Jw4B-7FBgzvFoJXHcAMeSU9r3bzJHpBFAfcSf7FIS3JGZ4TiAiHucyjH3ErrarKrwYNFopvxYSBo0qxP-U0w=s1600
|
||||
[8]: https://lh4.googleusercontent.com/-wsLEXPfgtXNlu-pDfM4c0Qvr8lU7-G2w_nSgVeqg04D7RnhgSzg6Z5-mVmIYOzTWF7XaJ0zsDZBBlyZLqj4R1fkwWq-uaKJJI8xLAQ1gYWbh5qKXr5-rzkjm6CT3kBU=s1600
|
||||
[9]: https://lh6.googleusercontent.com/It8dH6iM2ZPypZ99KSUo_kJY4DnR2QD8yGJj26TiZ3U4owyf-WXoxrDfBAc1hcSn3i3LuxE3KGlUzQOaPgH6XVjSAU9Z2zMfZCKFAxEGtuCQiKlJPX4vH2JgQf3h1BXMRJQ=s1600
|
||||
[10]: https://lh6.googleusercontent.com/6Gy-UKBZUoEwJ9iFytq-k_wrdvh6FsTJexSpn6nNnBwOvxv-Sp6PV7vmArCL22MUkz0tWH7MxhaIc-JE8YpEc0X4nDUMn-cKWF3ANHtgd2aJ5t3osoaezDe_xqjpi748Cbw=s1600
|
||||
[11]: https://kubernetes.slack.com/messages/sig-scale/
|
||||
[12]: http://blog.kubernetes.io/2015/09/kubernetes-performance-measurements-and.html "permanent link"
|
||||
[13]: https://resources.blogblog.com/img/icon18_edit_allbkg.gif
|
||||
[14]: https://www.blogger.com/post-edit.g?blogID=112706738355446097&postID=6564816295503649669&from=pencil "Edit Post"
|
||||
[15]: https://www.blogger.com/share-post.g?blogID=112706738355446097&postID=6564816295503649669&target=email "Email This"
|
||||
[16]: https://www.blogger.com/share-post.g?blogID=112706738355446097&postID=6564816295503649669&target=blog "BlogThis!"
|
||||
[17]: https://www.blogger.com/share-post.g?blogID=112706738355446097&postID=6564816295503649669&target=twitter "Share to Twitter"
|
||||
[18]: https://www.blogger.com/share-post.g?blogID=112706738355446097&postID=6564816295503649669&target=facebook "Share to Facebook"
|
||||
[19]: https://www.blogger.com/share-post.g?blogID=112706738355446097&postID=6564816295503649669&target=pinterest "Share to Pinterest"
|
||||
[20]: http://blog.kubernetes.io/search/label/containers
|
||||
[21]: http://blog.kubernetes.io/search/label/k8s
|
||||
[22]: http://blog.kubernetes.io/search/label/kubernetes
|
||||
[23]: http://blog.kubernetes.io/search/label/performance
|
||||
[24]: http://blog.kubernetes.io/2015/10/some-things-you-didnt-know-about-kubectl_28.html "Newer Post"
|
||||
[25]: http://blog.kubernetes.io/2015/08/using-kubernetes-namespaces-to-manage.html "Older Post"
|
||||
[26]: http://blog.kubernetes.io/feeds/6564816295503649669/comments/default
|
||||
[27]: https://img2.blogblog.com/img/widgets/arrow_dropdown.gif
|
||||
[28]: https://img1.blogblog.com/img/icon_feed12.png
|
||||
[29]: https://img1.blogblog.com/img/widgets/subscribe-netvibes.png
|
||||
[30]: https://www.netvibes.com/subscribe.php?url=http%3A%2F%2Fblog.kubernetes.io%2Ffeeds%2Fposts%2Fdefault
|
||||
[31]: https://img1.blogblog.com/img/widgets/subscribe-yahoo.png
|
||||
[32]: https://add.my.yahoo.com/content?url=http%3A%2F%2Fblog.kubernetes.io%2Ffeeds%2Fposts%2Fdefault
|
||||
[33]: http://blog.kubernetes.io/feeds/posts/default
|
||||
[34]: https://www.netvibes.com/subscribe.php?url=http%3A%2F%2Fblog.kubernetes.io%2Ffeeds%2F6564816295503649669%2Fcomments%2Fdefault
|
||||
[35]: https://add.my.yahoo.com/content?url=http%3A%2F%2Fblog.kubernetes.io%2Ffeeds%2F6564816295503649669%2Fcomments%2Fdefault
|
||||
[36]: https://resources.blogblog.com/img/icon18_wrench_allbkg.png
|
||||
[37]: //www.blogger.com/rearrange?blogID=112706738355446097&widgetType=Subscribe&widgetId=Subscribe1&action=editWidget§ionId=sidebar-right-1 "Edit"
|
||||
[38]: https://twitter.com/kubernetesio
|
||||
[39]: http://slack.k8s.io/
|
||||
[40]: http://stackoverflow.com/questions/tagged/kubernetes
|
||||
[41]: http://get.k8s.io/
|
||||
[42]: //www.blogger.com/rearrange?blogID=112706738355446097&widgetType=HTML&widgetId=HTML2&action=editWidget§ionId=sidebar-right-1 "Edit"
|
||||
[43]: javascript:void(0)
|
||||
|
|
@ -0,0 +1,153 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Some things you didn’t know about kubectl "
|
||||
date: Thursday, October 28, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
[kubectl](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubectl-overview.md) is the command line tool for interacting with Kubernetes clusters. Many people use it every day to deploy their container workloads into production clusters. But there’s more to kubectl than just `kubectl create -f` or `kubectl rolling-update`. kubectl is a veritable multi-tool of container orchestration and management. Below we describe some of the features of kubectl that you may not have seen.
|
||||
|
||||
**Important Note** : Most of these features are part of the upcoming 1.1 release of Kubernetes. They are not present in the current stable 1.0.x release series.
|
||||
|
||||
|
||||
##### Run interactive commands
|
||||
|
||||
`kubectl run` has been in kubectl since the 1.0 release, but recently we added the ability to run interactive containers in your cluster. That means that an interactive shell in your Kubernetes cluster is as close as:
|
||||
|
||||
```
|
||||
$> kubectl run -i --tty busybox --image=busybox --restart=Never -- sh
|
||||
Waiting for pod default/busybox-tv9rm to be running, status is Pending, pod ready: false
|
||||
Waiting for pod default/busybox-tv9rm to be running, status is Running, pod ready: false
|
||||
$> # ls
|
||||
bin dev etc home proc root sys tmp usr var
|
||||
$> # exit
|
||||
```
|
||||
The above `kubectl` command is equivalent to `docker run -i -t busybox sh`. Sadly we mistakenly used `-t` for template in kubectl 1.0, so we need to retain backwards compatibility with existing CLI users. But the existing use of `-t` is deprecated and we’ll eventually shorten `--tty` to `-t`.
|
||||
|
||||
In this example, `-i` indicates that you want an allocated `stdin` for your container and indicates that you want an interactive session, `--restart=Never` indicates that the container shouldn’t be restarted after you exit the terminal and `--tty` requests that you allocate a TTY for that session.
|
||||
|
||||
|
||||
##### View your Pod’s logs
|
||||
|
||||
Sometimes you just want to watch what’s going on in your server. For this, `kubectl logs` is the subcommand to use. Adding the -f flag lets you live stream new logs to your terminal, just like tail -f.
|
||||
$> kubectl logs -f redis-izl09
|
||||
|
||||
##### Attach to existing containers
|
||||
|
||||
In addition to interactive execution of commands, you can now also attach to any running process. Like kubectl logs, you’ll get stderr and stdout data, but with attach, you’ll also be able to send stdin from your terminal to the program. Awesome for interactive debugging, or even just sending ctrl-c to a misbehaving application.
|
||||
|
||||
$> kubectl attach redis -i
|
||||
|
||||
|
||||
1:C 12 Oct 23:05:11.848 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
|
||||
|
||||
```
|
||||
_._
|
||||
_.-``__''-._
|
||||
_.-`` `. `_. ''-._ Redis 3.0.3 (00000000/0) 64 bit
|
||||
.-`` .-```. ```\/ _.,_ ''-._
|
||||
( ' , .-` | `, ) Running in standalone mode
|
||||
|`-._`-...-` __...-.``-._|'` _.-'| Port: 6379
|
||||
| `-._ `._ / _.-' | PID: 1
|
||||
`-._ `-._ `-./ _.-' _.-'
|
||||
|`-._`-._ `-.__.-' _.-'_.-'|
|
||||
| `-._`-._ _.-'_.-' | http://redis.io
|
||||
`-._ `-._`-.__.-'_.-' _.-'
|
||||
|`-._`-._ `-.__.-' _.-'_.-'|
|
||||
| `-._`-._ _.-'_.-' |
|
||||
`-._ `-._`-.__.-'_.-' _.-'
|
||||
`-._ `-.__.-' _.-'
|
||||
`-._ _.-'
|
||||
`-.__.-'
|
||||
|
||||
1:M 12 Oct 23:05:11.849 # Server started, Redis version 3.0.3
|
||||
```
|
||||
|
||||
##### Forward ports from Pods to your local machine
|
||||
|
||||
Often times you want to be able to temporarily communicate with applications in your cluster without exposing them to the public internet for security reasons. To achieve this, the port-forward command allows you to securely forward a port on your local machine through the kubernetes API server to a Pod running in your cluster. For example:
|
||||
|
||||
`$> kubectl port-forward redis-izl09 6379`
|
||||
|
||||
Opens port 6379 on your local machine and forwards communication to that port to the Pod or Service in your cluster. For example, you can use the ‘telnet’ command to poke at a Redis service in your cluster:
|
||||
|
||||
```
|
||||
$> telnet localhost 6379
|
||||
INCR foo
|
||||
:1
|
||||
INCR foo
|
||||
:2
|
||||
```
|
||||
|
||||
### Execute commands inside an existing container
|
||||
In addition to being able to attach to existing processes inside a container, the “exec” command allows you to spawn new processes inside existing containers. This can be useful for debugging, or examining your pods to see what’s going on inside without interrupting a running service. `kubectl exec` is different from `kubectl run`, because it runs a command inside of an _existing_ container, rather than spawning a new container for execution.
|
||||
|
||||
```
|
||||
$> kubectl exec redis-izl09 -- ls /
|
||||
bin
|
||||
boot
|
||||
data
|
||||
dev
|
||||
entrypoint.sh
|
||||
etc
|
||||
home
|
||||
```
|
||||
|
||||
|
||||
##### Add or remove Labels
|
||||
|
||||
Sometimes you want to dynamically add or remove labels from a Pod, Service or Replication controller. Maybe you want to add an existing Pod to a Service, or you want to remove a Pod from a Service. No matter what you want, you can easily and dynamically add or remove labels using the `kubectl label` subcommand:
|
||||
|
||||
`$> kubectl label pods redis-izl09 mylabel=awesome `
|
||||
`pod "redis-izl09" labeled`
|
||||
|
||||
|
||||
##### Add annotations to your objects
|
||||
|
||||
Just like labels, you can add or remove annotations from API objects using the kubectl annotate subcommand. Unlike labels, annotations are there to help describe your object, but aren’t used to identify pods via label queries ([more details on annotations](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/annotations.md#annotations)). For example, you might add an annotation of an icon for a GUI to use for displaying your pods.
|
||||
|
||||
`$> kubectl annotate pods redis-izl09 icon-url=http://goo.gl/XXBTWq `
|
||||
`pod "redis-izl09" annotated`
|
||||
|
||||
|
||||
##### Output custom format
|
||||
|
||||
Sometimes, you want to customize the fields displayed when kubectl summarizes an object from your cluster. To do this, you can use the `custom-columns-file` format. `custom-columns-file` takes in a template file for rendering the output. Again, JSONPath expressions are used in the template to specify fields in the API object. For example, the following template first shows the number of restarts, and then the name of the object:
|
||||
|
||||
```
|
||||
$> cat cols.tmpl
|
||||
RESTARTS NAME
|
||||
.status.containerStatuses[0].restartCount .metadata.name
|
||||
```
|
||||
|
||||
If you pass this template to the `kubectl get pods` command you get a list of pods with the specified fields displayed.
|
||||
|
||||
```
|
||||
$> kubectl get pods redis-izl09 -o=custom-columns-file --template=cols.tmpl RESTARTS NAME
|
||||
0 redis-izl09
|
||||
1 redis-abl42
|
||||
```
|
||||
|
||||
##### Easily manage multiple Kubernetes clusters
|
||||
|
||||
If you’re running multiple Kubernetes clusters, you know it can be tricky to manage all of the credentials for the different clusters. Using the `kubectl config` subcommands, switching between different clusters is as easy as:
|
||||
|
||||
$> kubectl config use-context
|
||||
|
||||
Not sure what clusters are available? You can view currently configured clusters with:
|
||||
|
||||
$> kubectl config view
|
||||
|
||||
Phew, that outputs a lot of text. To restrict it down to only the things we’re interested in, we can use a JSONPath template:
|
||||
|
||||
$> kubectl config view -o jsonpath="{.contexts[*].name}"
|
||||
|
||||
Ahh, that’s better.
|
||||
|
||||
|
||||
##### Conclusion
|
||||
|
||||
So there you have it, nine new and exciting things you can do with your Kubernetes cluster and the kubectl command line. If you’re just getting started with Kubernetes, check out [Google Container Engine](https://cloud.google.com/container-engine/) or other ways to [get started with Kubernetes](http://kubernetes.io/gettingstarted/).
|
||||
|
||||
- Brendan Burns, Google Software Engineer
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Creating a Raspberry Pi cluster running Kubernetes, the shopping list (Part 1) "
|
||||
date: Thursday, November 25, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
At Devoxx Belgium and Devoxx Morocco, Ray Tsang and I showed a Raspberry Pi cluster we built at Quintor running HypriotOS, Docker and Kubernetes. For those who did not see the talks, you can check out [an abbreviated version of the demo](https://www.youtube.com/watch?v=AAS5Mq9EktI) or the full talk by Ray on [developing and deploying Java-based microservices](https://www.youtube.com/watch?v=kT1vmK0r184) in Kubernetes. While we received many compliments on the talk, the most common question was about how to build a Pi cluster themselves! We’ll be doing just that, in two parts. This first post will cover the shopping list for the cluster, and the second will show you how to get it up and running . . .
|
||||
|
||||
### Wait! Why the heck build a Raspberry Pi cluster running Kubernetes?
|
||||
|
||||
We had two big reasons to build the Pi cluster at Quintor. First of all we wanted to experiment with container technology at scale on real hardware. You can try out container technology using virtual machines, but Kubernetes runs great on bare metal too. To explore what that’d be like, we built a Raspberry Pi cluster just like we would build a cluster of machines in a production datacenter. This allowed us to understand and simulate how Kubernetes would work when we move it to our data centers.
|
||||
|
||||
Secondly, we did not want to blow the budget to do this exploration. And what is cheaper than a Raspberry Pi! If you want to build a cluster comprising many nodes, each node should have a good cost to performance ratio. Our Pi cluster has 20 CPU cores, which is more than many servers, yet cost us less than $400. Additionally, the total power consumption is low and the form factor is small, which is great for these kinds of demo systems.
|
||||
|
||||
So, without further ado, let’s get to the hardware.
|
||||
|
||||
### The Shopping List:
|
||||
|
||||
| | | |
|
||||
| ------------ | ------------ | ------------ |
|
||||
| 5 | Raspberry Pi 2 model B | [~$200](https://www.raspberrypi.org/products/raspberry-pi-2-model-b/) |
|
||||
| 5 | 16 GB micro SD-card class 10 | ~ $45 |
|
||||
| 1 | D-Link Switch GO-SW-8E 8-Port | [~$15](http://www.dlink.com/uk/en/home-solutions/connect/go/go-sw-8e) |
|
||||
| 1 | Anker 60W 6-Port PowerPort USB Charger (white) | [~$35](http://www.ianker.com/product/A2123122) |
|
||||
| 3 | ModMyPi Multi-Pi Stackable Raspberry Pi Case | [~$60](http://www.modmypi.com/raspberry-pi/cases/multi-pi-stacker/multi-pi-stackable-raspberry-pi-case) |
|
||||
| 1 | ModMyPi Multi-Pi Stackable Raspberry Pi Case - Bolt Pack | [~$7](http://www.modmypi.com/raspberry-pi/cases/multi-pi-stacker/multi-pi-stackable-raspberry-pi-case-bolt-pack) |
|
||||
| 5 | Micro USB cable (white) 1ft long | ~ $10 |
|
||||
| 5 | UTP cat5 cable (white) 1ft long | ~ $10 |
|
||||
{: .post-table}
|
||||
|
||||
<br>
|
||||
For a total of approximately $380 you will have a building set to create a Raspberry Pi cluster like we built! [1](#1)
|
||||
|
||||
|
||||
### Some of our considerations
|
||||
|
||||
We used the Raspberry Pi 2 model B boards in our cluster rather than the Pi 1 boards because of the CPU power (quadcore @ 900MHz over a dualcore @ 700MHz) and available memory (1 GB over 512MB). These specs allowed us to run multiple containers on each Pi to properly experiment with Kubernetes.
|
||||
|
||||
We opted for a 16GB SD-card in each Pi to be on the safe side regarding filesystem storage. In hindsight, 8GB seemed to be enough.
|
||||
|
||||
Note the GeauxRobot Stackable Case looks like an alternative for the ModMyPi Stackable Case, but it’s smaller which can cause a problem fitting in the Anker USB Adapter and placing the D-Link Network Switch. So, we stuck with the ModMyPi case.
|
||||
|
||||
|
||||
### Putting it together
|
||||
|
||||
Building the Raspberry Pi cluster is pretty straightforward. Most of the work is putting the stackable casing together and mounting the Pi boards on the plexiglass panes. We mounted the network switch and USB Adapter using double-sided foam tape, which feels strong enough for most situations. Finally, we connected the USB and UTP cables. Next, we installed HypriotOS on every Pi. HypriotOS is a Raspbian based Linux OS for Raspberry Pi’s extended with Docker support. The Hypriot team has an excellent tutorial on [Getting started with Docker on your Raspberry Pi](http://blog.hypriot.com/getting-started-with-docker-on-your-arm-device/). Follow this tutorial to get Linux and Docker running on all Pi’s.
|
||||
|
||||
With that, you’re all set! Next up will be running Kubernetes on the Raspberry Pi cluster. We’ll be covering this the [next post](http://blog.kubernetes.io/2015/12/creating-raspberry-pi-cluster-running.html), so stay tuned!
|
||||
|
||||
|
||||
Arjen Wassink, Java Architect and Team Lead, Quintor
|
||||
|
||||
|
||||
|
||||
** ## [1] ## **
|
||||
**[1]** You can save ~$90 by making a stack of four Pi’s (instead of five). This also means you can use a 5-Port Anker USB Charger instead of the 6-Port one.
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
layout: blog
|
||||
permalink: /blog/:year/:month/:title
|
||||
title: " Kubernetes 1.1 Performance upgrades, improved tooling and a growing community "
|
||||
date: Tuesday, November 09, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Since the Kubernetes 1.0 release in July, we’ve seen tremendous adoption by companies building distributed systems to manage their container clusters. We’ve also been humbled by the rapid growth of the community who help make Kubernetes better every day. We have seen commercial offerings such as Tectonic by CoreOS and RedHat Atomic Host emerge to deliver deployment and support of Kubernetes. And a growing ecosystem has added Kubernetes support including tool vendors such as Sysdig and Project Calico.
|
||||
|
||||
With the help of hundreds of contributors, we’re proud to announce the availability of Kubernetes 1.1, which offers major performance upgrades, improved tooling, and new features that make applications even easier to build and deploy.
|
||||
|
||||
Some of the work we’d like to highlight includes:
|
||||
|
||||
- **Substantial performance improvements** : We have architected Kubernetes from day one to handle Google-scale workloads, and our customers have put it through their paces. In Kubernetes 1.1, we have made further investments to ensure that you can run in extremely high-scale environments; later this week, we will be sharing examples of running thousand node clusters, and running over a million QPS against a single cluster.
|
||||
|
||||
- **Significant improvement in network throughput** : Running Google-scale workloads also requires Google-scale networking. In Kubernetes 1.1, we have included an option to use native IP tables offering an 80% reduction in tail latency, an almost complete elimination of CPU overhead and improvements in reliability and system architecture ensuring Kubernetes can handle high-scale throughput well into the future.
|
||||
|
||||
- **Horizontal pod autoscaling (Beta)**: Many workloads can go through spiky periods of utilization, resulting in uneven experiences for your users. Kubernetes now has support for horizontal pod autoscaling, meaning your pods can scale up and down based on CPU usage. Read more about [Horizontal pod autoscaling](http://kubernetes.io/v1.1/docs/user-guide/horizontal-pod-autoscaler.html).
|
||||
|
||||
- **HTTP load balancer (Beta)**: Kubernetes now has the built-in ability to route HTTP traffic based on the packets introspection. This means you can have ‘http://foo.com/bar’ go to one service, and ‘http://foo.com/meep’ go to a completely independent service. Read more about the [Ingress object](http://kubernetes.io/v1.1/docs/user-guide/ingress.html).
|
||||
|
||||
- **Job objects (Beta)**: We’ve also had frequent requests for integrated batch jobs, such as processing a batch of images to create thumbnails or a particularly large data file that has been broken down into many chunks. [Job objects](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/jobs.md#writing-a-job-spec) introduces a new API object that runs a workload, restarts it if it fails, and keeps trying until it’s successfully completed. Read more about the [Job object](http://kubernetes.io/v1.1/docs/user-guide/jobs.html).
|
||||
|
||||
- **New features to shorten the test cycle for developers** : We continue to work on making developing applications for Kubernetes quick and easy. Two new features that speed developers’ workflows include the ability to run containers interactively, and improved schema validation to let you know if there are any issues with your configuration files before you deploy them.
|
||||
|
||||
- **Rolling update improvements** : Core to the DevOps movement is being able to release new updates without any effect on a running service. Rolling updates now ensure that updated pods are healthy before continuing the update.
|
||||
|
||||
- And many more. For a complete list of updates, see the [1.1 release](https://github.com/kubernetes/kubernetes/releases) notes on GitHub.
|
||||
|
||||
|
||||
|
||||
Today, we’re also proud to mark the inaugural Kubernetes conference, [KubeCon](https://kubecon.io/), where some 400 community members along with dozens of vendors are in attendance supporting the Kubernetes project.
|
||||
|
||||
We’d love to highlight just a few of the many partners making Kubernetes better:
|
||||
|
||||
> “We are betting our major product, Tectonic – which enables any company to deploy, manage and secure its containers anywhere – on Kubernetes because we believe it is the future of the data center. The release of Kubernetes 1.1 is another major milestone that will create more widespread adoption of distributed systems and containers, and puts us on a path that will inevitably lead to a whole new generation of products and services.” – Alex Polvi, CEO, CoreOS.
|
||||
|
||||
> “Univa’s customers are looking for scalable, enterprise-caliber solutions to simplify managing container and non-container workloads in the enterprise. We selected Kubernetes as a foundational element of our new Navops suite which will help IT and DevOps rapidly integrate containerized workloads into their production systems and extend these workloads into cloud services.” – Gary Tyreman, CEO, Univa.
|
||||
|
||||
> “The tremendous customer demand we’re seeing to run containers at scale with Kubernetes is a critical element driving growth in our professional services business at Redapt. As a trusted advisor, it’s great to have a tool like Kubernetes in our tool belt to help our customers achieve their objectives.” – Paul Welch, SR VP Cloud Solutions, Redapt
|
||||
|
||||
>
|
||||
|
||||
As we mentioned above, we would love your help:
|
||||
|
||||
- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)
|
||||
- Connect with the community on [Slack](http://slack.kubernetes.io/)
|
||||
- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates
|
||||
- Post questions (or answer questions) on Stackoverflow
|
||||
- Get started running, deploying, and using Kubernetes [guides](http://kubernetes.io/gettingstarted/)
|
||||
|
||||
But, most of all, just let us know how you are transforming your business using Kubernetes, and how we can help you do it even faster. Thank you for your support!
|
||||
|
||||
- David Aronchick, Senior Product Manager for Kubernetes and Google Container Engine
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes as Foundation for Cloud Native PaaS "
|
||||
date: Wednesday, November 03, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
With Kubernetes continuing to gain momentum as a critical tool for building and scaling container based applications, we’ve been thrilled to see a growing number of platform as a service (PaaS) offerings adopt it as a foundation. PaaS developers have been drawn to Kubernetes by its rapid rate of maturation, the soundness of its core architectural concepts, and the strength of its contributor community. The [Kubernetes ecosystem](http://blog.kubernetes.io/2015/07/the-growing-kubernetes-ecosystem.html) continues to grow, and these PaaS projects are great additions to it.
|
||||
|
||||
[](http://1.bp.blogspot.com/-xX93tnoIlGo/Vjj2fSc_CDI/AAAAAAAAAi0/lvTkT9jyFog/s1600/k8%2Bipaas%2B1.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
> “[Deis](http://deis.io/) is the leading Docker PaaS with over a million downloads, actively used by companies like Mozilla, The RealReal, ShopKeep and Coinbase. Deis provides software teams with a turn-key platform for running containers in production, featuring the ability to build and store Docker images, production-grade load balancing, a streamlined developer interface and an ops-ready suite of logging and monitoring infrastructure backed by world-class 24x7x365 support. After a community-led evaluation of alternative orchestrators, it was clear that Kubernetes represents a decade of experience running containers at scale inside Google. The Deis project is proud to be rebasing onto Kubernetes and is thrilled to join its vibrant community." - Gabriel Monroy, CTO of [Engine Yard](https://www.engineyard.com/), Inc.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
[](http://1.bp.blogspot.com/-1XZFGRHGb34/Vjj2wUtA6pI/AAAAAAAAAi8/SD-qRhVIiIs/s1600/k8%2Bipaas%2B2.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
[OpenShift](http://www.openshift.org/) by Red Hat helps organizations accelerate application delivery by enabling development and IT operations teams to be more agile, responsive and efficient. OpenShift Enterprise 3 is the first fully supported, enterprise-ready, web-scale container application platform that natively integrates the Docker container runtime and packaging format, Kubernetes container orchestration and management engine, on a foundation of Red Hat Enterprise Linux 7, all fully supported by Red Hat from the operating system to application runtimes.
|
||||
|
||||
|
||||
> “Kubernetes provides OpenShift users with a powerful model for application orchestration, leveraging concepts like pods and services, to deploy (micro)services that inherently span multiple containers and application topologies that will require wiring together multiple services. Pods can be optionally mapped to storage, which means you can run both stateful and stateless services in OpenShift. Kubernetes also provides a powerful declarative management model to manage the lifecycle of application containers. Customers can then use Kubernetes’ integrated scheduler to deploy and manage containers across multiple hosts. As a leading contributor to both the Docker and Kubernetes open source projects, Red Hat is not just adopting these technologies but actively building them upstream in the community.” - Joe Fernandes, Director of Product Management for Red Hat OpenShift.
|
||||
|
||||
|
||||
|
||||
|
||||
[](http://2.bp.blogspot.com/-t3L1CANyhUs/Vjj28Zpf9WI/AAAAAAAAAjE/Ef-PLLmHGvU/s1600/k8%2Bipaas%2B3.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Huawei, a leading global ICT technology solution provider, will offer container as a service (CaaS) built on Kubernetes in the public cloud for customers with Docker based applications. Huawei CaaS services will manage multiple clusters across data centers, and deploy, monitor and scale containers with high availability and high resource utilization for their customers. For example, one of Huawei’s current software products for their telecom customers utilizes tens of thousands of modules and hundreds of instances in virtual machines. By moving to a container based PaaS platform powered by Kubernetes, Huawei is migrating this product into a micro-service based, cloud native architecture. By decoupling the modules, they’re creating a high performance, scalable solution that runs hundreds, even thousands of containers in the system. Decoupling existing heavy modules could have been a painful exercise. However, using several key concepts introduced by Kubernetes, such as pods, services, labels, and proxies, Huawei has been able to re-architect their software with great ease.
|
||||
|
||||
Huawei has made Kubernetes the core runtime engine for container based applications/services, and they’ve been building other PaaS components or capabilities around Kubernetes, such as user access management, composite API, Portal and multiple cluster management. Additionally, as part of the migration to the new platform, they’re enhancing their PaaS solution in the areas of advanced scheduling algorithm, multi tenant support and enhanced container network communication to support customer needs.
|
||||
|
||||
|
||||
> “Huawei chose Kubernetes as the foundation for our offering because we like the abstract concepts of services, pod and label for modeling and distributed applications. We developed an application model based on these concepts to model existing complex applications which works well for moving legacy applications into the cloud. In addition, Huawei intends for our PaaS platform to support many scenarios, and Kubernetes’ flexible architecture with its plug-in capability is key to our platform architecture.”- Ying Xiong, Chief Architect of PaaS at Huawei.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
[](http://2.bp.blogspot.com/-Ys0Zn4IQzn0/Vjj3JIE0BVI/AAAAAAAAAjM/ktwltzVa1GE/s1600/k8%2Bipaas%2B4.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
[Gondor](https://gondor.io/) is a PaaS with a focus on application hosting throughout the lifecycle, from development to testing to staging to production. It supports Python, Go, and Node.js applications as well as technologies such as Postgres, Redis and Elasticsearch. The Gondor team recently re-architected Gondor to incorporate Kubernetes, and discussed this in a [blog post](https://gondor.io/blog/2015/07/21/rebuilding-gondor-kubernetes/).
|
||||
|
||||
|
||||
> “There are two main reasons for our move to Kubernetes: One, by taking care of the lower layers in a truly scalable fashion, Kubernetes lets us focus on providing a great product at the application layer. Two, the portability of Kubernetes allows us to expand our PaaS offering to on-premises, private cloud and a multitude of alternative infrastructure providers.” - Brian Rosner, Chief Architect at Eldarion (the driving force behind Gondor)
|
||||
|
||||
- Martin Buhr, Google Business Product Manager
|
||||
|
|
@ -0,0 +1,236 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Monitoring Kubernetes with Sysdig "
|
||||
date: Friday, November 19, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Today we’re sharing a guest post by Chris Crane from Sysdig about their monitoring integration into Kubernetes._
|
||||
|
||||
Kubernetes offers a full environment to write scalable and service-based applications. It takes care of things like container grouping, discovery, load balancing and healing so you don’t have to worry about them. The design is elegant, scalable and the APIs are a pleasure to use.
|
||||
|
||||
And like any new infrastructure platform, if you want to run Kubernetes in production, you’re going to want to be able to monitor and troubleshoot it. We’re big fans of Kubernetes here at Sysdig, and, well: we’re here to help.
|
||||
|
||||
Sysdig offers native visibility into Kubernetes across the full Sysdig product line. That includes [sysdig](http://www.sysdig.org/), our open source, CLI system exploration tool, and [Sysdig Cloud](https://sysdig.com/), the first and only monitoring platform designed from the ground up to support containers and microservices.
|
||||
|
||||
At a high level, Sysdig products are aware of the entire Kubernetes cluster hierarchy, including **namespaces, services, replication controllers** and **labels**. So all of the rich system and application data gathered is now available in the context of your Kubernetes infrastructure. What does this mean for you? In a nutshell, we believe Sysdig can be your go-to tool for making Kubernetes environments significantly easier to monitor and troubleshoot!
|
||||
|
||||
In this post I will quickly preview the Kubernetes visibility in both open source sysdig and Sysdig Cloud, and show off a couple interesting use cases. Let’s start with the open source solution.
|
||||
|
||||
|
||||
### Exploring a Kubernetes Cluster with csysdig
|
||||
|
||||
The easiest way to take advantage of sysdig’s Kubernetes support is by launching csysdig, the sysdig ncurses UI:
|
||||
|
||||
` > csysdig -k http://127.0.0.1:8080`
|
||||
*Note:* specify the address of your Kubernetes API server with the -k command, and sysdig will poll all the relevant information, leveraging both the standard and the watch API.
|
||||
|
||||
Now that csysdig is running, hit F2 to bring up the views panel, and you'll notice the presence of a bunch of new views. The **k8s Namespaces** view can be used to see the list of namespaces and observe the amount of CPU, memory, network and disk resources each of them is using on this machine:
|
||||
|
||||
|
||||
[](http://2.bp.blogspot.com/-9kXfpo76r0k/Vkz8AkpctEI/AAAAAAAAAss/yvf9oc759Wg/s1600/sisdig%2B6.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Similarly, you can select **k8s Services** to see the same information broken up by service:
|
||||
|
||||
|
||||
[](http://2.bp.blogspot.com/-Ya1W3Z_ETcs/Vkz8AN3XtfI/AAAAAAAAAs8/HNv_TvHpfHU/s1600/sisdig%2B2.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
or **k8s Controllers** to see the replication controllers:
|
||||
|
||||
|
||||
[](http://3.bp.blogspot.com/-gGkgXRC5P6g/Vkz8A1RVyAI/AAAAAAAAAtQ/SFlHQeNrDjQ/s1600/sysdig%2B1.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
or **k8s Pods** to see the list of pods running on this machine and the resources they use:
|
||||
|
||||
|
||||
[](http://3.bp.blogspot.com/-PrDfWzi9F3c/Vkz8H6rPlII/AAAAAAAAAtc/f46tE6EKvoo/s1600/sisdig%2B7.png)
|
||||
|
||||
|
||||
|
||||
### Drill Down-Based Navigation
|
||||
A cool feature in csysdig is the ability to drill down: just select an element, click on enter and – boom – now you're looking inside it. Drill down is also aware of the Kubernetes hierarchy, which means I can start from a service, get the list of its pods, see which containers run inside one of the pods, and go inside one of the containers to explore files, network connections, processes or even threads. Check out the video below.
|
||||
|
||||
|
||||
[](http://1.bp.blogspot.com/-lQ-P2gLywlY/Vkz9MOoTgGI/AAAAAAAAAtk/UB6pW7sUbQA/s1600/image09.gif)
|
||||
|
||||
|
||||
### Actions!
|
||||
One more thing about csysdig. As [recently announced](https://sysdig.com/csysdigs-hotkeys-turning-csysdig-into-a-control-panel-for-processes-connections-and-containers/), csysdig also offers “control panel” functionality, making it possible to use hotkeys to execute command lines based on the element currently selected. So we made sure to enrich the Kubernetes views with a bunch of useful hotkeys. For example, you can delete a namespace or a service by pressing "x," or you can describe them by pressing "d."
|
||||
|
||||
My favorite hotkeys, however, are "f," to follow the logs that a pod is generating, and "b," which leverages `kubectl` exec to give you a shell inside a pod. Being brought into a bash prompt for the pod you’re observing is really useful and, frankly, a bit magic. :-)
|
||||
|
||||
So that’s a quick preview of Kubernetes in sysdig. Note though, that all of this functionality is only for a single machine. What happens if you want to monitor a distributed Kubernetes cluster? Enter Sysdig Cloud.
|
||||
|
||||
|
||||
### Monitoring Kubernetes with Sysdig Cloud
|
||||
Let’s start with a quick review of Kubernetes’ architecture. From the physical/infrastructure point of view, a Kubernetes cluster is made up of a set of **minion** machines overseen by a **master** machine. The master’s tasks include orchestrating containers across minions, keeping track of state and exposing cluster control through a REST API and a UI.
|
||||
|
||||
On the other hand, from the logical/application point of view, Kubernetes clusters are arranged in the hierarchical fashion shown in this picture:
|
||||
|
||||
[](http://1.bp.blogspot.com/-p_x0bLRdFJo/Vkz8IPR5q4I/AAAAAAAAAtg/D9UU2MfPmcI/s1600/sisdig%2B4.png)
|
||||
|
||||
|
||||
|
||||
|
||||
* All containers run inside **pods**. A pod can host a single container, or multiple cooperating containers; in the latter case, the containers in the pod are guaranteed to be co-located on the same machine and can share resources.
|
||||
* Pods typically sit behind **services** , which take care of balancing the traffic, and also expose the set of pods as a single discoverable IP address/port.
|
||||
* Services are scaled horizontally by **replication controllers** (“RCs”) which create/destroy pods for each service as needed.
|
||||
* **Namespaces** are virtual clusters that can include one or more services.
|
||||
|
||||
So just to be clear, multiple services and even multiple namespaces can be scattered across the same physical infrastructure.
|
||||
|
||||
|
||||
|
||||
After talking to hundreds of Kubernetes users, it seems that the typical cluster administrator is often interested in looking at things from the physical point of view, while service/application developers tend to be more interested in seeing things from the logical point of view.
|
||||
|
||||
|
||||
|
||||
With both these use cases in mind, Sysdig Cloud’s support for Kubernetes works like this:
|
||||
|
||||
* By automatically connecting to a Kubernetes’ cluster API Server and querying the API (both the regular and the watch API), Sysdig Cloud is able to infer both the physical and the logical structure of your microservice application.
|
||||
* In addition, we transparently extract important metadata such as labels.
|
||||
* This information is combined with our patent-pending ContainerVision technology, which makes it possible to inspect applications running inside containers without requiring any instrumentation of the container or application.
|
||||
Based on this, Sysdig Cloud can provide rich visibility and context from both an **infrastructure-centric** and an **application-centric** point of view. Best of both worlds! Let’s check out what this actually looks like.
|
||||
|
||||
|
||||
|
||||
One of the core features of Sysdig Cloud is groups, which allow you to define the hierarchy of metadata for your applications and infrastructure. By applying the proper groups, you can explore your containers based on their physical hierarchy (for example, physical cluster \> minion machine \> pod \> container) or based on their logical microservice hierarchy (for example, namespace \> replication controller \> pod \> container – as you can see in this example).
|
||||
|
||||
|
||||
|
||||
If you’re interested in the utilization of your underlying physical resource – e.g., identifying noisy neighbors – then the physical hierarchy is great. But if you’re looking to explore the performance of your applications and microservices, then the logical hierarchy is often the best place to start.
|
||||
|
||||
[](http://4.bp.blogspot.com/-80u3oSEi_Fw/Vkz8AZgE6eI/AAAAAAAAAtE/3iRDMJKBNmc/s1600/sisdig%2B5.png)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
For example: here you can see the overall performance of our WordPress service:
|
||||
|
||||
[](http://4.bp.blogspot.com/-QAsedrM2UxI/Vkz8Aas-26I/AAAAAAAAAtM/9B7Z33vUQrg/s1600/sisdig%2B3.png)
|
||||
|
||||
Keep in mind that the pods implementing this service are scattered across multiple machines, but we can still total request counts, response times and URL statistics aggregated together for this service. And don’t forget: this doesn’t require any configuration or instrumentation of wordpress, apache, or the underlying containers!
|
||||
|
||||
|
||||
|
||||
And from this view, I can now easily create alerts for these service-level metrics, and I can dig down into any individual container for deep inspection - down to the process level – whenever I want, including back in time!
|
||||
|
||||
|
||||
|
||||
### Visualizing Your Kubernetes Services
|
||||
|
||||
We’ve also included Kubernetes awareness in Sysdig Cloud’s famous topology view, at both the physical and logical level.
|
||||
|
||||
[](http://2.bp.blogspot.com/-2is-UJatmPk/Vk0AtdfvYvI/AAAAAAAAAt0/9SEsl2LCpYI/s1600/image02.gif)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
[](http://2.bp.blogspot.com/-hGQtaIV9XTA/Vk0RnwtlcGI/AAAAAAAAAuM/7ndiyAWpSvU/s1600/image08.gif)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
The two pictures below show the exact same infrastructure and services. But the first one depicts the physical hierarchy, with a master node and three minion nodes; while the second one groups containers into namespaces, services and pods, while abstracting the physical location of the containers.
|
||||
|
||||
|
||||
|
||||
Hopefully it’s self-evident how much more natural and intuitive the second (services-oriented) view is. The structure of the application and the various dependencies are immediately clear. The interactions between various microservices become obvious, despite the fact that these microservices are intermingled across our machine cluster!
|
||||
|
||||
|
||||
|
||||
### Conclusion
|
||||
|
||||
I’m pretty confident that what we’re delivering here represents a huge leap in visibility into Kubernetes environments and it won’t disappoint you. I also hope it can be a useful tool enabling you to use Kubernetes in production with a little more peace of mind. Thanks, and happy digging!
|
||||
|
||||
|
||||
|
||||
Chris Crane, VP Product, Sysdig
|
||||
|
||||
|
||||
|
||||
_You can find open source sysdig on [github](https://github.com/draios/sysdig) and at [sysdig.org](http://sysdig.org/), and you can sign up for a free trial of Sysdig Cloud at [sysdig.com](http://sysdig.com/)._
|
||||
|
||||
|
||||
|
||||
_To see a live demo and meet some of the folks behind the project join us this Thursday for a [Kubernetes and Sysdig Meetup in San Francisco](http://www.meetup.com/Bay-Area-Kubernetes-Meetup/events/226574438/)._
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " One million requests per second: Dependable and dynamic distributed systems at scale "
|
||||
date: Thursday, November 11, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
Recently, I’ve gotten in the habit of telling people that building a reliable service isn’t that hard. If you give me two Compute Engine virtual machines, a Cloud Load balancer, supervisord and nginx, I can create you a static web service that will serve a static web page, effectively forever.
|
||||
|
||||
The real challenge is building agile AND reliable services. In the new world of software development it's trivial to spin up enormous numbers of machines and push software to them. Developing a successful product must _also_ include the ability to respond to changes in a predictable way, to handle upgrades elegantly and to minimize downtime for users. Missing on any one of these elements results in an _unsuccessful_ product that's flaky and unreliable. I remember a time, not that long ago, when it was common for websites to be unavailable for an hour around midnight each day as a safety window for software upgrades. My bank still does this. It’s really not cool.
|
||||
|
||||
Fortunately, for developers, our infrastructure is evolving along with the requirements that we’re placing on it. Kubernetes has been designed from the ground up to make it easy to design, develop and deploy dependable, dynamic services that meet the demanding requirements of the cloud native world.
|
||||
|
||||
To demonstrate exactly what we mean by this, I've developed a simple demo of a Container Engine cluster serving 1 million HTTP requests per second. In all honesty, serving 1 million requests per second isn’t really that exciting. In fact, it’s really so very [2013](http://googlecloudplatform.blogspot.com/2013/11/compute-engine-load-balancing-hits-1-million-requests-per-second.html).
|
||||
|
||||
[](http://4.bp.blogspot.com/-eACCKAzuQFQ/VkO1rwW1DRI/AAAAAAAAAko/zKu-19QCCBU/s1600/image01.gif)
|
||||
|
||||
|
||||
What _is_ exciting is that while successfully handling 1 million HTTP requests per second with uninterrupted availability, we have Kubernetes perform a zero-downtime rolling upgrade of the service to a new version of the software _while we're **still** serving 1 million requests per second_.
|
||||
|
||||
|
||||
[](http://2.bp.blogspot.com/-_96_QwNRHLo/VkO1oDAyLLI/AAAAAAAAAkk/B_y5Uh5ngPU/s1600/image00.gif)
|
||||
|
||||
|
||||
This is only possible due to a large number of performance tweaks and enhancements that have gone into the [Kubernetes 1.1 release](http://blog.kubernetes.io/2015/11/Kubernetes-1-1-Performance-upgrades-improved-tooling-and-a-growing-community.html). I’m incredibly proud of all of the features that our community has built into this release. Indeed in addition to making it possible to serve 1 million requests per second, we’ve also added an auto-scaler, so that you won’t even have to wake up in the middle of the night to scale your service in response to load or memory pressures.
|
||||
|
||||
If you want to try this out on your own cluster (or use the load test framework to test your own service) the code for the [demo is available on github](https://github.com/kubernetes/contrib/pull/226). And the [full video](https://www.youtube.com/watch?v=7TOWLerX0Ps) is available.
|
||||
|
||||
I hope I’ve shown you how Kubernetes can enable developers of distributed systems to achieve both reliability and agility at scale, and as always, if you’re interested in learning more, head over to [kubernetes.io](http://kubernetes.io/) or [github](https://github.com/kubernetes/kubernetes) and connect with the community on our [Slack](http://slack.kubernetes.io/) channel.
|
||||
|
||||
|
||||
"https://www.youtube.com/embed/7TOWLerX0Ps"
|
||||
|
||||
|
||||
|
||||
- Brendan Burns, Senior Staff Software Engineer, Google, Inc.
|
||||
|
|
@ -0,0 +1,201 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Creating a Raspberry Pi cluster running Kubernetes, the installation (Part 2) "
|
||||
date: Wednesday, December 22, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
At Devoxx Belgium and Devoxx Morocco, [Ray Tsang](https://twitter.com/saturnism) and I ([Arjen Wassink](https://twitter.com/ArjenWassink)) showed a Raspberry Pi cluster we built at Quintor running HypriotOS, Docker and Kubernetes. While we received many compliments on the talk, the most common question was about how to build a Pi cluster themselves! We’ll be doing just that, in two parts. The [first part covered the shopping list for the cluster](http://blog.kubernetes.io/2015/11/creating-a-Raspberry-Pi-cluster-running-Kubernetes-the-shopping-list-Part-1.html), and this second one will show you how to get Kubernetes up and running . . .
|
||||
|
||||
|
||||
Now that you've got your Raspberry Pi cluster all set up, it is time to run some software on it. As mentioned in the previous blog I based this tutorial on the Hypriot linux distribution for the ARM processor. The main reason is the bundled support for Docker. I used [this version of Hypriot](http://downloads.hypriot.com/hypriot-rpi-20151004-132414.img.zip) for this tutorial, so if you run into trouble with other versions of Hypriot, please consider the version I’ve used.
|
||||
|
||||
The first step is to make sure every Pi has Hypriot running; if not yet, please check their [getting started guide](http://blog.hypriot.com/getting-started-with-docker-on-your-arm-device/). Also hook up the cluster switch to a network so that Internet is available and every Pi gets an IP-address assigned via DHCP. Because we will be running multiple Pi’s it is practical to give each Pi a unique hostname. I renamed my Pi’s to rpi-master, rpi-node-1, rpi-node-2, etc for my convenience. Note that on Hypriot the hostname is set by editing the /boot/occidentalis.txt file, not the /etc/hostname. You could also set the hostname using the Hypriot flash tool.
|
||||
|
||||
|
||||
The most important thing about running software on a Pi is the availability of an ARM distribution. Thanks to [Brendan Burns](https://twitter.com/brendandburns), there are Kubernetes components for ARM available in the [Google Cloud Registry](https://cloud.google.com/container-registry/docs/). That’s great. The second hurdle is how to install Kubernetes. There are two ways; directly on the system or in a Docker container. Although the container support has an experimental status, I choose to go for that because it makes it easier to install Kubernetes for you. Kubernetes requires several processes (etcd, flannel, kubectl, etc) to run on a node, which should be started in a specific order. To ease that, systemd services are made available to start the necessary processes in the right way. Also the systemd services make sure that Kubernetes is spun up when a node is (re)booted. To make the installation real easy I created a simple install script for the master node and the worker nodes. All is available at [Github](https://github.com/awassink/k8s-on-rpi). So let’s get started now!
|
||||
|
||||
### Installing the Kubernetes master node
|
||||
|
||||
First we will be installing Kubernetes on the master node and add the worker nodes later to the cluster. It comes basically down to getting the git repository content and executing the installation script.
|
||||
|
||||
```
|
||||
$ curl -L -o k8s-on-rpi.zip https://github.com/awassink/k8s-on-rpi/archive/master.zip
|
||||
|
||||
$ apt-get update
|
||||
|
||||
$ apt-get install unzip
|
||||
|
||||
$ unzip k8s-on-rpi.zip
|
||||
|
||||
$ k8s-on-rpi-master/install-k8s-master.sh
|
||||
```
|
||||
|
||||
The install script will install five services:
|
||||
|
||||
* docker-bootstrap.service - is a separate Docker daemon to run etcd and flannel because flannel needs to be running before the standard Docker daemon (docker.service) because of network configuration.
|
||||
* k8s-etcd.service - is the etcd service for storing flannel and kubelet data.
|
||||
* k8s-flannel.service - is the flannel process providing an overlay network over all nodes in the cluster.
|
||||
* docker.service - is the standard Docker daemon, but with flannel as a network bridge. It will run all Docker containers.
|
||||
* k8s-master.service - is the kubernetes master service providing the cluster functionality.
|
||||
|
||||
The basic details of this installation procedure are also documented in the [Getting Started Guide](https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/getting-started-guides/docker-multinode/master.md) of Kubernetes. Please check it to get more insight on how a multi node Kubernetes cluster is setup.
|
||||
|
||||
|
||||
Let’s check if everything is working correctly. Two docker daemon processes must be running.
|
||||
|
||||
```
|
||||
$ ps -ef|grep docker
|
||||
root 302 1 0 04:37 ? 00:00:14 /usr/bin/docker daemon -H unix:///var/run/docker-bootstrap.sock -p /var/run/docker-bootstrap.pid --storage-driver=overlay --storage-opt dm.basesize=10G --iptables=false --ip-masq=false --bridge=none --graph=/var/lib/docker-bootstrap
|
||||
|
||||
root 722 1 11 04:38 ? 00:16:11 /usr/bin/docker -d -bip=10.0.97.1/24 -mtu=1472 -H fd:// --storage-driver=overlay -D
|
||||
```
|
||||
{: .scale-yaml}
|
||||
|
||||
|
||||
The etcd and flannel containers must be up.
|
||||
```
|
||||
$ docker -H unix:///var/run/docker-bootstrap.sock ps
|
||||
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
|
||||
4855cc1450ff andrewpsuedonym/flanneld "flanneld --etcd-endp" 2 hours ago Up 2 hours k8s-flannel
|
||||
|
||||
ef410b986cb3 andrewpsuedonym/etcd:2.1.1 "/bin/etcd --addr=127" 2 hours ago Up 2 hours k8s-etcd
|
||||
|
||||
|
||||
The hyperkube kubelet, apiserver, scheduler, controller and proxy must be up.
|
||||
|
||||
$ docker ps
|
||||
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
|
||||
a17784253dd2 gcr.io/google\_containers/hyperkube-arm:v1.1.2 "/hyperkube controlle" 2 hours ago Up 2 hours k8s\_controller-manager.7042038a\_k8s-master-127.0.0.1\_default\_43160049df5e3b1c5ec7bcf23d4b97d0\_2174a7c3
|
||||
|
||||
a0fb6a169094 gcr.io/google\_containers/hyperkube-arm:v1.1.2 "/hyperkube scheduler" 2 hours ago Up 2 hours k8s\_scheduler.d905fc61\_k8s-master-127.0.0.1\_default\_43160049df5e3b1c5ec7bcf23d4b97d0\_511945f8
|
||||
|
||||
d93a94a66d33 gcr.io/google\_containers/hyperkube-arm:v1.1.2 "/hyperkube apiserver" 2 hours ago Up 2 hours k8s\_apiserver.f4ad1bfa\_k8s-master-127.0.0.1\_default\_43160049df5e3b1c5ec7bcf23d4b97d0\_b5b4936d
|
||||
|
||||
db034473b334 gcr.io/google\_containers/hyperkube-arm:v1.1.2 "/hyperkube kubelet -" 2 hours ago Up 2 hours k8s-master
|
||||
|
||||
f017f405ff4b gcr.io/google\_containers/hyperkube-arm:v1.1.2 "/hyperkube proxy --m" 2 hours ago Up 2 hours k8s-master-proxy
|
||||
```
|
||||
{: .scale-yaml}
|
||||
|
||||
### Deploying the first pod and service on the cluster
|
||||
|
||||
When that’s looking good we’re able to access the master node of the Kubernetes cluster with kubectl. Kubectl for ARM can be downloaded from googleapis storage. kubectl get nodes shows which cluster nodes are registered with their status. The master node is named 127.0.0.1.
|
||||
```
|
||||
$ curl -fsSL -o /usr/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.1.2/bin/linux/arm/kubectl
|
||||
|
||||
$ kubectl get nodes
|
||||
|
||||
NAME LABELS STATUS AGE
|
||||
|
||||
127.0.0.1 kubernetes.io/hostname=127.0.0.1 Ready 1h
|
||||
|
||||
|
||||
An easy way to test the cluster is by running a busybox docker image for ARM. kubectl run can be used to run the image as a container in a pod. kubectl get pods shows the pods that are registered with their status.
|
||||
|
||||
$ kubectl run busybox --image=hypriot/rpi-busybox-httpd
|
||||
|
||||
$ kubectl get pods -o wide
|
||||
|
||||
NAME READY STATUS RESTARTS AGE NODE
|
||||
|
||||
busybox-fry54 1/1 Running 1 1h 127.0.0.1
|
||||
|
||||
k8s-master-127.0.0.1 3/3 Running 6 1h 127.0.0.1
|
||||
```
|
||||
{: .scale-yaml}
|
||||
|
||||
Now the pod is running but the application is not generally accessible. That can be achieved by creating a service. The cluster IP-address is the IP-address at which the service is available within the cluster. Use the IP-address of your master node as external IP and the service becomes available outside of the cluster (e.g. at http://192.168.192.161 in my case).
|
||||
```
|
||||
$ kubectl expose rc busybox --port=90 --target-port=80 --external-ip=\<ip-address-master-node\>
|
||||
|
||||
$ kubectl get svc
|
||||
|
||||
NAME CLUSTER\_IP EXTERNAL\_IP PORT(S) SELECTOR AGE
|
||||
|
||||
busybox 10.0.0.87 192.168.192.161 90/TCP run=busybox 1h
|
||||
|
||||
kubernetes 10.0.0.1 \<none\> 443/TCP \<none\> 2h
|
||||
|
||||
$ curl http://10.0.0.87:90/
|
||||
```
|
||||
{: .scale-yaml}
|
||||
|
||||
```
|
||||
\<html\>
|
||||
|
||||
\<head\>\<title\>Pi armed with Docker by Hypriot\</title\>
|
||||
|
||||
\<body style="width: 100%; background-color: black;"\>
|
||||
|
||||
\<div id="main" style="margin: 100px auto 0 auto; width: 800px;"\>
|
||||
|
||||
\<img src="pi\_armed\_with\_docker.jpg" alt="pi armed with docker" style="width: 800px"\>
|
||||
|
||||
\</div\>
|
||||
|
||||
\</body\>
|
||||
|
||||
\</html\>
|
||||
```
|
||||
### Installing the Kubernetes worker nodes
|
||||
|
||||
The next step is installing Kubernetes on each worker node and adding it to the cluster. This also comes basically down to getting the git repository content and executing the installation script. Though in this installation the k8s.conf file needs to be copied beforehand and edited to contain the IP-address of the master node.
|
||||
|
||||
```
|
||||
$ curl -L -o k8s-on-rpi.zip https://github.com/awassink/k8s-on-rpi/archive/master.zip
|
||||
|
||||
$ apt-get update
|
||||
|
||||
$ apt-get install unzip
|
||||
|
||||
$ unzip k8s-on-rpi.zip
|
||||
|
||||
$ mkdir /etc/kubernetes
|
||||
|
||||
$ cp k8s-on-rpi-master/rootfs/etc/kubernetes/k8s.conf /etc/kubernetes/k8s.conf
|
||||
```
|
||||
### Change the ip-address in /etc/kubernetes/k8s.conf to match the master node ###
|
||||
```
|
||||
$ k8s-on-rpi-master/install-k8s-worker.sh
|
||||
```
|
||||
{: .scale-yaml}
|
||||
|
||||
|
||||
The install script will install four services. These are the quite similar to ones on the master node, but with the difference that no etcd service is running and the kubelet service is configured as worker node.
|
||||
|
||||
Once all the services on the worker node are up and running we can check that the node is added to the cluster on the master node.
|
||||
```
|
||||
$ kubectl get nodes
|
||||
|
||||
NAME LABELS STATUS AGE
|
||||
|
||||
127.0.0.1 kubernetes.io/hostname=127.0.0.1 Ready 2h
|
||||
|
||||
192.168.192.160 kubernetes.io/hostname=192.168.192.160 Ready 1h
|
||||
|
||||
$ kubectl scale --replicas=2 rc/busybox
|
||||
|
||||
$ kubectl get pods -o wide
|
||||
|
||||
NAME READY STATUS RESTARTS AGE NODE
|
||||
|
||||
busybox-fry54 1/1 Running 1 1h 127.0.0.1
|
||||
|
||||
busybox-j2slu 1/1 Running 0 1h 192.168.192.160
|
||||
|
||||
k8s-master-127.0.0.1 3/3 Running 6 2h 127.0.0.1
|
||||
```
|
||||
{: .scale-yaml}
|
||||
|
||||
### Enjoy your Kubernetes cluster!
|
||||
|
||||
Congratulations! You now have your Kubernetes Raspberry Pi cluster running and can start playing with Kubernetes and start learning. Checkout the [Kubernetes User Guide](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/README.md) to find out what you all can do. And don’t forget to pull some plugs occasionally like Ray and I do :-)
|
||||
|
||||
|
||||
Arjen Wassink, Java Architect and Team Lead, Quintor
|
||||
|
|
@ -0,0 +1,127 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " How Weave built a multi-deployment solution for Scope using Kubernetes "
|
||||
date: Sunday, December 12, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Today we hear from Peter Bourgon, Software Engineer at Weaveworks, a company that provides software for developers to network, monitor and control microservices-based apps in docker containers. Peter tells us what was involved in selecting and deploying Kubernetes _
|
||||
|
||||
Earlier this year at Weaveworks we launched [Weave Scope](http://weave.works/product/scope/index.html), an open source solution for visualization and monitoring of containerised apps and services. Recently we released a hosted Scope service into an [Early Access Program](http://blog.weave.works/2015/10/08/weave-the-fastest-path-to-docker-on-amazon-ec2-container-service/). Today, we want to walk you through how we initially prototyped that service, and how we ultimately chose and deployed Kubernetes as our platform.
|
||||
|
||||
|
||||
##### A cloud-native architecture
|
||||
|
||||
Scope already had a clean internal line of demarcation between data collection and user interaction, so it was straightforward to split the application on that line, distribute probes to customers, and host frontends in the cloud. We built out a small set of microservices in the [12-factor model](http://12factor.net/), which includes:
|
||||
|
||||
|
||||
* A users service, to manage and authenticate user accounts
|
||||
* A provisioning service, to manage the lifecycle of customer Scope instances
|
||||
* A UI service, hosting all of the fancy HTML and JavaScript content
|
||||
* A frontend service, to route requests according to their properties
|
||||
* A monitoring service, to introspect the rest of the system
|
||||
|
||||
|
||||
|
||||
All services are built as Docker images, [FROM scratch](https://medium.com/@kelseyhightower/optimizing-docker-images-for-static-binaries-b5696e26eb07#.qqjkud6i0) where possible. We knew that we wanted to offer at least 3 deployment environments, which should be as near to identical as possible.
|
||||
|
||||
|
||||
* An "Airplane Mode" local environment, on each developer's laptop
|
||||
* A development or staging environment, on the same infrastructure that hosts production, with different user credentials
|
||||
* The production environment itself
|
||||
|
||||
|
||||
|
||||
These were our application invariants. Next, we had to choose our platform and deployment model.
|
||||
|
||||
|
||||
##### Our first prototype
|
||||
There are a seemingly infinite set of choices, with an infinite set of possible combinations. After surveying the landscape in mid-2015, we decided to make a prototype with
|
||||
|
||||
|
||||
* [Amazon EC2](https://aws.amazon.com/ec2/) as our cloud platform, including RDS for persistence
|
||||
* [Docker Swarm](https://docs.docker.com/swarm/) as our "scheduler"
|
||||
* [Consul](https://consul.io/) for service discovery when bootstrapping Swarm
|
||||
* [Weave Net](http://weave.works/product/net/) for our network and service discovery for the application itself
|
||||
* [Terraform](https://terraform.io/) as our provisioner
|
||||
|
||||
|
||||
|
||||
This setup was fast to define and fast to deploy, so it was a great way to validate the feasibility of our ideas. But we quickly hit problems.
|
||||
|
||||
|
||||
|
||||
* Terraform's support for [Docker as a provisioner](https://terraform.io/docs/providers/docker) is barebones, and we uncovered [some bugs](https://github.com/hashicorp/terraform/issues/3526) when trying to use it to drive Swarm.
|
||||
* Largely as a consequence of the above, managing a zero-downtime deploy of Docker containers with Terraform was very difficult.
|
||||
* Swarm's _raison d'être_ is to abstract the particulars of multi-node container scheduling behind the familiar Docker CLI/API commands. But we concluded that the API is insufficiently expressive for the kind of operations that are necessary at scale in production.
|
||||
* Swarm provides no fault tolerance in the case of e.g. node failure.
|
||||
|
||||
|
||||
|
||||
We also made a number of mistakes when designing our workflow.
|
||||
|
||||
|
||||
* We tagged each container with its target environment at build time, which simplified our Terraform definitions, but effectively forced us to manage our versions via image repositories. That responsibility belongs in the scheduler, not the artifact store.
|
||||
* As a consequence, every deploy required artifacts to be pushed to all hosts. This made deploys slow, and rollbacks unbearable.
|
||||
* Terraform is designed to provision infrastructure, not cloud applications. The process is slower and more deliberate than we’d like. Shipping a new version of something to prod took about 30 minutes, all-in.
|
||||
|
||||
|
||||
|
||||
When it became clear that the service had potential, we re-evaluated the deployment model with an eye towards the long-term.
|
||||
|
||||
|
||||
##### Rebasing on Kubernetes
|
||||
It had only been a couple of months, but a lot had changed in the landscape.
|
||||
|
||||
|
||||
* HashiCorp released [Nomad](https://nomadproject.io/)
|
||||
* [Kubernetes](https://kubernetes.io/) hit 1.0
|
||||
* Swarm was soon to hit 1.0
|
||||
|
||||
|
||||
|
||||
While many of our problems could be fixed without making fundamental architectural changes, we wanted to capitalize on the advances in the industry, by joining an existing ecosystem, and leveraging the experience and hard work of its contributors.
|
||||
|
||||
After some internal deliberation, we did a small-scale audition of Nomad and Kubernetes. We liked Nomad a lot, but felt it was just too early to trust it with our production service. Also, we found the Kubernetes developers to be the most responsive to issues on GitHub. So, we decided to go with Kubernetes.
|
||||
|
||||
|
||||
##### Local Kubernetes
|
||||
|
||||
First, we would replicate our Airplane Mode local environment with Kubernetes. Because we have developers on both Mac and Linux laptops, it’s important that the local environment is containerised. So, we wanted the Kubernetes components themselves (kubelet, API server, etc.) to run in containers.
|
||||
|
||||
We encountered two main problems. First, and most broadly, creating Kubernetes clusters from scratch is difficult, as it requires deep knowledge of how Kubernetes works, and quite some time to get the pieces to fall in place together. [local-cluster-up.sh](http://local-cluster-up.sh/) seems like a Kubernetes developer’s tool and didn’t leverage containers, and the third-party solutions we found, like [Kubernetes Solo](https://github.com/rimusz/coreos-osx-kubernetes-solo), require a dedicated VM or are platform-specific.
|
||||
|
||||
Second, containerised Kubernetes is still missing several important pieces. Following the [official Kubernetes Docker guide](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/docker.md) yields a barebones cluster without certificates or service discovery. We also encountered a couple of usability issues ([#16586](https://github.com/kubernetes/kubernetes/issues/16586), [#17157](https://github.com/kubernetes/kubernetes/issues/17157)), which we resolved by [submitting a patch](https://github.com/kubernetes/kubernetes/pull/17159) and building our own [hyperkube image](https://hub.docker.com/r/2opremio/hyperkube/) from master.
|
||||
|
||||
In the end, we got things working by creating our own provisioning script. It needs to do things like [generate the PKI keys and certificates](https://github.com/kubernetes/kubernetes/blob/master/docs/admin/authentication.md#creating-certificates) and [provision the DNS add-on](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns), which took a few attempts to get right. We’ve also learned of a [commit to add certificate generation to the Docker build](https://github.com/kubernetes/kubernetes/commit/ce90b83689f08cb5ebb6b632dab7f95a48060425), so things will likely get easier in the near term.
|
||||
|
||||
|
||||
##### Kubernetes on AWS
|
||||
|
||||
Next, we would deploy Kubernetes to AWS, and wire it up with the other AWS components. We wanted to stand up the service in production quickly, and we only needed to support Amazon, so we decided to do so without Weave Net and to use a pre-existing provisioning solution. But we’ll definitely revisit this decision in the near future, leveraging Weave Net via Kubernetes plugins.
|
||||
|
||||
Ideally we would have used Terraform resources, and we found a couple: [kraken](https://github.com/Samsung-AG/kraken) (using Ansible), [kubestack](https://github.com/kelseyhightower/kubestack) (coupled to GCE), [kubernetes-coreos-terraform](https://github.com/bakins/kubernetes-coreos-terraform) (outdated Kubernetes) and [coreos-kubernetes](https://github.com/coreos/coreos-kubernetes). But they all build on CoreOS, which was an extra moving part we wanted to avoid in the beginning. (On our next iteration, we’ll probably audition CoreOS.) If you use Ansible, there are [playbooks available](https://github.com/kubernetes/contrib/tree/master/ansible) in the main repo. There are also community-driven [Chef cookbooks](https://github.com/evilmartians/chef-kubernetes) and [Puppet modules](https://forge.puppetlabs.com/cristifalcas/kubernetes). I’d expect the community to grow quickly here.
|
||||
|
||||
The only other viable option seemed to be kube-up, which is a collection of scripts that provision Kubernetes onto a variety of cloud providers. By default, kube-up onto AWS puts the master and minion nodes into their own VPC, or Virtual Private Cloud. But our RDS instances were provisioned in the region-default VPC, which meant that communication from a Kubernetes minion to the DB would be possible only via [VPC peering](http://ben.straub.cc/2015/08/19/kubernetes-aws-vpc-peering/) or by opening the RDS VPC's firewall rules manually.
|
||||
|
||||
To get traffic to traverse a VPC peer link, your destination IP needs to be in the target VPC's private address range. But [it turns out](https://forums.aws.amazon.com/thread.jspa?messageID=681125) that resolving the RDS instance's hostname from anywhere outside the same VPC will yield the public IP. And performing the resolution is important, because RDS reserves the right to change the IP for maintenance. This wasn't ever a concern in the previous infrastructure, because our Terraform scripts simply placed everything in the same VPC. So I thought I'd try the same with Kubernetes; the kube-up script ostensibly supports installing to an existing VPC by specifying a VPC\_ID environment variable, so I tried installing Kubernetes to the RDS VPC. kube-up appeared to succeed, but [service integration via ELBs broke](https://github.com/kubernetes/kubernetes/issues/17647) and [teardown via kube-down stopped working](https://github.com/kubernetes/kubernetes/issues/17219). After some time, we judged it best to let kube-up keep its defaults, and poked a hole in the RDS VPC.
|
||||
|
||||
This was one hiccup among several that we encountered. Each one could be fixed in isolation, but the inherent fragility of using a shell script to provision remote state seemed to be the actual underlying cause. We fully expect the Terraform, Ansible, Chef, Puppet, etc. packages to continue to mature, and hope to switch soon.
|
||||
|
||||
Provisioning aside, there are great things about the Kubernetes/AWS integration. For example, Kubernetes [services](http://kubernetes.io/v1.1/docs/user-guide/services.html) of the correct type automatically generate ELBs, and Kubernetes does a great job of lifecycle management there. Further, the Kubernetes domain model—services, [pods](http://kubernetes.io/v1.1/docs/user-guide/pods.html), [replication controllers](http://kubernetes.io/v1.1/docs/user-guide/replication-controller.html), the [labels and selector model](http://kubernetes.io/v1.1/docs/user-guide/labels.html), and so on—is coherent, and seems to give the user the right amount of expressivity, though the definition files do [tend to stutter needlessly](https://github.com/kubernetes/kubernetes/blob/643cb7a1c7499df4e569f4f0fbd3b18c0c4e63ce/examples/guestbook/redis-master-controller.yaml). The kubectl tool is good, albeit [daunting at first glance](http://i.imgur.com/nEyTWej.png). The [rolling-update](http://kubernetes.io/v1.1/docs/user-guide/update-demo/README.html) command in particular is brilliant: exactly the semantics and behavior I'd expect from a system like this. Indeed, once Kubernetes was up and running, _it just worked_, and exactly as I expected it to. That’s a huge thing.
|
||||
|
||||
|
||||
##### Conclusions
|
||||
|
||||
After a couple weeks of fighting with the machines, we were able to resolve all of our integration issues, and have rolled out a reasonably robust Kubernetes-based system to production.
|
||||
|
||||
|
||||
* **Provisioning Kubernetes is difficult** , owing to a complex architecture and young provisioning story. This shows all signs of improving.
|
||||
* Kubernetes’ non-optional **security model takes time to get right**.
|
||||
* The Kubernetes **domain language is a great match** to the problem domain.
|
||||
* We have **a lot more confidence** in operating our application (It's a lot faster, too.).
|
||||
* And we're **very happy to be part of a growing Kubernetes userbase** , contributing issues and patches as we can and benefitting from the virtuous cycle of open-source development that powers the most exciting software being written today.
|
||||
- Peter Bourgon, Software Engineer at Weaveworks
|
||||
|
||||
_Weave Scope is an open source solution for visualization and monitoring of containerised apps and services. For a hosted Scope service, request an invite to Early Access program at scope.weave.works._
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Managing Kubernetes Pods, Services and Replication Controllers with Puppet "
|
||||
date: Friday, December 17, 2015
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Today’s guest post is written by Gareth Rushgrove, Senior Software Engineer at Puppet Labs, a leader in IT automation. Gareth tells us about a new Puppet module that helps manage resources in Kubernetes. _
|
||||
|
||||
People familiar with [Puppet](https://github.com/puppetlabs/puppet) might have used it for managing files, packages and users on host computers. But Puppet is first and foremost a configuration management tool, and config management is a much broader discipline than just managing host-level resources. A good definition of configuration management is that it aims to solve four related problems: identification, control, status accounting and verification and audit. These problems exist in the operation of any complex system, and with the new [Puppet Kubernetes module](https://forge.puppetlabs.com/garethr/kubernetes) we’re starting to look at how we can solve those problems for Kubernetes.
|
||||
|
||||
|
||||
### The Puppet Kubernetes Module
|
||||
|
||||
The Puppet Kubernetes module currently assumes you already have a Kubernetes cluster [up and running](http://kubernetes.io/gettingstarted/). Its focus is on managing the resources in Kubernetes, like Pods, Replication Controllers and Services, not (yet) on managing the underlying kubelet or etcd services. Here’s a quick snippet of code describing a Pod in Puppet’s DSL.
|
||||
|
||||
|
||||
```
|
||||
kubernetes_pod { 'sample-pod':
|
||||
ensure => present,
|
||||
metadata => {
|
||||
namespace => 'default',
|
||||
},
|
||||
spec => {
|
||||
containers => [{
|
||||
name => 'container-name',
|
||||
image => 'nginx',
|
||||
}]
|
||||
},
|
||||
}
```
|
||||
|
||||
|
||||
If you’re familiar with the YAML file format, you’ll probably recognise the structure immediately. The interface is intentionally identical to aid conversion between different formats — in fact, the code powering this is autogenerated from the Kubernetes API Swagger definitions. Running the above code, assuming we save it as pod.pp, is as simple as:
|
||||
|
||||
|
||||
```
|
||||
puppet apply pod.pp
|
||||
```
|
||||
|
||||
Authentication uses the standard kubectl configuration file. You can find complete [installation instructions in the module's README](https://github.com/garethr/garethr-kubernetes/blob/master/README.md).
|
||||
|
||||
Kubernetes has several resources, from Pods and Services to Replication Controllers and Service Accounts. You can see an example of the module managing these resources in the [Kubernetes guestbook sample in Puppet](https://puppetlabs.com/blog/kubernetes-guestbook-example-puppet) post. This demonstrates converting the canonical hello-world example to use Puppet code.
|
||||
|
||||
One of the main advantages of using Puppet for this, however, is that you can create your own higher-level and more business-specific interfaces to Kubernetes-managed applications. For instance, for the guestbook, you could create something like the following:
|
||||
|
||||
|
||||
```
|
||||
guestbook { 'myguestbook':
|
||||
redis_slave_replicas => 2,
|
||||
frontend_replicas => 3,
|
||||
redis_master_image => 'redis',
|
||||
redis_slave_image => 'gcr.io/google_samples/gb-redisslave:v1',
|
||||
frontend_image => 'gcr.io/google_samples/gb-frontend:v3',
|
||||
}
|
||||
```
|
||||
|
||||
You can read more about using Puppet’s defined types, and see lots more code examples, in the Puppet blog post, [Building Your Own Abstractions for Kubernetes in Puppet](https://puppetlabs.com/blog/building-your-own-abstractions-kubernetes-puppet).
|
||||
|
||||
|
||||
### Conclusions
|
||||
|
||||
The advantages of using Puppet rather than just the standard YAML files and kubectl are:
|
||||
|
||||
|
||||
- The ability to create your own abstractions to cut down on repetition and craft higher-level user interfaces, like the guestbook example above.
|
||||
- Use of Puppet’s development tools for validating code and for writing unit tests.
|
||||
- Integration with other tools such as Puppet Server, for ensuring that your model in code matches the state of your cluster, and with PuppetDB for storing reports and tracking changes.
|
||||
- The ability to run the same code repeatedly against the Kubernetes API, to detect any changes or remediate configuration drift.
|
||||
|
||||
It’s also worth noting that most large organisations will have very heterogenous environments, running a wide range of software and operating systems. Having a single toolchain that unifies those discrete systems can make adopting new technology like Kubernetes much easier.
|
||||
|
||||
It’s safe to say that Kubernetes provides an excellent set of primitives on which to build cloud-native systems. And with Puppet, you can address some of the operational and configuration management issues that come with running any complex system in production. [Let us know](mailto:gareth@puppetlabs.com) what you think if you try the module out, and what else you’d like to see supported in the future.
|
||||
|
||||
- Gareth Rushgrove, Senior Software Engineer, Puppet Labs
|
||||
|
|
@ -0,0 +1,103 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: "Kubernetes Community Meeting Notes - 20160114"
|
||||
date: Thursday, January 28, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
#### January 14 - RackN demo, testing woes, and KubeCon EU CFP.
|
||||
---
|
||||
Note taker: Joe Beda
|
||||
---
|
||||
* Demonstration: Automated Deploy on Metal, AWS and others w/ Digital Rebar, Rob Hirschfeld and Greg Althaus from RackN
|
||||
|
||||
* Greg Althaus. CTO. Digital Rebar is the product. Bare metal provisioning tool.
|
||||
|
||||
* Detect hardware, bring it up, configure raid, OS and get workload deployed.
|
||||
|
||||
* Been working on Kubernetes workload.
|
||||
|
||||
* Seeing trend to start in cloud and then move back to bare metal.
|
||||
|
||||
* New provider model to use provisioning system on both cloud and bare metal.
|
||||
|
||||
* UI, REST API, CLI
|
||||
|
||||
* Demo: Packet -- bare metal as a service
|
||||
|
||||
* 4 nodes running grouped into a "deployment"
|
||||
|
||||
* Functional roles/operations selected per node.
|
||||
|
||||
* Decomposed the kubernetes bring up into units that can be ordered and synchronized. Dependency tree -- things like wait for etcd to be up before starting k8s master.
|
||||
|
||||
* Using the Ansible playbook under the covers.
|
||||
|
||||
* Demo brings up 5 more nodes -- packet will build those nodes
|
||||
|
||||
* Pulled out basic parameters from the ansible playbook. Things like the network config, dns set up, etc.
|
||||
|
||||
* Hierarchy of roles pulls in other components -- making a node a master brings in a bunch of other roles that are necessary for that.
|
||||
|
||||
* Has all of this combined into a command line tool with a simple config file.
|
||||
|
||||
* Forward: extending across multiple clouds for test deployments. Also looking to create split/replicated across bare metal and cloud.
|
||||
|
||||
* Q: secrets?
|
||||
A: using ansible playbooks. Builds own certs and then distributes them. Wants to abstract them out and push that stuff upstream.
|
||||
|
||||
* Q: Do you support bringing up from real bare metal with PXE boot?
|
||||
A: yes -- will discover bare metal systems and install OS, install ssh keys, build networking, etc.
|
||||
* [from SIG-scalability] Q: What is the status of moving to golang 1.5?
|
||||
A: At HEAD we are 1.5 but will support 1.4 also. Some issues with flakiness but looks like things are stable now.
|
||||
|
||||
* Also looking to use the 1.5 vendor experiment. Move away from godep. But can't do that until 1.5 is the baseline.
|
||||
|
||||
* Sarah: one of the things we are working on is rewards for doing stuff like this. Cloud credits, tshirts, poker chips, ponies.
|
||||
* [from SIG-scalability] Q: What is the status of cleaning up the jenkins based submit queue? What can the community do to help out?
|
||||
A: It has been rocky the last few days. There should be issues associated with each of these. There is a [flake label][1] on those issues.
|
||||
|
||||
* Still working on test federation. More test resources now. Happening slowly but hopefully faster as new people come up to speed. Will be great to having lots of folks doing e2e tests on their environments.
|
||||
|
||||
* Erick Fjeta is the new test lead
|
||||
|
||||
* Brendan is happy to help share details on Jenkins set up but that shouldn't be necessary.
|
||||
|
||||
* Federation may use Jenkins API but doesn't require Jenkins itself.
|
||||
|
||||
* Joe bitches about the fact that running the e2e tests in the way Jenkins is tricky. Brendan says it should be runnable easily. Joe will take another look.
|
||||
|
||||
* Conformance tests? etune did this but he isn't here. - revisit 20160121
|
||||
* March 10-11 in London. Venue to be announced this week.
|
||||
|
||||
* Please send talks! CFP deadline looks to be Feb 5.
|
||||
|
||||
* Lots of excitement. Looks to be 700-800 people. Bigger than SF version (560 ppl).
|
||||
|
||||
* Buy tickets early -- early bird prices will end soon and price will go up 100 GBP.
|
||||
|
||||
* Accommodations provided for speakers?
|
||||
|
||||
* Q from Bob @ Samsung: Can we get more warning/planning for stuff like this:
|
||||
|
||||
* A: Sarah -- I don't hear about this stuff much in advance but will try to pull together a list. Working to make the events page on kubernetes.io easier to use.
|
||||
|
||||
* A: JJ -- we'll make sure we give more info earlier for the next US conf.
|
||||
* Scale tests [Rob Hirschfeld from RackN] -- if you want to help coordinate on scale tests we'd love to help.
|
||||
|
||||
* Bob invited Rob to join the SIG-scale group.
|
||||
|
||||
* There is also a big bare metal cluster through the CNCF (from Intel) that will be useful too. No hard dates yet on that.
|
||||
* Notes/video going to be posted on k8s blog. (Video for 20160114 wasn't recorded. Fail.)
|
||||
|
||||
To get involved in the Kubernetes community consider joining our [Slack channel][2], taking a look at the [Kubernetes project][3] on GitHub, or join the [Kubernetes-dev Google group][4]. If you're really excited, you can do all of the above and join us for the next community conversation - January 27th, 2016. Please add yourself or a topic you want to know about to the [agenda][5] and get a calendar invitation by joining [this group][6].
|
||||
|
||||
|
||||
|
||||
[1]: https://github.com/kubernetes/kubernetes/labels/kind%2Fflake
|
||||
[2]: http://slack.k8s.io/
|
||||
[3]: https://github.com/kubernetes/
|
||||
[4]: https://groups.google.com/forum/#!forum/kubernetes-dev
|
||||
[5]: https://docs.google.com/document/d/1VQDIAB0OqiSjIHI8AWMvSdceWhnz56jNpZrLs6o7NJY/edit#
|
||||
[6]: https://groups.google.com/forum/#!forum/kubernetes-community-video-chat
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: Kubernetes Community Meeting Notes - 20160121
|
||||
date: Thursday, January 28, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
#### January 21 - Configuration, Federation and Testing, oh my.
|
||||
|
||||
|
||||
Note taker: Rob Hirschfeld
|
||||
- Use Case (10 min): [SFDC Paul Brown](https://docs.google.com/a/google.com/presentation/d/1MEI97efplr3f-GDX1GcWGfkEuGKKV-4niu27kHOeMLk/edit?usp=sharing_eid&ts=56a114f8)
|
||||
- SIG Report - SIG-config and the story of [#18215](https://github.com/kubernetes/kubernetes/pull/18215){: .inline-link}
|
||||
|
||||
- Application config in K8s, not deployment of K8s
|
||||
- Topic has been reuse of configuration, specifically parameterization (aka templates). Needs:
|
||||
|
||||
- needs include scoping (cluster namespace)
|
||||
- slight customization (naming changes, but not major config)
|
||||
- multiple positions on how to do this including
|
||||
|
||||
- allowing external or simple extensions.
|
||||
- PetSet creates instances w/ stable namespace
|
||||
- Workflow proposal
|
||||
- Distributed Chron.
|
||||
- Challenge is that configs need to create multiple objects in sequence
|
||||
- Trying to figure out how to balance the many config options out there (compose, terraform, ansible/etc)
|
||||
- Goal is to “meet people where they are” to keep it simple
|
||||
- Q: is there an opinion for the keystore sizing
|
||||
|
||||
- large size / data blob would not be appropriate
|
||||
- you can pull data(config) from another store for larger objects
|
||||
- [SIG Report - SIG-federation - progress on Ubernetes-Lite &amp; Ubernetes design](https://github.com/kubernetes/kubernetes/pull/18215)
|
||||
|
||||
- Goal is to be able to have a cluster manager, so you can federate clusters. They will automatically distribute the pods.
|
||||
-
|
||||
[Plan is to use the same API for the master cluster](https://github.com/kubernetes/kubernetes/pull/18215)
|
||||
- Quinton's Ubernetes Talk: https://youtu.be/L2ZK24JojB4
|
||||
- Design for Ubernetes: https://github.com/kubernetes/kubernetes/pull/19313
|
||||
|
||||
|
||||
- Conformance testing Q+A [Isaac Hollander McCreery]
|
||||
|
||||
- status on conformance testing for release process
|
||||
|
||||
- expect to be forward compatible but not backwards
|
||||
- is there interest for a sig-testing meeting
|
||||
- testing needs to a higher priority for the project
|
||||
- lots of focus on trying to make this a higher priority
|
||||
To get involved in the Kubernetes community consider joining our [Slack channel](http://slack.k8s.io/), taking a look at the [Kubernetes project](https://github.com/kubernetes/) on GitHub, or join the [Kubernetes-dev Google group](https://groups.google.com/forum/#!forum/kubernetes-dev). If you’re really excited, you can do all of the above and join us for the next community conversation -- January 27th, 2016. Please add yourself or a topic you want to know about to the [agenda](https://docs.google.com/document/d/1VQDIAB0OqiSjIHI8AWMvSdceWhnz56jNpZrLs6o7NJY/edit) and get a calendar invitation by joining [this group](https://groups.google.com/forum/#!forum/kubernetes-community-video-chat).
|
||||
|
||||
|
||||
|
||||
Still want more Kubernetes? Check out the [recording](https://www.youtube.com/watch?v=izQLFx_6kwY&feature=youtu.be&list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ) of this meeting and the growing of the archive of [Kubernetes Community Meetings](https://www.youtube.com/playlist?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ).
|
||||
|
|
@ -0,0 +1,142 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Simple leader election with Kubernetes and Docker "
|
||||
date: Tuesday, January 11, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
#### Overview
|
||||
|
||||
|
||||
Kubernetes simplifies the deployment and operational management of services running on clusters. However, it also simplifies the development of these services. In this post we'll see how you can use Kubernetes to easily perform leader election in your distributed application. Distributed applications usually replicate the tasks of a service for reliability and scalability, but often it is necessary to designate one of the replicas as the leader who is responsible for coordination among all of the replicas.
|
||||
|
||||
Typically in leader election, a set of candidates for becoming leader is identified. These candidates all race to declare themselves the leader. One of the candidates wins and becomes the leader. Once the election is won, the leader continually "heartbeats" to renew their position as the leader, and the other candidates periodically make new attempts to become the leader. This ensures that a new leader is identified quickly, if the current leader fails for some reason.
|
||||
|
||||
Implementing leader election usually requires either deploying software such as ZooKeeper, etcd or Consul and using it for consensus, or alternately, implementing a consensus algorithm on your own. We will see below that Kubernetes makes the process of using leader election in your application significantly easier.
|
||||
|
||||
#### Implementing leader election in Kubernetes
|
||||
|
||||
|
||||
The first requirement in leader election is the specification of the set of candidates for becoming the leader. Kubernetes already uses _Endpoints_ to represent a replicated set of pods that comprise a service, so we will re-use this same object. (aside: You might have thought that we would use _ReplicationControllers_, but they are tied to a specific binary, and generally you want to have a single leader even if you are in the process of performing a rolling update)
|
||||
|
||||
To perform leader election, we use two properties of all Kubernetes API objects:
|
||||
|
||||
* ResourceVersions - Every API object has a unique ResourceVersion, and you can use these versions to perform compare-and-swap on Kubernetes objects
|
||||
* Annotations - Every API object can be annotated with arbitrary key/value pairs to be used by clients.
|
||||
|
||||
Given these primitives, the code to use master election is relatively straightforward, and you can find it [here][1]. Let's run it ourselves.
|
||||
|
||||
```
|
||||
$ kubectl run leader-elector --image=gcr.io/google_containers/leader-elector:0.4 --replicas=3 -- --election=example
|
||||
```
|
||||
|
||||
This creates a leader election set with 3 replicas:
|
||||
|
||||
```
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
leader-elector-inmr1 1/1 Running 0 13s
|
||||
leader-elector-qkq00 1/1 Running 0 13s
|
||||
leader-elector-sgwcq 1/1 Running 0 13s
|
||||
```
|
||||
|
||||
To see which pod was chosen as the leader, you can access the logs of one of the pods, substituting one of your own pod's names in place of
|
||||
|
||||
```
|
||||
${pod_name}, (e.g. leader-elector-inmr1 from the above)
|
||||
|
||||
$ kubectl logs -f ${name}
|
||||
leader is (leader-pod-name)
|
||||
```
|
||||
… Alternately, you can inspect the endpoints object directly:
|
||||
|
||||
|
||||
_'example' is the name of the candidate set from the above kubectl run … command_
|
||||
```
|
||||
$ kubectl get endpoints example -o yaml
|
||||
```
|
||||
Now to validate that leader election actually works, in a different terminal, run:
|
||||
|
||||
```
|
||||
$ kubectl delete pods (leader-pod-name)
|
||||
```
|
||||
This will delete the existing leader. Because the set of pods is being managed by a replication controller, a new pod replaces the one that was deleted, ensuring that the size of the replicated set is still three. Via leader election one of these three pods is selected as the new leader, and you should see the leader failover to a different pod. Because pods in Kubernetes have a _grace period_ before termination, this may take 30-40 seconds.
|
||||
|
||||
The leader-election container provides a simple webserver that can serve on any address (e.g. http://localhost:4040). You can test this out by deleting the existing leader election group and creating a new one where you additionally pass in a --http=(host):(port) specification to the leader-elector image. This causes each member of the set to serve information about the leader via a webhook.
|
||||
|
||||
```
|
||||
# delete the old leader elector group
|
||||
$ kubectl delete rc leader-elector
|
||||
|
||||
# create the new group, note the --http=localhost:4040 flag
|
||||
$ kubectl run leader-elector --image=gcr.io/google_containers/leader-elector:0.4 --replicas=3 -- --election=example --http=0.0.0.0:4040
|
||||
|
||||
# create a proxy to your Kubernetes api server
|
||||
$ kubectl proxy
|
||||
```
|
||||
|
||||
You can then access:
|
||||
|
||||
|
||||
http://localhost:8001/api/v1/proxy/namespaces/default/pods/(leader-pod-name):4040/
|
||||
|
||||
|
||||
And you will see:
|
||||
|
||||
```
|
||||
{"name":"(name-of-leader-here)"}
|
||||
```
|
||||
#### Leader election with sidecars
|
||||
|
||||
|
||||
Ok, that's great, you can do leader election and find out the leader over HTTP, but how can you use it from your own application? This is where the notion of sidecars come in. In Kubernetes, Pods are made up of one or more containers. Often times, this means that you add sidecar containers to your main application to make up a Pod. (for a much more detailed treatment of this subject see my earlier blog post).
|
||||
|
||||
The leader-election container can serve as a sidecar that you can use from your own application. Any container in the Pod that's interested in who the current master is can simply access http://localhost:4040 and they'll get back a simple JSON object that contains the name of the current master. Since all containers in a Pod share the same network namespace, there's no service discovery required!
|
||||
|
||||
For example, here is a simple Node.js application that connects to the leader election sidecar and prints out whether or not it is currently the master. The leader election sidecar sets its identifier to `hostname` by default.
|
||||
|
||||
```
|
||||
var http = require('http');
|
||||
// This will hold info about the current master
|
||||
var master = {};
|
||||
|
||||
// The web handler for our nodejs application
|
||||
var handleRequest = function(request, response) {
|
||||
response.writeHead(200);
|
||||
response.end("Master is " + master.name);
|
||||
};
|
||||
|
||||
// A callback that is used for our outgoing client requests to the sidecar
|
||||
var cb = function(response) {
|
||||
var data = '';
|
||||
response.on('data', function(piece) { data = data + piece; });
|
||||
response.on('end', function() { master = JSON.parse(data); });
|
||||
};
|
||||
|
||||
// Make an async request to the sidecar at http://localhost:4040
|
||||
var updateMaster = function() {
|
||||
var req = http.get({host: 'localhost', path: '/', port: 4040}, cb);
|
||||
req.on('error', function(e) { console.log('problem with request: ' + e.message); });
|
||||
req.end();
|
||||
};
|
||||
|
||||
// Set up regular updates
|
||||
updateMaster();
|
||||
setInterval(updateMaster, 5000);
|
||||
|
||||
// set up the web server
|
||||
var www = http.createServer(handleRequest);
|
||||
www.listen(8080);
|
||||
```
|
||||
Of course, you can use this sidecar from any language that you choose that supports HTTP and JSON.
|
||||
|
||||
#### Conclusion
|
||||
|
||||
|
||||
Hopefully I've shown you how easy it is to build leader election for your distributed application using Kubernetes. In future installments we'll show you how Kubernetes is making building distributed systems even easier. In the meantime, head over to [Google Container Engine][2] or [kubernetes.io][3] to get started with Kubernetes.
|
||||
|
||||
[1]: https://github.com/kubernetes/contrib/pull/353
|
||||
[2]: https://cloud.google.com/container-engine/
|
||||
[3]: http://kubernetes.io/
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Why Kubernetes doesn’t use libnetwork "
|
||||
date: Friday, January 14, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Kubernetes has had a very basic form of network plugins since before version 1.0 was released — around the same time as Docker's [libnetwork](https://github.com/docker/libnetwork) and Container Network Model ([CNM](https://github.com/docker/libnetwork/blob/master/docs/design.md)) was introduced. Unlike libnetwork, the Kubernetes plugin system still retains its "alpha" designation. Now that Docker's network plugin support is released and supported, an obvious question we get is why Kubernetes has not adopted it yet. After all, vendors will almost certainly be writing plugins for Docker — we would all be better off using the same drivers, right?
|
||||
|
||||
Before going further, it's important to remember that Kubernetes is a system that supports multiple container runtimes, of which Docker is just one. Configuring networking is a facet of each runtime, so when people ask "will Kubernetes support CNM?" what they really mean is "will kubernetes support CNM drivers with the Docker runtime?" It would be great if we could achieve common network support across runtimes, but that’s not an explicit goal.
|
||||
|
||||
Indeed, Kubernetes has not adopted CNM/libnetwork for the Docker runtime. In fact, we’ve been investigating the alternative Container Network Interface ([CNI](https://github.com/appc/cni/blob/master/SPEC.md)) model put forth by CoreOS and part of the App Container ([appc](https://github.com/appc)) specification. Why? There are a number of reasons, both technical and non-technical.
|
||||
|
||||
First and foremost, there are some fundamental assumptions in the design of Docker's network drivers that cause problems for us.
|
||||
|
||||
Docker has a concept of "local" and "global" drivers. Local drivers (such as "bridge") are machine-centric and don’t do any cross-node coordination. Global drivers (such as "overlay") rely on [libkv](https://github.com/docker/libkv) (a key-value store abstraction) to coordinate across machines. This key-value store is another plugin interface, and is very low-level (keys and values, no semantic meaning). To run something like Docker's overlay driver in a Kubernetes cluster, we would either need cluster admins to run a whole different instance of [consul](https://github.com/hashicorp/consul), [etcd](https://github.com/coreos/etcd) or [zookeeper](https://zookeeper.apache.org/) (see [multi-host networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/)), or else we would have to provide our own libkv implementation that was backed by Kubernetes.
|
||||
|
||||
The latter sounds attractive, and we tried to implement it, but the libkv interface is very low-level, and the schema is defined internally to Docker. We would have to either directly expose our underlying key-value store or else offer key-value semantics (on top of our structured API which is itself implemented on a key-value system). Neither of those are very attractive for performance, scalability and security reasons. The net result is that the whole system would be significantly more complicated, when the goal of using Docker networking is to simplify things.
|
||||
|
||||
For users that are willing and able to run the requisite infrastructure to satisfy Docker global drivers and to configure Docker themselves, Docker networking should "just work." Kubernetes will not get in the way of such a setup, and no matter what direction the project goes, that option should be available. For default installations, though, the practical conclusion is that this is an undue burden on users and we therefore cannot use Docker's global drivers (including "overlay"), which eliminates a lot of the value of using Docker's plugins at all.
|
||||
|
||||
Docker's networking model makes a lot of assumptions that aren’t valid for Kubernetes. In docker versions 1.8 and 1.9, it includes a fundamentally flawed implementation of "discovery" that results in corrupted `/etc/hosts` files in containers ([docker #17190](https://github.com/docker/docker/issues/17190)) — and this cannot be easily turned off. In version 1.10 Docker is planning to [bundle a new DNS server](https://github.com/docker/docker/issues/17195), and it’s unclear whether this will be able to be turned off. Container-level naming is not the right abstraction for Kubernetes — we already have our own concepts of service naming, discovery, and binding, and we already have our own DNS schema and server (based on the well-established [SkyDNS](https://github.com/skynetservices/skydns)). The bundled solutions are not sufficient for our needs but are not disableable.
|
||||
|
||||
Orthogonal to the local/global split, Docker has both in-process and out-of-process ("remote") plugins. We investigated whether we could bypass libnetwork (and thereby skip the issues above) and drive Docker remote plugins directly. Unfortunately, this would mean that we could not use any of the Docker in-process plugins, "bridge" and "overlay" in particular, which again eliminates much of the utility of libnetwork.
|
||||
|
||||
On the other hand, CNI is more philosophically aligned with Kubernetes. It's far simpler than CNM, doesn't require daemons, and is at least plausibly cross-platform (CoreOS’s [rkt](https://coreos.com/rkt/docs/) container runtime supports it). Being cross-platform means that there is a chance to enable network configurations which will work the same across runtimes (e.g. Docker, Rocket, Hyper). It follows the UNIX philosophy of doing one thing well.
|
||||
|
||||
Additionally, it's trivial to wrap a CNI plugin and produce a more customized CNI plugin — it can be done with a simple shell script. CNM is much more complex in this regard. This makes CNI an attractive option for rapid development and iteration. Early prototypes have proven that it's possible to eject almost 100% of the currently hard-coded network logic in kubelet into a plugin.
|
||||
|
||||
We investigated [writing a "bridge" CNM driver](https://groups.google.com/forum/#!topic/kubernetes-sig-network/5MWRPxsURUw) for Docker that ran CNI drivers. This turned out to be very complicated. First, the CNM and CNI models are very different, so none of the "methods" lined up. We still have the global vs. local and key-value issues discussed above. Assuming this driver would declare itself local, we have to get info about logical networks from Kubernetes.
|
||||
|
||||
Unfortunately, Docker drivers are hard to map to other control planes like Kubernetes. Specifically, drivers are not told the name of the network to which a container is being attached — just an ID that Docker allocates internally. This makes it hard for a driver to map back to any concept of network that exists in another system.
|
||||
|
||||
This and other issues have been brought up to Docker developers by network vendors, and are usually closed as "working as intended" ([libnetwork #139](https://github.com/docker/libnetwork/issues/139), [libnetwork #486](https://github.com/docker/libnetwork/issues/486), [libnetwork #514](https://github.com/docker/libnetwork/pull/514), [libnetwork #865](https://github.com/docker/libnetwork/issues/865), [docker #18864](https://github.com/docker/docker/issues/18864)), even though they make non-Docker third-party systems more difficult to integrate with. Throughout this investigation Docker has made it clear that they’re not very open to ideas that deviate from their current course or that delegate control. This is very worrisome to us, since Kubernetes complements Docker and adds so much functionality, but exists outside of Docker itself.
|
||||
|
||||
For all of these reasons we have chosen to invest in CNI as the Kubernetes plugin model. There will be some unfortunate side-effects of this. Most of them are relatively minor (for example, `docker inspect` will not show an IP address), but some are significant. In particular, containers started by `docker run` might not be able to communicate with containers started by Kubernetes, and network integrators will have to provide CNI drivers if they want to fully integrate with Kubernetes. On the other hand, Kubernetes will get simpler and more flexible, and a lot of the ugliness of early bootstrapping (such as configuring Docker to use our bridge) will go away.
|
||||
|
||||
As we proceed down this path, we’ll certainly keep our eyes and ears open for better ways to integrate and simplify. If you have thoughts on how we can do that, we really would like to hear them — find us on [slack](http://slack.k8s.io/) or on our [network SIG mailing-list](https://groups.google.com/forum/#!forum/kubernetes-sig-network).
|
||||
|
||||
Tim Hockin, Software Engineer, Google
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " KubeCon EU 2016: Kubernetes Community in London "
|
||||
date: Thursday, February 24, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
KubeCon EU 2016 is the inaugural [European Kubernetes](http://kubernetes.io/) community conference that follows on the American launch in November 2015. KubeCon is fully dedicated to education and community engagement for [Kubernetes](http://kubernetes.io/) enthusiasts, production users and the surrounding ecosystem.
|
||||
|
||||
Come join us in London and hang out with hundreds from the Kubernetes community and experience a wide variety of deep technical expert talks and use cases.
|
||||
|
||||
Don’t miss these great speaker sessions at the conference:
|
||||
|
||||
* “Kubernetes Hardware Hacks: Exploring the Kubernetes API Through Knobs, Faders, and Sliders” by Ian Lewis and Brian Dorsey, Developer Advocate, Google -* [http://sched.co/6Bl3](http://sched.co/6Bl3)
|
||||
|
||||
* “rktnetes: what's new with container runtimes and Kubernetes” by Jonathan Boulle, Developer and Team Lead at CoreOS -* [http://sched.co/6BY7](http://sched.co/6BY7)
|
||||
|
||||
* “Kubernetes Documentation: Contributing, fixing issues, collecting bounties” by John Mulhausen, Lead Technical Writer, Google -* [http://sched.co/6BUP](http://sched.co/6BUP)
|
||||
* “[What is OpenStack's role in a Kubernetes world?](https://kubeconeurope2016.sched.org/event/6BYC/what-is-openstacks-role-in-a-kubernetes-world?iframe=yes&w=i:0;&sidebar=yes&bg=no#?iframe=yes&w=i:100;&sidebar=yes&bg=no)” By Thierry Carrez, Director of Engineering, OpenStack Foundation -* http://sched.co/6BYC
|
||||
* “A Practical Guide to Container Scheduling” by Mandy Waite, Developer Advocate, Google -* [http://sched.co/6BZa](http://sched.co/6BZa)
|
||||
|
||||
* “[Kubernetes in Production in The New York Times newsroom](https://kubeconeurope2016.sched.org/event/67f2/kubernetes-in-production-in-the-new-york-times-newsroom?iframe=yes&w=i:0;&sidebar=yes&bg=no#?iframe=yes&w=i:100;&sidebar=yes&bg=no)” Eric Lewis, Web Developer, New York Times -* [http://sched.co/67f2](http://sched.co/67f2)
|
||||
* “[Creating an Advanced Load Balancing Solution for Kubernetes with NGINX](https://kubeconeurope2016.sched.org/event/6Bc9/creating-an-advanced-load-balancing-solution-for-kubernetes-with-nginx?iframe=yes&w=i:0;&sidebar=yes&bg=no#?iframe=yes&w=i:100;&sidebar=yes&bg=no)” by Andrew Hutchings, Technical Product Manager, NGINX -* http://sched.co/6Bc9
|
||||
* And many more http://kubeconeurope2016.sched.org/
|
||||
|
||||
|
||||
Get your KubeCon EU [tickets here](https://ti.to/kubecon/kubecon-eu-2016).
|
||||
|
||||
Venue Location: CodeNode, 10 South Pl, London, United Kingdom
|
||||
Accommodations: [hotels](https://skillsmatter.com/contact-us#hotels)
|
||||
Website: [kubecon.io](https://www.kubecon.io/)
|
||||
Twitter: [@KubeConio](https://twitter.com/kubeconio) #KubeCon
|
||||
Google is a proud Diamond sponsor of KubeCon EU 2016. Come to London next month, March 10th & 11th, and visit booth #13 to learn all about Kubernetes, Google Container Engine (GKE) and Google Cloud Platform!
|
||||
|
||||
_KubeCon is organized by KubeAcademy, LLC, a community-driven group of developers focused on the education of developers and the promotion of Kubernetes._
|
||||
|
||||
— Sarah Novotny, Kubernetes Community Manager, Google
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes Community Meeting Notes - 20160204 "
|
||||
date: Wednesday, February 09, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
#### February 4th - rkt demo (congratulations on the 1.0, CoreOS!), eBay puts k8s on Openstack and considers Openstack on k8s, SIGs, and flaky test surge makes progress.
|
||||
|
||||
The Kubernetes contributing community meets most Thursdays at 10:00PT to discuss the project's status via a videoconference. Here are the notes from the latest meeting.
|
||||
|
||||
* Note taker: Rob Hirschfeld
|
||||
* Demo (20 min): CoreOS rkt + Kubernetes [Shaya Potter]
|
||||
* expect to see integrations w/ rkt & k8s in the coming months ("rkt-netes"). not integrated into the v1.2 release.
|
||||
* Shaya gave a demo (8 minutes into meeting for video reference)
|
||||
* CLI of rkt shown spinning up containers
|
||||
* [note: audio is garbled at points]
|
||||
* Discussion about integration w/ k8s & rkt
|
||||
* rkt community sync next week: https://groups.google.com/forum/#!topic/rkt-dev/FlwZVIEJGbY
|
||||
|
||||
* Dawn Chen:
|
||||
* The remaining issues of integrating rkt with kubernetes: 1) cAdvisor 2) DNS 3) bugs related to logging
|
||||
* But need more work on e2e test suites
|
||||
* Use Case (10 min): eBay k8s on OpenStack and OpenStack on k8s [Ashwin Raveendran]
|
||||
* eBay is currently running Kubernetes on OpenStack
|
||||
* Goal for eBay is to manage the OpenStack control plane w/ k8s. Goal would be to achieve upgrades
|
||||
* OpenStack Kolla creates containers for the control plane. Uses Ansible+Docker for management of the containers.
|
||||
* Working on k8s control plane management - Saltstack is proving to be a management challenge at the scale they want to operate. Looking for automated management of the k8s control plane.
|
||||
* SIG Report
|
||||
* Testing update [Jeff, Joe, and Erick]
|
||||
* Working to make the workflow about contributing to K8s easier to understand
|
||||
* [pull/19714][1] has flow chart of the bot flow to help users understand
|
||||
* Need a consistent way to run tests w/ hacking config scripts (you have to fake a Jenkins process right now)
|
||||
* Want to create necessary infrastructure to make test setup less flaky
|
||||
* want to decouple test start (single or full) from Jenkins
|
||||
* goal is to get to point where you have 1 script to run that can be pointed to any cluster
|
||||
* demo included Google internal views - working to try get that external.
|
||||
* want to be able to collect test run results
|
||||
* Bob Wise calls for testing infrastructure to be a blocker on v1.3
|
||||
* Long discussion about testing practices…
|
||||
* consensus that we want to have tests work over multiple platforms.
|
||||
* would be helpful to have a comprehensive state dump for test reports
|
||||
* "phone-home" to collect stack traces - should be available
|
||||
* 1.2 Release Watch
|
||||
* CoC [Sarah]
|
||||
* GSoC [Sarah]
|
||||
|
||||
To get involved in the Kubernetes community consider joining our [Slack channel][2], taking a look at the [Kubernetes project][3] on GitHub, or join the [Kubernetes-dev Google group][4]. If you're really excited, you can do all of the above and join us for the next community conversation — February 11th, 2016. Please add yourself or a topic you want to know about to the [agenda][5] and get a calendar invitation by joining [this group][6].
|
||||
|
||||
"https://youtu.be/IScpP8Cj0hw?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ"
|
||||
|
||||
|
||||
[1]: https://github.com/kubernetes/kubernetes/pull/19714
|
||||
[2]: http://slack.k8s.io/
|
||||
[3]: https://github.com/kubernetes/
|
||||
[4]: https://groups.google.com/forum/#!forum/kubernetes-dev
|
||||
[5]: https://docs.google.com/document/d/1VQDIAB0OqiSjIHI8AWMvSdceWhnz56jNpZrLs6o7NJY/edit#
|
||||
[6]: https://groups.google.com/forum/#!forum/kubernetes-community-video-chat
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes Community Meeting Notes - 20160128 "
|
||||
date: Wednesday, February 02, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
##### January 28 - 1.2 release update, Deis demo, flaky test surge and SIGs
|
||||
|
||||
The Kubernetes contributing community meets once a week to discuss the project's status via a videoconference. Here are the notes from the latest meeting.
|
||||
|
||||
Note taker: Erin Boyd
|
||||
* Discuss process around code freeze/code slush (TJ Goltermann)
|
||||
* Code wind down was happening during holiday (for 1.1)
|
||||
* Releasing ~ every 3 months
|
||||
* Build stability is still missing
|
||||
* Issue on Transparency (Bob Wise)
|
||||
* Email from Sarah for call to contribute (Monday, January 25)
|
||||
* Concern over publishing dates / understanding release schedule /etc…
|
||||
* Release targeted for early March
|
||||
* Where does one find information on the release schedule with the committed features?
|
||||
* For 1.2 - Send email / Slack to TJ
|
||||
* For 1.3 - Working on better process to communicate to the community
|
||||
* Twitter
|
||||
* Wiki
|
||||
* GitHub Milestones
|
||||
* How to better communicate issues discovered in the SIG
|
||||
* AI: People need to email the kubernetes-dev@ mailing list with summary of findings
|
||||
* AI: Each SIG needs a note taker
|
||||
* Release planning vs Release testing
|
||||
* Testing SIG lead Ike McCreery
|
||||
* Also part of the testing infrastructure team at Google
|
||||
* Community being able to integrate into the testing framework
|
||||
* Federated testing
|
||||
* Release Manager = David McMahon
|
||||
* Request to introduce him to the community meeting
|
||||
* Demo: Jason Hansen Deis
|
||||
* Implemented simple REST API to interact with the platform
|
||||
* Deis managed application (deployed via)
|
||||
* Source -\> image
|
||||
* Rolling upgrades -\> Rollbacks
|
||||
* AI: Jason will provide the slides & notes
|
||||
* Slides: [https://speakerdeck.com/slack/kubernetes-community-meeting-demo-january-28th-2016](https://speakerdeck.com/slack/kubernetes-community-meeting-demo-january-28th-2016)
|
||||
* Alpha information: [https://groups.google.com/forum/#!topic/deis-users/Qhia4DD2pv4](https://groups.google.com/forum/#!topic/deis-users/Qhia4DD2pv4)
|
||||
* Adding an administrative component (dashboard)
|
||||
* Helm wraps kubectl
|
||||
* Testing
|
||||
* Called for community interaction
|
||||
* Need to understand friction points from community
|
||||
* Better documentation
|
||||
* Better communication on how things “should work"
|
||||
* Internally, Google is having daily calls to resolve test flakes
|
||||
* Started up SIG testing meetings (Tuesday at 10:30 am PT)
|
||||
* Everyone wants it, but no one wants to pony up the time to make it happen
|
||||
* Google is dedicating headcount to it (3-4 people, possibly more)
|
||||
* [https://groups.google.com/forum/?hl=en#!forum/kubernetes-sig-testing](https://groups.google.com/forum/?hl=en#%21forum/kubernetes-sig-testing)
|
||||
* Best practices for labeling
|
||||
* Are there tools built on top of these to leverage
|
||||
* AI: Generate artifact for labels and what they do (Create doc)
|
||||
* Help Wanted Label - good for new community members
|
||||
* Classify labels for team and area
|
||||
* User experience, test infrastructure, etc..
|
||||
* SIG Config (not about deployment)
|
||||
* Any interest in ansible, etc.. type
|
||||
* SIG Scale meeting (Bob Wise & Tim StClair)
|
||||
* Tests related to performance SLA get relaxed in order to get the tests to pass
|
||||
* exposed process issues
|
||||
* AI: outline of a proposal for a notice policy if things are being changed that are critical to the system (Bob Wise/Samsung)
|
||||
* Create a Best Practices of set of constants into well documented place
|
||||
|
||||
To get involved in the Kubernetes community consider joining our [Slack channel](http://slack.k8s.io/), taking a look at the [Kubernetes project](https://github.com/kubernetes/) on GitHub, or join the [Kubernetes-dev Google group](https://groups.google.com/forum/#!forum/kubernetes-dev). If you’re really excited, you can do all of the above and join us for the next community conversation — February 4th, 2016. Please add yourself or a topic you want to know about to the [agenda](https://docs.google.com/document/d/1VQDIAB0OqiSjIHI8AWMvSdceWhnz56jNpZrLs6o7NJY/edit) and get a calendar invitation by joining [this group](https://groups.google.com/forum/#!forum/kubernetes-community-video-chat).
|
||||
|
||||
The full recording is available on YouTube in the growing archive of [Kubernetes Community Meetings](https://www.youtube.com/playlist?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ).
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes Community Meeting Notes - 20160211 "
|
||||
date: Wednesday, February 16, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
##### February 11th - Pangaea Demo, #AWS SIG formed, release automation and documentation team introductions. 1.2 update and planning 1.3.
|
||||
|
||||
|
||||
The Kubernetes contributing community meets most Thursdays at 10:00PT to discuss the project's status via videoconference. Here are the notes from the latest meeting.
|
||||
|
||||
Note taker: Rob Hirschfeld
|
||||
* Demo: [Pangaea][1] [Shahidh K Muhammed, Tanmai Gopal, and Akshaya Acharya]
|
||||
|
||||
* Microservices packages
|
||||
* Focused on Application developers
|
||||
* Demo at recording +4 minutes
|
||||
* Single node kubernetes cluster — runs locally using Vagrant CoreOS image
|
||||
* Single user/system cluster allows use of DNS integration (unlike Compose)
|
||||
* Can run locally or in cloud
|
||||
* *SIG Report:*
|
||||
* Release Automation and an introduction to David McMahon
|
||||
* Docs and k8s website redesign proposal and an introduction to John Mulhausen
|
||||
* This will allow the system to build docs correctly from Github w/ minimal effort
|
||||
* Will be check-in triggered
|
||||
* Getting website style updates
|
||||
* Want to keep authoring really light
|
||||
* There will be some automated checks
|
||||
* Next week: preview of the new website during the community meeting
|
||||
* [@goltermann] 1.2 Release Watch (time +34 minutes)
|
||||
* code slush date: 2/9/2016
|
||||
* no major features or refactors accepted
|
||||
* discussion about release criteria: we will hold release date for bugs
|
||||
* Testing flake surge is over (one time event and then maintain test stability)
|
||||
* 1.3 Planning (time +40 minutes)
|
||||
* working to cleanup the Github milestones — they should be a source of truth. you can use Github for bug reporting
|
||||
* push off discussion while 1.2 crunch is under
|
||||
* Framework
|
||||
* dates
|
||||
* prioritization
|
||||
* feedback
|
||||
* Design Review meetings
|
||||
* General discussion about the PRD process — still at the beginning stages
|
||||
* Working on a contributor conference
|
||||
* Rob suggested tracking relationships between PRD/Mgmr authors
|
||||
* PLEASE DO REVIEWS — talked about the way people are authorized to +2 reviews.
|
||||
|
||||
|
||||
To get involved in the Kubernetes community consider joining our [Slack channel,][2] taking a look at the [Kubernetes][3] project on GitHub, or join the [Kubernetes-dev Google group][4]. If you're really excited, you can do all of the above and join us for the next community conversation — February 18th, 2016. Please add yourself or a topic you want to know about to the [agenda][5] and get a calendar invitation by joining [this group][6].
|
||||
|
||||
The full recording is available on YouTube in the growing archive of [Kubernetes Community Meetings][7].
|
||||
|
||||
[1]: http://hasura.io/blog/pangaea-point-and-shoot-kubernetes/
|
||||
[2]: http://slack.k8s.io/
|
||||
[3]: https://github.com/kubernetes/
|
||||
[4]: https://groups.google.com/forum/#!forum/kubernetes-dev
|
||||
[5]: https://docs.google.com/document/d/1VQDIAB0OqiSjIHI8AWMvSdceWhnz56jNpZrLs6o7NJY/edit
|
||||
[6]: https://groups.google.com/forum/#!forum/kubernetes-community-video-chat
|
||||
[7]: https://www.youtube.com/playlist?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ
|
||||
|
|
@ -0,0 +1,95 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " ShareThis: Kubernetes In Production "
|
||||
date: Friday, February 11, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Today’s guest blog post is by Juan Valencia, Technical Lead at ShareThis, a service that helps website publishers drive engagement and consumer sharing behavior across social networks.
|
||||
|
||||
ShareThis has grown tremendously since its first days as a tiny widget that allowed you to share to your favorite social services. It now serves over 4.5 million domains per month, helping publishers create a more authentic digital experience.
|
||||
|
||||
Fast growth came with a price. We leveraged technical debt to scale fast and to grow our products, particularly when it came to infrastructure. As our company expanded, the infrastructure costs mounted as well - both in terms of inefficient utilization and in terms of people costs. About 1 year ago, it became clear something needed to change.
|
||||
|
||||
### TL;DR

Kubernetes has been a key component for us to reduce technical debt in our infrastructure by:
|
||||
|
||||
* Fostering the Adoption of Docker
|
||||
* Simplifying Container Management
|
||||
* Onboarding Developers On Infrastructure
|
||||
* Unlocking Continuous Integration and Delivery
|
||||
We accomplished this by radically adopting Kubernetes and switching our DevOps team to a Cloud Platform team that worked in terms of containers and microservices. This included creating some tools to get around our own legacy debt.
|
||||
|
||||
### The Problem
|
||||
|
||||
Alas, the cloud was new and we were young. We started with a traditional data-center mindset. We managed all of our own services: MySQL, Cassandra, Aerospike, Memcache, you name it. We set up VM’s just like you would traditional servers, installed our applications on them, and managed them in Nagios or Ganglia.
|
||||
|
||||
Unfortunately, this way of thinking was antithetical to a cloud-centric approach. Instead of thinking in terms of services, we were thinking in terms of servers. Instead of using modern cloud approaches such as autoscaling, microservices, or even managed VM’s, we were thinking in terms of scripted setups, server deployments, and avoiding vendor lock-in.
|
||||
|
||||
These ways of thinking were not bad per se, they were simply inefficient. They weren’t taking advantage of the changes to the cloud that were happening very quickly. It also meant that when changes needed to take place, we were treating those changes as big slow changes to a datacenter, rather than small fast changes to the cloud.
|
||||
|
||||
### The Solution
|
||||
|
||||
#### Kubernetes As A Tool To Foster Docker Adoption
|
||||
|
||||
As Docker became more of a force in our industry, engineers at ShareThis also started experimenting with it to good effect. It soon became obvious that we needed to have a working container for every app in our company just so we could simplify testing in our development environment.
|
||||
|
||||
Some apps moved quickly into Docker because they were simple and had few dependencies. For those that had small dependencies, we were able to manage using Fig (Fig was the original name of Docker Compose). Still, many of our data pipelines or interdependent apps were too gnarly to be directly dockerized. We still wanted to do it, but Docker was not enough.
|
||||
|
||||
In late 2015, we were frustrated enough with our legacy infrastructure that we finally bit the bullet. We evaluated Docker’s tools, ECS, Kubernetes, and Mesosphere. It was quickly obvious that Kubernetes was in a more stable and user friendly state than its competitors for our infrastructure. As a company, we could solidify our infrastructure on Docker by simply setting the goal of having all of our infrastructure on Kubernetes.
|
||||
|
||||
Engineers were skeptical at first. However, once they saw applications scale effortlessly into hundreds of instances per application, they were hooked. Now, not only were there pain points driving us forward into Docker and by extension Kubernetes, but there was genuine excitement for the technology pulling us in. This has allowed us to make an incredibly difficult migration fairly quickly. We now run Kubernetes in multiple regions on about 65 large VMs and increasing to over 100 in the next couple months. Our Kubernetes cluster currently processes 800 million requests per day with the plan to process over 2 billion requests per day in the coming months.
|
||||
|
||||
#### Kubernetes As A Tool To Manage Containers
|
||||
|
||||
Our earliest use of Docker was promising for development, but not so much so for production. The biggest friction point was the inability to manage Docker components at scale. Knowing which containers were running where, what version of a deployment was running, what state an app was in, how to manage subnets and VPCs, etc, plagued any chance of it going to production. The tooling required would have been substantial.
|
||||
|
||||
|
||||
|
||||
When you look at Kubernetes, there are several key features that were immediately attractive:
|
||||
|
||||
* It is easy to install on AWS (where all our apps were running)
|
||||
* There is a direct path from a Dockerfile to a replication controller through a yaml/json file
|
||||
* Pods are able to scale in number easily
|
||||
* We can easily scale the number of VM’s running on AWS in a Kubernetes cluster
|
||||
* Rolling deployments and rollback are built into the tooling
|
||||
* Each pod gets monitored through health checks
|
||||
* Service endpoints are managed by the tool
|
||||
* There is an active and vibrant community
|
||||
|
||||
|
||||
|
||||
Unfortunately, one of the biggest pain points was that the tooling didn’t solve our existing legacy infrastructure, it just provided an infrastructure to move onto. There were still a variety of network quirks which disallowed us from directly moving our applications onto a new VPC. In addition, the reworking of so many applications required developers to jump onto problems that have classically been solved by sys admins and operations teams.
|
||||
|
||||
#### Kubernetes As A Tool For Onboarding Developers On Infrastructure
|
||||
|
||||
When we decided to make the switch from what was essentially a Chef-run setup to Kubernetes, I do not think we understood all of the pain points that we would hit. We ran our servers in a variety of different ways in a variety of different network configurations that were considerably different than the clean setup that you find on a fresh Kubernetes VPC.
|
||||
|
||||
In production we ran in both AWS VPCs and AWS classic across multiple regions. This means that we managed several subnets with different access controls across different applications. Our most recent applications were also very secure, having no public endpoints. This meant that we had a combination of VPC peering, network address translation (NAT), and proxies running in varied configurations.
|
||||
|
||||
In the Kubernetes world, there’s only the VPC. All the pods can theoretically talk to each other, and services endpoints are explicitly defined. It’s easy for the developer to gloss over some of the details and it removes the need for operations (mostly).
|
||||
|
||||
We made the decision to convert all of our infrastructure / DevOps developers into application developers (really!). We had already started hiring them on the basis of their development skills rather than their operational skills anyway, so perhaps that is not as wild as it sounds.
|
||||
|
||||
We then made the decision to onboard our entire engineering organization onto Operations. Developers are flexible, they enjoy challenges, and they enjoy learning. It was remarkable. After 1 month, our organization went from having a few DevOps folks, to having every engineer capable of modifying our architecture.
|
||||
|
||||
The training ground for onboarding on networking, productionization, problem solving, root cause analysis, etc, was getting Kubernetes into prod at scale. After the first month, I was biting my nails and worrying about our choices. After 2 months, it looked like it might some day be viable. After 3 months, we were deploying 10 times per week. After 4 months, 40 apps per week. Only 30% of our apps have been migrated, yet the gains are not only remarkable, they are astounding. Kubernetes allowed us to go from an infrastructure-is-slowing-us-down-ugh! organization, to an infrastructure-is-speeding-us-up-yay! organization.
|
||||
|
||||
#### Kubernetes As A Means To Unlock Continuous Integration And Delivery
|
||||
|
||||
How did we get to 40+ deployments per week? Put simply, continuous integration and deployment (CI/CD) came as a byproduct of our migration. Our first application in Kubernetes was Jenkins, and every app that went in also was added to Jenkins. As we moved forward, we made Jenkins more automatic until pods were being added and taken from Kubernetes faster than we could keep track.
|
||||
|
||||
Interestingly, our problems with scaling are now about wanting to push out too many changes at once and people having to wait until their turn. Our goal is to get 100 deployments per week through the new infrastructure. This is achievable if we can continue to execute on our migration and on our commitment to a CI/CD process on Kubernetes and Jenkins.
|
||||
|
||||
### Next Steps
|
||||
|
||||
We need to finish our migration. At this point the problems are mostly solved, the biggest difficulties are in the tedium of the task at hand. To move things out of our legacy infrastructure meant changing the network configurations to allow access to and from the Kubernetes VPC and across the regions. This is still a very real pain, and one we continue to address.
|
||||
|
||||
Some services do not play well in Kubernetes -- think stateful distributed databases. Luckily, we can usually migrate those to a 3rd party who will manage it for us. At the end of this migration, we will only be running pods on Kubernetes. Our infrastructure will become much simpler.
|
||||
|
||||
All these changes do not come for free; committing our entire infrastructure to Kubernetes means that we need to have Kubernetes experts. Our team has been unblocked in terms of infrastructure and they are busy adding business value through application development (as they should). However, we do not (yet) have committed engineers to stay up to date with changes to Kubernetes and cloud computing.
|
||||
|
||||
As such, we have transferred one engineer to a new “cloud platform team” and will hire a couple of others (have I mentioned [we’re hiring](http://www.sharethis.com/hiring.html)!). They will be responsible for developing tools that we can use to interface well with Kubernetes and manage all of our cloud resources. In addition, they will be working in the Kubernetes source code, part of Kubernetes SIGs, and ideally, pushing code into the open source project.
|
||||
|
||||
### Summary
|
||||
All in all, while the move to Kubernetes initially seemed daunting, it was far less complicated and disruptive than we thought. And the reward at the other end was a company that could respond as fast as our customers wanted.

_Editor's note: at a recent Kubernetes meetup, the team at ShareThis gave a talk about their production use of Kubernetes. Video is embedded below._
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " State of the Container World, January 2016 "
|
||||
date: Tuesday, February 01, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
At the start of the new year, we sent out a survey to gauge the state of the container world. We’re ready to send the [February edition](https://docs.google.com/forms/d/13yxxBqb5igUhwrrnDExLzZPjREiCnSs-AH-y4SSZ-5c/viewform), but before we do, let’s take a look at the January data from the 119 responses (thank you for participating!).
|
||||
|
||||
A note about these numbers: First, you may notice that the numbers don’t add up to 100%, the choices were not exclusive in most cases and so percentages given are the percentage of all respondents who selected a particular choice. Second, while we attempted to reach a broad cross-section of the cloud community, the survey was initially sent out via Twitter to followers of [@brendandburns](https://twitter.com/brendandburns), [@kelseyhightower](https://twitter.com/kelseyhightower), [@sarahnovotny](https://twitter.com/sarahnovotny), [@juliaferraioli](https://twitter.com/juliaferraioli), [@thagomizer\_rb](https://twitter.com/thagomizer_rb), so the audience is likely not a perfect cross-section. We’re working to broaden our sample size (have I mentioned our February survey? [Come take it now](https://docs.google.com/forms/d/13yxxBqb5igUhwrrnDExLzZPjREiCnSs-AH-y4SSZ-5c/viewform)).
|
||||
|
||||
#### Now, without further ado, the data:
|
||||
First off, lots of you are using containers! 71% are currently using containers, while 24% of you are considering using them soon. Obviously this indicates a somewhat biased sample set. Numbers for container usage in the broader community vary, but are definitely lower than 71%. Consequently, take all of the rest of these numbers with a grain of salt.
|
||||
|
||||
So what are folks using containers for? More than 80% of respondents are using containers for development, while only 50% are using containers for production. But you plan to move to production soon, as 78% of container users said that you were planning on moving to production sometime soon.
|
||||
|
||||
Where do you deploy containers? Your laptop was the clear winner here, with 53% of folks deploying to laptops. Next up was 44% of people running on their own VMs (Vagrant? OpenStack? we’ll try dive into this in the February survey), followed by 33% of folks running on physical infrastructure, and 31% on public cloud VMs.
|
||||
|
||||
And how are you deploying containers? 54% of you are using Kubernetes, awesome to see, though likely somewhat biased by the sample set (see the notes above), possibly more surprising, 45% of you are using shell scripts. Is it because of the extensive (and awesome) Bash scripting going on in the Kubernetes repository? Go on, you can tell me the truth… Rounding out the numbers, 25% are using CAPS (Chef/Ansible/Puppet/Salt) systems, and roughly 13% are using Docker Swarm, Mesos or other systems.
|
||||
|
||||
Finally, we asked people for free-text answers about the challenges of working with containers. Some of the most interesting answers are grouped and reproduced here:
|
||||
|
||||
|
||||
###### Development Complexity
|
||||
|
||||
- “Silo'd development environments / workflows can be fragmented, ease of access to tools like logs is available when debugging containers but not intuitive at times, massive amounts of knowledge is required to grasp the whole infrastructure stack and best practices from say deploying / updating kubernetes, to underlying networking etc.”
|
||||
- “Migrating developer workflow. People uninitiated with containers, volumes, etc just want to work.”
|
||||
|
||||
|
||||
###### Security
|
||||
|
||||
- “Network Security”
|
||||
- “Secrets”
|
||||
|
||||
|
||||
###### Immaturity
|
||||
|
||||
- “Lack of a comprehensive non-proprietary standard (i.e. non-Docker) like e.g runC / OCI”
|
||||
- “Still early stage with few tools and many missing features.”
|
||||
- “Poor CI support, a lot of tooling still in very early days.”
|
||||
- "We've never done it that way before."
|
||||
|
||||
|
||||
###### Complexity
|
||||
|
||||
- “Networking support, providing ip per pod on bare metal for kubernetes”
|
||||
- “Clustering is still too hard”
|
||||
- “Setting up Mesos and Kubernetes too damn complicated!!”
|
||||
|
||||
###### Data
|
||||
|
||||
- “Lack of flexibility of volumes (which is the same problem with VMs, physical hardware, etc)”
|
||||
- “Persistency”
|
||||
- “Storage”
|
||||
- “Persistent Data”
|
||||
|
||||
_Download the full survey results [here](https://docs.google.com/spreadsheets/d/18wZe7wEDvRuT78CEifs13maXoSGem_hJvbOSmsuJtkA/pub?gid=530616014&single=true&output=csv) (CSV file)._
|
||||
|
||||
_Update: 2/1/2016 - Fixed the CSV link._
|
||||
|
||||
-- Brendan Burns, Software Engineer, Google
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes Community Meeting Notes - 20160218 "
|
||||
date: Wednesday, February 23, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
##### February 18th - kmachine demo, clusterops SIG formed, new k8s.io website preview, 1.2 update and planning 1.3
|
||||
The Kubernetes contributing community meets most Thursdays at 10:00PT to discuss the project's status via videoconference. Here are the notes from the latest meeting.
|
||||
|
||||
* Note taker: Rob Hirschfeld
|
||||
* Demo (10 min): [kmachine][1] [Sebastien Goasguen]
|
||||
* started :01 intro video
|
||||
* looking to create mirror of Docker tools for Kubernetes (similar to machine, compose, etc)
|
||||
* kmachine (forked from Docker Machine, so has the same endpoints)
|
||||
* Use Case (10 min): started at :15
|
||||
* SIG Report starter
|
||||
* Cluster Ops launch meeting Friday ([doc][2]). [Rob Hirschfeld]
|
||||
* Time Zone Discussion [:22]
|
||||
* This timezone does not work for Asia.
|
||||
* Considering rotation - once per month
|
||||
* Likely 5 or 6 PT
|
||||
* Rob suggested moving the regular meeting up a little
|
||||
* k8s.io website preview [John Mulhausen] [:27]
|
||||
* using github for docs. you can fork and do a pull request against the site
|
||||
* will be its own kubernetes organization BUT not in the code repo
|
||||
* Google will offer a "doc bounty" where you can get GCP credits for working on docs
|
||||
* Uses Jekyll to generate the site (e.g. the ToC)
|
||||
* Principle will be to be 100% GitHub Pages; no script trickery or plugins, just fork/clone, edit, and push
|
||||
* Hope to launch at Kubecon EU
|
||||
* Home Page Only Preview: http://kub.unitedcreations.xyz
|
||||
* 1.2 Release Watch [T.J. Goltermann] [:38]
|
||||
* 1.3 Planning update [T.J. Goltermann]
|
||||
* GSoC participation -- deadline 2/19 [Sarah Novotny]
|
||||
* March 10th meeting? [Sarah Novotny]
|
||||
|
||||
To get involved in the Kubernetes community consider joining our [Slack channel][3], taking a look at the [Kubernetes project][4] on GitHub, or join the [Kubernetes-dev Google group][5]. If you're really excited, you can do all of the above and join us for the next community conversation — February 25th, 2016. Please add yourself or a topic you want to know about to the [agenda][6] and get a calendar invitation by joining [this group][7].
|
||||
|
||||
"https://youtu.be/L5BgX2VJhlY?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ"
|
||||
|
||||
_\-- Kubernetes Community_
|
||||
|
||||
[1]: https://github.com/skippbox/kmachine
|
||||
[2]: https://docs.google.com/document/d/1IhN5v6MjcAUrvLd9dAWtKcGWBWSaRU8DNyPiof3gYMY/edit#
|
||||
[3]: http://slack.k8s.io/
|
||||
[4]: https://github.com/kubernetes/
|
||||
[5]: https://groups.google.com/forum/#!forum/kubernetes-dev
|
||||
[6]: https://docs.google.com/document/d/1VQDIAB0OqiSjIHI8AWMvSdceWhnz56jNpZrLs6o7NJY/edit#
|
||||
[7]: https://groups.google.com/forum/#!forum/kubernetes-community-video-chat
|
||||
|
|
@ -0,0 +1,174 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " 1000 nodes and beyond: updates to Kubernetes performance and scalability in 1.2 "
|
||||
date: Tuesday, March 28, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor's note: this is the first in a [series of in-depth posts](http://blog.kubernetes.io/2016/03/five-days-of-kubernetes-12.html) on what's new in Kubernetes 1.2_
|
||||
|
||||
We're proud to announce that with the [release of 1.2](http://blog.kubernetes.io/2016/03/Kubernetes-1.2-even-more-performance-upgrades-plus-easier-application-deployment-and-management-.html), Kubernetes now supports 1000-node clusters, with a reduction of 80% in 99th percentile tail latency for most API operations. This means in just six months, we've increased our overall scale by 10 times while maintaining a great user experience — the 99th percentile pod startup times are less than 3 seconds, and 99th percentile latency of most API operations is tens of milliseconds (the exception being LIST operations, which take hundreds of milliseconds in very large clusters).
|
||||
|
||||
Words are fine, but nothing speaks louder than a demo. Check this out!
|
||||
|
||||
|
||||
|
||||
In the above video, you saw the cluster scale up to 10 M queries per second (QPS) over 1,000 nodes, including a rolling update, with zero downtime and no impact to tail latency. That’s big enough to be one of the top 100 sites on the Internet!
|
||||
|
||||
In this blog post, we’ll cover the work we did to achieve this result, and discuss some of our future plans for scaling even higher.
|
||||
|
||||
|
||||
### Methodology
|
||||
We benchmark Kubernetes scalability against the following Service Level Objectives (SLOs):
|
||||
|
||||
1. **API responsiveness** <sup>[1](https://www.blogger.com/blogger.g?blogID=112706738355446097#1)</sup> 99% of all API calls return in less than 1s
|
||||
2. **Pod startup time** : 99% of pods and their containers (with pre-pulled images) start within 5s.
|
||||
We say Kubernetes scales to a certain number of nodes only if both of these SLOs are met. We continuously collect and report the measurements described above as part of the project test framework. This battery of tests breaks down into two parts: API responsiveness and Pod Startup Time.
|
||||
|
||||
|
||||
### API responsiveness for user-level abstractions[2](https://www.blogger.com/blogger.g?blogID=112706738355446097#2) {: .sup }
|
||||
Kubernetes offers high-level abstractions for users to represent their applications. For example, the ReplicationController is an abstraction representing a collection of [pods](http://kubernetes.io/docs/user-guide/pods/). Listing all ReplicationControllers or listing all pods from a given ReplicationController is a very common use case. On the other hand, there is little reason someone would want to list all pods in the system — for example, 30,000 pods (1000 nodes with 30 pods per node) represent ~150MB of data (~5kB/pod \* 30k pods). So this test uses ReplicationControllers.
|
||||
|
||||
For this test (assuming N to be number of nodes in the cluster), we:
|
||||
|
||||
1. Create roughly 3xN ReplicationControllers of different sizes (5, 30 and 250 replicas), which altogether have 30xN replicas. We spread their creation over time (i.e. we don’t start all of them at once) and wait until all of them are running.
|
||||
|
||||
2. Perform a few operations on every ReplicationController (scale it, list all its instances, etc.), spreading those over time, and measuring the latency of each operation. This is similar to what a real user might do in the course of normal cluster operation.
|
||||
|
||||
3. Stop and delete all ReplicationControllers in the system.
|
||||
For results of this test see the “Metrics for Kubernetes 1.2” section below.
|
||||
|
||||
For the v1.3 release, we plan to extend this test by also creating Services, Deployments, DaemonSets, and other API objects.
|
||||
|
||||
|
||||
### Pod startup end-to-end latency[3](https://www.blogger.com/blogger.g?blogID=112706738355446097#3)
|
||||
Users are also very interested in how long it takes Kubernetes to schedule and start a pod. This is true not only upon initial creation, but also when a ReplicationController needs to create a replacement pod to take over from one whose node failed.
|
||||
|
||||
We (assuming N to be the number of nodes in the cluster):
|
||||
|
||||
1. Create a single ReplicationController with 30xN replicas and wait until all of them are running. We are also running high-density tests, with 100xN replicas, but with fewer nodes in the cluster.
|
||||
|
||||
2. Launch a series of single-pod ReplicationControllers - one every 200ms. For each, we measure “total end-to-end startup time” (defined below).
|
||||
|
||||
3. Stop and delete all pods and replication controllers in the system.
|
||||
We define “total end-to-end startup time” as the time from the moment the client sends the API server a request to create a ReplicationController, to the moment when “running & ready” pod status is returned to the client via watch. That means that “pod startup time” includes the ReplicationController being created and in turn creating a pod, scheduler scheduling that pod, Kubernetes setting up intra-pod networking, starting containers, waiting until the pod is successfully responding to health-checks, and then finally waiting until the pod has reported its status back to the API server and then API server reported it via watch to the client.
|
||||
|
||||
While we could have decreased the “pod startup time” substantially by excluding for example waiting for report via watch, or creating pods directly rather than through ReplicationControllers, we believe that a broad definition that maps to the most realistic use cases is the best for real users to understand the performance they can expect from the system.
|
||||
|
||||
|
||||
### Metrics from Kubernetes 1.2
|
||||
|
||||
So what was the result? We run our tests on Google Compute Engine, setting the size of the master VM based on the size of the Kubernetes cluster. In particular for 1000-node clusters we use an n1-standard-32 VM for the master (32 cores, 120GB RAM).
|
||||
|
||||
|
||||
#### API responsiveness
|
||||
The following two charts present a comparison of 99th percentile API call latencies for the Kubernetes 1.2 release and the 1.0 release on 100-node clusters. (Smaller bars are better)
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
We present results for LIST operations separately, since these latencies are significantly higher. Note that we slightly modified our tests in the meantime, so running current tests against v1.0 would result in higher latencies than they used to.
|
||||
|
||||
|
||||

|
||||
|
||||
We also ran these tests against 1000-node clusters. Note: We did not support clusters larger than 100 on GKE, so we do not have metrics to compare these results to. However, customers have reported running on 1,000+ node clusters since Kubernetes 1.0.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
Since LIST operations are significantly larger, we again present them separately.

All latencies, in both cluster sizes, are well within our 1 second SLO.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
### Pod startup end-to-end latency
|
||||
The results for “pod startup latency” (as defined in the “Pod-Startup end-to-end latency” section) are presented in the following graph. For reference we are presenting also results from v1.0 for 100-node clusters in the first part of the graph.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
As you can see, we substantially reduced tail latency in 100-node clusters, and now deliver low pod startup latency up to the largest cluster sizes we have measured. It is noteworthy that the metrics for 1000-node clusters, for both API latency and pod startup latency, are generally better than those reported for 100-node clusters just six months ago!
|
||||
|
||||
|
||||
### How did we make these improvements?
|
||||
|
||||
To make these significant gains in scale and performance over the past six months, we made a number of improvements across the whole system. Some of the most important ones are listed below.
|
||||
|
||||
|
||||
- _ **Created a “read cache” at the API server level ** _
|
||||
([https://github.com/kubernetes/kubernetes/issues/15945](https://github.com/kubernetes/kubernetes/issues/15945) )
|
||||
|
||||
Since most Kubernetes control logic operates on an ordered, consistent snapshot kept up-to-date by etcd watches (via the API server), a slight delay in that arrival of that data has no impact on the correct operation of the cluster. These independent controller loops, distributed by design for extensibility of the system, are happy to trade a bit of latency for an increase in overall throughput.
|
||||
|
||||
In Kubernetes 1.2 we exploited this fact to improve performance and scalability by adding an API server read cache. With this change, the API server’s clients can read data from an in-memory cache in the API server instead of reading it from etcd. The cache is updated directly from etcd via watch in the background. Those clients that can tolerate latency in retrieving data (usually the lag of cache is on the order of tens of milliseconds) can be served entirely from cache, reducing the load on etcd and increasing the throughput of the server. This is a continuation of an optimization begun in v1.1, where we added support for serving watch directly from the API server instead of etcd:[https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/apiserver-watch.md](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/apiserver-watch.md).
|
||||
|
||||
Thanks to contributions from Wojciech Tyczynski at Google and Clayton Coleman and Timothy St. Clair at Red Hat, we were able to join careful system design with the unique advantages of etcd to improve the scalability and performance of Kubernetes.
|
||||
- **Introduce a “Pod Lifecycle Event Generator” (PLEG) in the Kubelet** ([https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/pod-lifecycle-event-generator.md](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/pod-lifecycle-event-generator.md))
|
||||
|
||||
Kubernetes 1.2 also improved density from a pods-per-node perspective — for v1.2 we test and advertise up to 100 pods on a single node (vs 30 pods in the 1.1 release). This improvement was possible because of diligent work by the Kubernetes community through an implementation of the Pod Lifecycle Event Generator (PLEG).
|
||||
|
||||
The Kubelet (the Kubernetes node agent) has a worker thread per pod which is responsible for managing the pod’s lifecycle. In earlier releases each worker would periodically poll the underlying container runtime (Docker) to detect state changes, and perform any necessary actions to ensure the node’s state matched the desired state (e.g. by starting and stopping containers). As pod density increased, concurrent polling from each worker would overwhelm the Docker runtime, leading to serious reliability and performance issues (including additional CPU utilization which was one of the limiting factors for scaling up).
|
||||
|
||||
To address this problem we introduced a new Kubelet subcomponent — the PLEG — to centralize state change detection and generate lifecycle events for the workers. With concurrent polling eliminated, we were able to lower the steady-state CPU usage of Kubelet and the container runtime by 4x. This also allowed us to adopt a shorter polling period, so as to detect and react to changes more quickly.
|
||||
 
|
||||
|
||||
|
||||
- **Improved scheduler throughput** Kubernetes community members from CoreOS (Hongchao Deng and Xiang Li) helped to dive deep into the Kubernetes scheduler and dramatically improve throughput without sacrificing accuracy or flexibility. They improved throughput for scheduling 30,000 pods by nearly 1400%! You can read a great blog post on how they approached the problem here: [https://coreos.com/blog/improving-kubernetes-scheduler-performance.html](https://coreos.com/blog/improving-kubernetes-scheduler-performance.html)
|
||||
|
||||
- **A more efficient JSON parser** Go’s standard library includes a flexible and easy-to-use JSON parser that can encode and decode any Go struct using the reflection API. But that flexibility comes with a cost — reflection allocates lots of small objects that have to be tracked and garbage collected by the runtime. Our profiling bore that out, showing that a large chunk of both client and server time was spent in serialization. Given that our types don’t change frequently, we suspected that a significant amount of reflection could be bypassed through code generation.
|
||||
|
||||
After surveying the Go JSON landscape and conducting some initial tests, we found the [ugorji codec](https://github.com/ugorji/go) library offered the most significant speedups - a 200% improvement in encoding and decoding JSON when using generated serializers, with a significant reduction in object allocations. After contributing fixes to the upstream library to deal with some of our complex structures, we switched Kubernetes and the go-etcd client library over. Along with some other important optimizations in the layers above and below JSON, we were able to slash the cost in CPU time of almost all API operations, especially reads.
|
||||
|
||||
- Other notable changes led to significant wins, including:
|
||||
|
||||
- Reducing the number of broken TCP connections, which were causing unnecessary new TLS sessions: [https://github.com/kubernetes/kubernetes/issues/15664](https://github.com/kubernetes/kubernetes/issues/15664)
|
||||
|
||||

|
||||
|
||||
- Improving the performance of ReplicationController in large clusters:[https://github.com/kubernetes/kubernetes/issues/21672](https://github.com/kubernetes/kubernetes/issues/21672)
|
||||
|
||||
In both cases, the problem was debugged and/or fixed by Kubernetes community members, including Andy Goldstein and Jordan Liggitt from Red Hat, and Liang Mingqiang from NetEase.
|
||||
|
||||
### Kubernetes 1.3 and Beyond
|
||||
|
||||
Of course, our job is not finished. We will continue to invest in improving Kubernetes performance, as we would like it to scale to many thousands of nodes, just like Google’s [Borg](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43438.pdf). Thanks to our investment in testing infrastructure and our focus on how teams use containers in production, we have already identified the next steps on our path to improving scale.
|
||||
|
||||
|
||||
|
||||
On deck for Kubernetes 1.3:
|
||||
|
||||
1. Our main bottleneck is still the API server, which spends the majority of its time just marshaling and unmarshaling JSON objects. We plan to [add support for protocol buffers](https://github.com/kubernetes/kubernetes/pull/22600) to the API as an optional path for inter-component communication and for storing objects in etcd. Users will still be able to use JSON to communicate with the API server, but since the majority of Kubernetes communication is intra-cluster (API server to node, scheduler to API server, etc.) we expect a significant reduction in CPU and memory usage on the master.
|
||||
|
||||
2. Kubernetes uses labels to identify sets of objects. For example, identifying which pods belong to a given ReplicationController requires iterating over all pods in a namespace and choosing those that match the controller’s label selector. The addition of an efficient indexer for labels that can take advantage of the existing API object cache will make it possible to quickly find the objects that match a label selector, making this common operation much faster.
|
||||
|
||||
3. Scheduling decisions are based on a number of different factors, including spreading pods based on requested resources, spreading pods with the same selectors (e.g. from the same Service, ReplicationController, Job, etc.), presence of needed container images on the node, etc. Those calculations, in particular selector spreading, have many opportunities for improvement — see [https://github.com/kubernetes/kubernetes/issues/22262](https://github.com/kubernetes/kubernetes/issues/22262) for just one suggested change.
|
||||
|
||||
4. We are also excited about the upcoming etcd v3.0 release, which was designed with Kubernetes use case in mind — it will both improve performance and introduce new features. Contributors from CoreOS have already begun laying the groundwork for moving Kubernetes to etcd v3.0 (see [https://github.com/kubernetes/kubernetes/pull/22604](https://github.com/kubernetes/kubernetes/pull/22604)).
|
||||
While this list does not capture all the efforts around performance, we are optimistic we will achieve as big a performance gain as we saw going from Kubernetes 1.0 to 1.2.
|
||||
|
||||
### Conclusion
|
||||
|
||||
In the last six months we’ve significantly improved Kubernetes scalability, allowing v1.2 to run 1000-node clusters with the same excellent responsiveness (as measured by our SLOs) as we were previously achieving only on much smaller clusters. But that isn’t enough — we want to push Kubernetes even further and faster. Kubernetes v1.3 will improve the system’s scalability and responsiveness further, while continuing to add features that make it easier to build and run the most demanding container-based applications.
|
||||
|
||||
|
||||
|
||||
Please join our community and help us build the future of Kubernetes! There are many ways to participate. If you’re particularly interested in scalability, you’ll be interested in:
|
||||
|
||||
- Our [scalability slack channel](https://kubernetes.slack.com/messages/sig-scale/)
|
||||
- The scalability “Special Interest Group”, which meets every Thursday at 9 AM Pacific Time at [SIG-Scale hangout](https://plus.google.com/hangouts/_/google.com/k8scale-hangout)
|
||||
And of course for more information about the project in general, go to [www.kubernetes.io](http://www.kubernetes.io/)
|
||||
|
||||
- _Wojciech Tyczynski, Software Engineer, Google_
|
||||
|
||||
|
||||
* * *
|
||||
[**1**](https://www.blogger.com/null)We exclude operations on “events” since these are more like system logs and are not required for the system to operate properly.
|
||||
[**2**](https://www.blogger.com/null)This is test/e2e/load.go from the Kubernetes github repository.
|
||||
[**3**](https://www.blogger.com/null)This is test/e2e/density.go test from the Kubernetes github repository
|
||||
[**4**](https://www.blogger.com/null)We are looking into optimizing this in the next release, but for now using a smaller master can result in significant (order of magnitude) performance degradation. We encourage anyone running benchmarking against Kubernetes or attempting to replicate these findings to use a similarly sized master, or performance will suffer.
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " AppFormix: Helping Enterprises Operationalize Kubernetes "
|
||||
date: Wednesday, March 29, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Today’s guest post is written by Sumeet Singh, founder and CEO of [AppFormix](http://www.appformix.com/), a cloud infrastructure performance optimization service helping enterprise operators streamline their cloud operations on any OpenStack or Kubernetes cloud._
|
||||
|
||||
If you run clouds for a living, you’re well aware that the tools we've used since the client/server era for monitoring, analytics and optimization just don’t cut it when applied to the agile, dynamic and rapidly changing world of modern cloud infrastructure.
|
||||
|
||||
And, if you’re an operator of enterprise clouds, you know that implementing containers and container cluster management is all about giving your application developers a more agile, responsive and efficient cloud infrastructure. Applications are being rewritten and new ones developed – not for legacy environments where relatively static workloads are the norm, but for dynamic, scalable cloud environments. The dynamic nature of cloud native applications coupled with the shift to continuous deployment means that the demands placed by the applications on the infrastructure are constantly changing.
|
||||
|
||||
This shift necessitates infrastructure transparency and real-time monitoring and analytics. Without these key pieces, neither applications nor their underlying plumbing can deliver the low-latency user experience end users have come to expect.
|
||||
|
||||
**AppFormix Architectural Review**
|
||||
From an operational standpoint, it is necessary to understand how applications are consuming infrastructure resources in order to maximize ROI and guarantee SLAs. AppFormix software empowers operators and developers to monitor, visualize, and control how physical resources are utilized by cloud workloads.
|
||||
|
||||
At the center of the software, the AppFormix Data Platform provides a distributed analysis engine that performs configurable, real-time evaluation of in-depth, high-resolution metrics. On each host, the resource-efficient AppFormix Agent collects and evaluates multi-layer metrics from the hardware, virtualization layer, and up to the application. Intelligent agents offer sub-second response times that make it possible to detect and solve problems before they start to impact applications and users. The raw data is associated with the elements that comprise a cloud-native environment: applications, virtual machines, containers, hosts. The AppFormix Agent then publishes metrics and events to a Data Manager that stores and forwards the data to Analytics modules. Events are based on predefined or dynamic conditions set by users or infrastructure operators to make sure that SLAs and policies are being met.
|
||||
|
||||
|
||||
|  |
|
||||
| Figure 1: Roll-up summary view of the Kubernetes cluster. Operators and Users can define their SLA policies and AppFormix provides with a real-time view of the health of all elements in the Kubernetes cluster. |
|
||||
|
||||
|
||||
|
||||
|  |
|
||||
| Figure 2: Real-Time visualization of telemetry from a Kubernetes node provides a quick overview of resource utilization on the host as well as resources consumed by the pods and containers. The user-defined Labels make it easy to capture namespaces and other metadata. |
|
||||
|
||||
Additional subsystems are the Policy Controller and Analytics. The Policy Controller manages policies for resource monitoring, analysis, and control. It also provides role-based access control. The Analytics modules analyze metrics and events produced by Data Platform, enabling correlation across multiple elements to provide higher-level information to operators and developers. The Analytics modules may also configure policies in Policy Controller in response to conditions in the infrastructure.
|
||||
|
||||
AppFormix organizes elements of cloud infrastructure around hosts and instances (either containers or virtual machines), and logical groups of such elements. AppFormix integrates with cloud platforms using Adapter modules that discover the physical and virtual elements in the environment and configure those elements into the Policy Controller.
|
||||
|
||||
**Integrating AppFormix with Kubernetes**
|
||||
Enterprises often run many environments located on- or off-prem, as well as running different compute technologies (VMs, containers, bare metal). The analytics platform we’ve developed at AppFormix gives Kubernetes users a single pane of glass from which to monitor and manage container clusters in private and hybrid environments.
|
||||
|
||||
The AppFormix Kubernetes Adapter leverages the REST-based APIs of Kubernetes to discover nodes, pods, containers, services, and replication controllers. With the relational information about each element, Kubernetes Adapter is able to represent all of these elements in our system. A pod is a group of containers. A service and a replication controller are both different types of pod groups. In addition, using the watch endpoint, Kubernetes Adapter stays aware of changes to the environment.
|
||||
|
||||
**DevOps in the Enterprise with AppFormix**
|
||||
With AppFormix, developers and operators can work collaboratively to optimize applications and infrastructure. Users can access a self-service IT experience that delivers visibility into CPU, memory, storage, and network consumption by each layer of the stack: physical hardware, platform, and application software.
|
||||
|
||||
|
||||
- **Real-time multi-layer performance metrics** - In real-time, developers can view multi-layer metrics that show container resource consumption in context of the physical node on which it executes. With this context, developers can determine if application performance is limited by the physical infrastructure, due to contention or resource exhaustion, or by application design.
|
||||
- **Proactive resource control** - AppFormix Health Analytics provides policy-based actions in response to conditions in the cluster. For example, when resource consumption exceeds threshold on a worker node, Health Analytics can remove the node from the scheduling pool by invoking Kubernetes REST APIs. This dynamic control is driven by real-time monitoring at each node.
|
||||
- **Capacity planning** - Kubernetes will schedule workloads, but operators need to understand how the resources are being utilized. What resources have the most demand? How is demand trending over time? Operators can generate reports that provide necessary data for capacity planning.
|
||||
|
||||
|
||||
|
||||
|
||||
As you can see, we’re working hard to give Kubernetes users a useful, performant toolset for both OpenStack and Kubernetes environments that allows operators to deliver self-service IT to their application developers. We’re excited to be a partner contributing to the Kubernetes ecosystem and community.
|
||||
|
||||
_-- Sumeet Singh, Founder and CEO, AppFormix_
|
||||
|
|
@ -0,0 +1,241 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Building highly available applications using Kubernetes new multi-zone clusters (a.k.a. 'Ubernetes Lite') "
|
||||
date: Wednesday, March 29, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor's note: this is the third post in a [series of in-depth posts](http://blog.kubernetes.io/2016/03/five-days-of-kubernetes-12.html) on what's new in Kubernetes 1.2_
|
||||
|
||||
|
||||
|
||||
### Introduction
|
||||
One of the most frequently-requested features for Kubernetes is the ability to run applications across multiple zones. And with good reason — developers need to deploy applications across multiple domains, to improve availability in the event of a single zone outage.
|
||||
|
||||
[Kubernetes 1.2](http://blog.kubernetes.io/2016/03/Kubernetes-1.2-even-more-performance-upgrades-plus-easier-application-deployment-and-management-.html), released two weeks ago, adds support for running a single cluster across multiple failure zones (GCP calls them simply "zones," Amazon calls them "availability zones," here we'll refer to them as "zones"). This is the first step in a broader effort to allow federating multiple Kubernetes clusters together (sometimes referred to by the affectionate nickname "[Ubernetes](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/federation.md)"). This initial version (referred to as "Ubernetes Lite") offers improved application availability by spreading applications across multiple zones within a single cloud provider.
|
||||
|
||||
Multi-zone clusters are deliberately simple, and by design, very easy to use — no Kubernetes API changes were required, and no application changes either. You simply deploy your existing Kubernetes application into a new-style multi-zone cluster, and your application automatically becomes resilient to zone failures.
|
||||
|
||||
|
||||
### Now into some details . . .
|
||||
Ubernetes Lite works by leveraging the Kubernetes platform’s extensibility through labels. Today, when nodes are started, labels are added to every node in the system. With Ubernetes Lite, the system has been extended to also add information about the zone it's being run in. With that, the scheduler can make intelligent decisions about placing application instances.
|
||||
|
||||
Specifically, the scheduler already spreads pods to minimize the impact of any single node failure. With Ubernetes Lite, via `SelectorSpreadPriority`, the scheduler will make a best-effort placement to spread across zones as well. We should note, if the zones in your cluster are heterogeneous (e.g., different numbers of nodes or different types of nodes), you may not be able to achieve even spreading of your pods across zones. If desired, you can use homogeneous zones (same number and types of nodes) to reduce the probability of unequal spreading.
|
||||
|
||||
This improved labeling also applies to storage. When persistent volumes are created, the `PersistentVolumeLabel` admission controller automatically adds zone labels to them. The scheduler (via the `VolumeZonePredicate` predicate) will then ensure that pods that claim a given volume are only placed into the same zone as that volume, as volumes cannot be attached across zones.
|
||||
|
||||
|
||||
### Walkthrough
|
||||
We're now going to walk through setting up and using a multi-zone cluster on both [Google Compute Engine](https://cloud.google.com/compute/) (GCE) and Amazon EC2 using the default kube-up script that ships with Kubernetes. Though we highlight GCE and EC2, this functionality is available in any Kubernetes 1.2 deployment where you can make changes during cluster setup. This functionality will also be available in [Google Container Engine](https://cloud.google.com/container-engine/) (GKE) shortly.
|
||||
|
||||
|
||||
### Bringing up your cluster
|
||||
Creating a multi-zone deployment for Kubernetes is the same as for a single-zone cluster, but you’ll need to pass an environment variable (`"MULTIZONE"`) to tell the cluster to manage multiple zones. We’ll start by creating a multi-zone-aware cluster on GCE and/or EC2.
|
||||
|
||||
GCE:
|
||||
|
||||
```
|
||||
curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=gce
|
||||
KUBE_GCE_ZONE=us-central1-a NUM_NODES=3 bash
|
||||
```
|
||||
EC2:
|
||||
|
||||
```
|
||||
curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=aws
|
||||
KUBE_AWS_ZONE=us-west-2a NUM_NODES=3 bash
|
||||
```
|
||||
At the end of this command, you will have brought up a cluster that is ready to manage nodes running in multiple zones. You’ll also have brought up `NUM_NODES` nodes and the cluster's control plane (i.e., the Kubernetes master), all in the zone specified by `KUBE_{GCE,AWS}_ZONE`. In a future iteration of Ubernetes Lite, we’ll support an HA control plane, where the master components are replicated across zones. Until then, the master will become unavailable if the zone where it is running fails. However, containers that are running in all zones will continue to run and be restarted by Kubelet if they fail, thus the application itself will tolerate such a zone failure.
|
||||
|
||||
|
||||
### Nodes are labeled
|
||||
To see the additional metadata added to the node, simply view all the labels for your cluster (the example here is on GCE):
|
||||
|
||||
```
|
||||
$ kubectl get nodes --show-labels
|
||||
|
||||
NAME STATUS AGE LABELS
|
||||
kubernetes-master Ready,SchedulingDisabled 6m
|
||||
beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kub
|
||||
ernetes.io/hostname=kubernetes-master
|
||||
kubernetes-minion-87j9 Ready 6m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kub
|
||||
ernetes.io/hostname=kubernetes-minion-87j9
|
||||
kubernetes-minion-9vlv Ready 6m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kub
|
||||
ernetes.io/hostname=kubernetes-minion-9vlv
|
||||
kubernetes-minion-a12q Ready 6m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kub
|
||||
ernetes.io/hostname=kubernetes-minion-a12q
|
||||
```
|
||||
The scheduler will use the labels attached to each of the nodes (failure-domain.beta.kubernetes.io/region for the region, and failure-domain.beta.kubernetes.io/zone for the zone) in its scheduling decisions.
|
||||
|
||||
|
||||
### Add more nodes in a second zone
|
||||
Let's add another set of nodes to the existing cluster, but running in a different zone (us-central1-b for GCE, us-west-2b for EC2). We run kube-up again, but by specifying `KUBE_USE_EXISTING_MASTER=true` kube-up will not create a new master, but will reuse one that was previously created.
|
||||
|
||||
GCE:
|
||||
|
||||
```
|
||||
KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce
|
||||
KUBE_GCE_ZONE=us-central1-b NUM_NODES=3 kubernetes/cluster/kube-up.sh
|
||||
```
|
||||
On EC2, we also need to specify the network CIDR for the additional subnet, along with the master internal IP address:
|
||||
|
||||
```
|
||||
KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws
|
||||
KUBE_AWS_ZONE=us-west-2b NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.1.0/24
|
||||
MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh
|
||||
```
|
||||
View the nodes again; 3 more nodes will have been launched and labeled (the example here is on GCE):
|
||||
|
||||
```
|
||||
$ kubectl get nodes --show-labels
|
||||
|
||||
NAME STATUS AGE LABELS
|
||||
kubernetes-master Ready,SchedulingDisabled 16m
|
||||
beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kub
|
||||
ernetes.io/hostname=kubernetes-master
|
||||
kubernetes-minion-281d Ready 2m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kub
|
||||
ernetes.io/hostname=kubernetes-minion-281d
|
||||
kubernetes-minion-87j9 Ready 16m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kub
|
||||
ernetes.io/hostname=kubernetes-minion-87j9
|
||||
kubernetes-minion-9vlv Ready 16m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kub
|
||||
ernetes.io/hostname=kubernetes-minion-9vlv
|
||||
kubernetes-minion-a12q Ready 17m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kub
|
||||
ernetes.io/hostname=kubernetes-minion-a12q
|
||||
kubernetes-minion-pp2f Ready 2m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kub
|
||||
ernetes.io/hostname=kubernetes-minion-pp2f
|
||||
kubernetes-minion-wf8i Ready 2m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kub
|
||||
ernetes.io/hostname=kubernetes-minion-wf8i
|
||||
```
|
||||
Let’s add one more zone:
|
||||
|
||||
GCE:
|
||||
|
||||
```
|
||||
KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce
|
||||
KUBE_GCE_ZONE=us-central1-f NUM_NODES=3 kubernetes/cluster/kube-up.sh
|
||||
```
|
||||
EC2:
|
||||
|
||||
```
|
||||
KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws
|
||||
KUBE_AWS_ZONE=us-west-2c NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.2.0/24
|
||||
MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh
|
||||
```
|
||||
Verify that you now have nodes in 3 zones:
|
||||
|
||||
```
|
||||
kubectl get nodes --show-labels
|
||||
```
|
||||
Highly available apps, here we come.
|
||||
|
||||
|
||||
### Deploying a multi-zone application
|
||||
Create the guestbook-go example, which includes a ReplicationController of size 3, running a simple web app. Download all the files from [here](https://github.com/kubernetes/kubernetes/tree/master/examples/guestbook-go), and execute the following command (the command assumes you downloaded them to a directory named “guestbook-go”):
|
||||
|
||||
```
|
||||
kubectl create -f guestbook-go/
|
||||
```
|
||||
You’re done! Your application is now spread across all 3 zones. Prove it to yourself with the following commands:
|
||||
|
||||
```
|
||||
$ kubectl describe pod -l app=guestbook | grep Node
|
||||
Node: kubernetes-minion-9vlv/10.240.0.5
|
||||
Node: kubernetes-minion-281d/10.240.0.8
|
||||
Node: kubernetes-minion-olsh/10.240.0.11
|
||||
|
||||
$ kubectl get node kubernetes-minion-9vlv kubernetes-minion-281d
|
||||
kubernetes-minion-olsh --show-labels
|
||||
NAME STATUS AGE LABELS
|
||||
kubernetes-minion-9vlv Ready 34m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kub
|
||||
ernetes.io/hostname=kubernetes-minion-9vlv
|
||||
kubernetes-minion-281d Ready 20m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kub
|
||||
ernetes.io/hostname=kubernetes-minion-281d
|
||||
kubernetes-minion-olsh Ready 3m
|
||||
beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.
|
||||
io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-f,kub
|
||||
ernetes.io/hostname=kubernetes-minion-olsh
|
||||
```
|
||||
Further, load-balancers automatically span all zones in a cluster; the guestbook-go example includes an example load-balanced service:
|
||||
|
||||
```
|
||||
$ kubectl describe service guestbook | grep LoadBalancer.Ingress
|
||||
LoadBalancer Ingress: 130.211.126.21
|
||||
|
||||
ip=130.211.126.21
|
||||
|
||||
$ curl -s http://${ip}:3000/env | grep HOSTNAME
|
||||
"HOSTNAME": "guestbook-44sep",
|
||||
|
||||
$ (for i in `seq 20`; do curl -s http://${ip}:3000/env | grep HOSTNAME; done)
|
||||
```
|
||||
|
||||
```
|
||||
| sort | uniq
|
||||
"HOSTNAME": "guestbook-44sep",
|
||||
"HOSTNAME": "guestbook-hum5n",
|
||||
"HOSTNAME": "guestbook-ppm40",
|
||||
```
|
||||
The load balancer correctly targets all the pods, even though they’re in multiple zones.
|
||||
|
||||
### Shutting down the cluster
|
||||
When you're done, clean up:
|
||||
|
||||
GCE:
|
||||
|
||||
```
|
||||
KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true
|
||||
KUBE_GCE_ZONE=us-central1-f kubernetes/cluster/kube-down.sh
|
||||
KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true
|
||||
KUBE_GCE_ZONE=us-central1-b kubernetes/cluster/kube-down.sh
|
||||
KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a
|
||||
kubernetes/cluster/kube-down.sh
|
||||
```
|
||||
EC2:
|
||||
|
||||
```
|
||||
KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2c
|
||||
kubernetes/cluster/kube-down.sh
|
||||
KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b
|
||||
kubernetes/cluster/kube-down.sh
|
||||
KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a
|
||||
kubernetes/cluster/kube-down.sh
|
||||
```
|
||||
|
||||
|
||||
### Conclusion
|
||||
A core philosophy for Kubernetes is to abstract away the complexity of running highly available, distributed applications. As you can see here, other than a small amount of work at cluster spin-up time, all the complexity of launching application instances across multiple failure domains requires no additional work by application developers, as it should be. And we’re just getting started!
|
||||
|
||||
Please join our community and help us build the future of Kubernetes! There are many ways to participate. If you’re particularly interested in scalability, you’ll be interested in:
|
||||
|
||||
|
||||
- Our federation [slack channel ](https://kubernetes.slack.com/messages/sig-federation/)
|
||||
- The federation “Special Interest Group,” which meets every Thursday at 9:30 a.m. Pacific Time at [SIG-Federation hangout ](https://plus.google.com/hangouts/_/google.com/ubernetes)
|
||||
|
||||
|
||||
And of course for more information about the project in general, go to www.kubernetes.io
|
||||
|
||||
-- _Quinton Hoole, Staff Software Engineer, Google, and Justin Santa Barbara_
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " ElasticBox introduces ElasticKube to help manage Kubernetes within the enterprise "
|
||||
date: Saturday, March 11, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Today’s guest post is brought to you by Brannan Matherson, from ElasticBox, who’ll discuss a new open source project to help standardize container deployment and management in enterprise environments. This highlights the advantages of authentication and user management for containerized applications
|
||||
|
||||
I’m delighted to share some exciting work that we’re doing at ElasticBox to contribute to the open source community regarding the rapidly changing advancements in container technologies. Our team is kicking off a new initiative called [ElasticKube](http://elastickube.com/) to help solve the problem of challenging container management scenarios within the enterprise. This project is a native container management experience that is specific to Kubernetes and leverages automation to provision clusters for containerized applications based on the latest release of Kubernetes 1.2.
|
||||
|
||||
I’ve talked to many enterprise companies, both large and small, and the plethora of cloud offering capabilities is often confusing and makes the evaluation process very difficult, so why Kubernetes? Of the large public cloud players - Amazon Web Services, Microsoft Azure, and Google Cloud Platform - Kubernetes is poised to take an innovative leadership role in framing the container management space. The Kubernetes platform does not restrict or dictate any given technical approach for containers, but encourages the community to collectively solve problems as this container market still takes form. With a proven track record of supporting open source efforts, Kubernetes platform allows my team and me to actively contribute to this fundamental shift in the IT and developer world.
|
||||
|
||||
We’ve chosen Kubernetes, not just for the core infrastructure services, but also the agility of Kubernetes to leverage the cluster management layer across any cloud environment - GCP, AWS, Azure, vSphere, and Rackspace. Kubernetes also provides a huge benefit for users to run clusters for containers locally on many popular technologies such as: Docker, Vagrant (and VirtualBox), CoreOS, Mesos and more. This amount of choice enables our team and many others in the community to consider solutions that will be viable for a wide range of enterprise scenarios. In the case of ElasticKube, we’re pleased with Kubernetes 1.2 which includes the full release of the deployment API. This provides the ability for us to perform seamless rolling updates of containerized applications that are running in production. In addition, we’ve been able to support new resource types like ConfigMaps and Horizontal Pod Autoscalers.
|
||||
|
||||
Fundamentally, ElasticKube delivers a web console that complements Kubernetes for users managing their clusters. The initial experience incorporates team collaboration, lifecycle management and reporting, so organizations can efficiently manage resources in a predictable manner. Users will see an ElasticKube portal that takes advantage of the infrastructure abstraction that enables users to run a container that has already been built. With ElasticKube assuming the cluster has been deployed, the overwhelming value is to provide visibility into who did what and define permissions for access to the cluster with multiple containers running on them. Secondly, by partitioning clusters into namespaces, authorization management is more effective. Finally, by empowering users to build a set of reusable templates in a modern portal, ElasticKube provides a vehicle for delivering a self-service template catalog that can be stored in GitHub (for instance, using Helm templates) and deployed easily.
|
||||
|
||||
ElasticKube enables organizations to accelerate adoption by developers, application operations and traditional IT operations teams and shares a mutual goal of increasing developer productivity, driving efficiency in container management and promoting the use of microservices as a modern application delivery methodology. When leveraging ElasticKube in your environment, users need to ensure the following technologies are configured appropriately to guarantee everything runs correctly:
|
||||
|
||||
-
|
||||
Configure Google Container Engine (GKE) for cluster installation and management
|
||||
|
||||
-
|
||||
Use Kubernetes to provision the infrastructure and clusters for containers
|
||||
|
||||
-
|
||||
Use your existing tools of choice to actually build your containers
|
||||
-
|
||||
|
||||
Use ElasticKube to run, deploy and manage your containers and services
|
||||
|
||||
[](http://cl.ly/0i3M2L3Q030z/Image%202016-03-11%20at%209.49.12%20AM.png)
|
||||
|
||||
|
||||
|
||||
Getting Started with Kubernetes and ElasticKube
|
||||
|
||||
|
||||
|
||||
|
||||
(this is a 3min walk through video with the following topics)
|
||||
|
||||
1.
|
||||
Deploy ElasticKube to a Kubernetes cluster
|
||||
2.
|
||||
Configuration
|
||||
3.
|
||||
Admin: Setup and invite a user
|
||||
4.
|
||||
Deploy an instance
|
||||
|
||||
|
||||
|
||||
Hear What Others are Saying
|
||||
|
||||
“Kubernetes has provided us the level of sophistication required for enterprises to manage containers across complex networking environments and the appropriate amount of visibility into the application lifecycle. Additionally, the community commitment and engagement has been exceptional, and we look forward to being a major contributor to this next wave of modern cloud computing and application management.”
|
||||
|
||||
_~Alberto Arias Maestro, Co-founder and Chief Technology Officer, ElasticBox_
|
||||
|
||||
|
||||
|
||||
_-- Brannan Matherson, Head of Product Marketing, ElasticBox_
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
layout: blog
|
||||
title: " Five Days of Kubernetes 1.2 "
|
||||
permalink: /blog/:year/:month/:title
|
||||
date: Tuesday, March 28, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
The Kubernetes project has had some huge milestones over the past few weeks. We released [Kubernetes 1.2](http://blog.kubernetes.io/2016/03/Kubernetes-1.2-even-more-performance-upgrades-plus-easier-application-deployment-and-management-.html), had our [first conference in Europe](https://kubecon.io/), and were accepted into the [Cloud Native Computing Foundation](https://cncf.io/). While we catch our breath, we would like to take a moment to highlight some of the great work contributed by the community since our last milestone, just four months ago.
|
||||
|
||||
|
||||
|
||||
Our mission is to make building distributed systems easy and accessible for all. While Kubernetes 1.2 has LOTS of new features, there are a few that really highlight the strides we’re making towards that goal. Over the course of the next week, we’ll be publishing a series of in-depth posts covering what’s new, so come back daily this week to read about the new features that continue to make Kubernetes the easiest way to run containers at scale. Thanks, and stay tuned!
|
||||
|
||||
|
||||
|
||||
|
|
||||
3/28
|
||||
|
|
||||
\* [1000 nodes and Beyond: Updates to Kubernetes performance and scalability in 1.2](http://blog.kubernetes.io/2016/03/1000-nodes-and-beyond-updates-to-Kubernetes-performance-and-scalability-in-12.html)
|
||||
\* Guest post by Sysdig: [How container metadata changes your point of view](http://blog.kubernetes.io/2016/03/how-container-metadata-changes-your-point-of-view.html)
|
||||
|
|
||||
|
|
||||
3/29
|
||||
|
|
||||
\* [Building highly available applications using Kubernetes new multi-zone clusters (a.k.a. "Ubernetes Lite")](http://blog.kubernetes.io/2016/03/building-highly-available-applications-using-Kubernetes-new-multi-zone-clusters-a.k.a-Ubernetes-Lite.html)
|
||||
\* Guest post by AppFormix: [Helping Enterprises Operationalize Kubernetes](http://blog.kubernetes.io/2016/03/appformix-helping-enterprises.html)
|
||||
|
|
||||
|
|
||||
3/30
|
||||
|
|
||||
\* [Using Spark and Zeppelin to process big data on Kubernetes 1.2](http://blog.kubernetes.io/2016/03/using-Spark-and-Zeppelin-to-process-Big-Data-on-Kubernetes.html).
|
||||
|
|
||||
|
|
||||
3/31
|
||||
|
|
||||
\* [Kubernetes 1.2 and simplifying advanced networking with Ingress](http://blog.kubernetes.io/2016/03/Kubernetes-1.2-and-simplifying-advanced-networking-with-Ingress.html)
|
||||
|
|
||||
|
|
||||
4/1
|
||||
|
|
||||
\* [Using Deployment Objects with Kubernetes 1.2](http://blog.kubernetes.io/2016/04/using-deployment-objects-with.html)
|
||||
|
|
||||
|
|
||||
BONUS
|
||||
|
|
||||
\* ConfigMap API [Configuration management with Containers](http://blog.kubernetes.io/2016/04/configuration-management-with-containers.html)
|
||||
|
|
||||
|
||||
|
||||
|
||||
You can follow us on twitter here [@Kubernetesio](https://twitter.com/kubernetesio)
|
||||
|
||||
|
||||
_--David Aronchick, Senior Product Manager for Kubernetes, Google_
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " How container metadata changes your point of view "
|
||||
date: Tuesday, March 28, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Today’s guest post is brought to you by Apurva Davé, VP of Marketing at Sysdig, who’ll discuss using Kubernetes metadata & Sysdig to understand what’s going on in your Kubernetes cluster. _
|
||||
|
||||
Sure, metadata is a fancy word. It actually means “data that describes other data.” While that definition isn’t all that helpful, it turns out metadata itself is especially helpful in container environments. When you have any complex system, the availability of metadata helps you sort and process the variety of data coming out of that system, so that you can get to the heart of an issue with less headache.
|
||||
|
||||
In a Kubernetes environment, metadata can be a crucial tool for organizing and understanding the way containers are orchestrated across your many services, machines, availability zones or (in the future) multiple clouds. This metadata can also be consumed by other services running on top of your Kubernetes system and can help you manage your applications.
|
||||
|
||||
We’ll take a look at some examples of this below, but first...
|
||||
|
||||
###
|
||||
A quick intro to Kubernetes metadata
|
||||
Kubernetes metadata is abundant in the form of [_labels_](http://kubernetes.io/docs/user-guide/labels/) and [_annotations_](http://kubernetes.io/docs/user-guide/annotations/). Labels are designed to be identifying metadata for your infrastructure, whereas annotations are designed to be non-identifying. For both, they’re simply generic key:value pairs that look like this:
|
||||
|
||||
```
|
||||
"labels": {
|
||||
"key1" : "value1",
|
||||
"key2" : "value2"
|
||||
}
|
||||
```
|
||||
Labels are not designed to be unique; you can expect any number of objects in your environment to carry the same label, and you can expect that an object could have many labels.
|
||||
|
||||
What are some examples of labels you might use? Here are just a few. WARNING: Once you start, you might find more than a few ways to use this functionality!
|
||||
|
||||
|
||||
- Environment: Dev, Prod, Test, UAT
|
||||
- Customer: Cust A, Cust B, Cust C
|
||||
- Tier: Frontend, Backend
|
||||
- App: Cache, Web, Database, Auth
|
||||
|
||||
In addition to custom labels you might define, Kubernetes also automatically applies labels to your system with useful metadata. Default labels supply key identifying information about your entire Kubernetes hierarchy: Pods, Services, Replication Controllers, and Namespaces.
|
||||
|
||||
|
||||
### Putting your metadata to work
|
||||
Once you spend a little time with Kubernetes, you’ll see that labels have one particularly powerful application that makes them essential:
|
||||
|
||||
**Kubernetes labels allow you to easily move between a “physical” view of your hosts and containers, and a “logical” view of your applications and micro-services. **
|
||||
|
||||
At its core, a platform like Kubernetes is designed to orchestrate the optimal use of underlying physical resources. This is a powerful way to consume private or public cloud resources very efficiently, and sometimes you need to visualize those physical resources. In reality, however, most of the time you care about the performance of the service first and foremost.
|
||||
|
||||
But in a Kubernetes world, achieving that high utilization means a service’s containers may be scattered all over the place! So how do you actually measure the service’s performance? That’s where the metadata comes in. With Kubernetes metadata, you can create a deep understanding of your service’s performance, regardless of where the underlying containers are physically located.
|
||||
|
||||
|
||||
### Paint me a picture
|
||||
Let’s look at a quick example to make this more concrete: monitoring your application. Let’s work with a small, 3 node deployment running on GKE. For visualizing the environment we’ll use Sysdig Cloud. Here’s a list of the nodes — note the “gke” prepended to the name of each host. We see some basic performance details like CPU, memory and network.
|
||||
|
||||
|
||||
[](https://1.bp.blogspot.com/-NSkvJcEj0L0/VvmM1eWSlLI/AAAAAAAAA5w/YupjdMPz8aEmXjSt8xyZJVOoa4osyLYBg/s1600/sysdig1.png)
|
||||
|
||||
Each of these hosts has a number of containers running on it. Drilling down on the hosts, we see the containers associated with each:
|
||||
|
||||
|
||||
[](https://2.bp.blogspot.com/-7hrB4V8zAkg/VvmJRpLcQQI/AAAAAAAAAYA/Fz7pul56ZQ8Xus6u4zHBFAwe8HJesyeRw/s1600/Kubernetes%2BMetadata%2BBlog%2B2.png)
|
||||
|
||||
|
||||
|
||||
Simply scanning this list of containers on a single host, I don’t see much organization to the responsibilities of these objects. For example, some of these containers run Kubernetes services (like kube-ui) and we presume others have to do with the application running (like javaapp.x).
|
||||
|
||||
Now let’s use some of the metadata provided by Kubernetes to take an application-centric view of the system. Let’s start by creating a hierarchy of components based on labels, in this order:
|
||||
|
||||
`Kubernetes namespace -> replication controller -> pod -> container`
|
||||
|
||||
This aggregates containers at corresponding levels based on the above labels. In the app UI below, this aggregation and hierarchy are shown in the grey “grouping” bar above the data about our hosts. As you can see, we have a “prod” namespace with a group of services (replication controllers) below it. Each of those replication controllers can then consist of multiple pods, which are in turn made up of containers.
|
||||
|
||||
|
||||
[](https://4.bp.blogspot.com/-7JuCC5kuF6U/VvmJzM4UYmI/AAAAAAAAAYE/iIhR19aVCpAaVFRKujflMo047PmzP0DpA/s1600/Kubernetes%2BMetadata%2BBlog%2B3.png)
|
||||
|
||||
In addition to organizing containers via labels, this view also aggregates metrics across relevant containers, giving a singular view into the performance of a namespace or replication controller.
|
||||
|
||||
**In other words, with this aggregated view based on metadata, you can now start by monitoring and troubleshooting services, and drill into hosts and containers only if needed. **
|
||||
|
||||
Let’s do one more thing with this environment — let’s use the metadata to create a visual representation of services and the topology of their communications. Here you see our containers organized by services, but also a map-like view that shows you how these services relate to each other.
|
||||
|
||||
|
||||
[](https://1.bp.blogspot.com/-URGCJheccOE/Vvmeh7VnzgI/AAAAAAAAA6I/WIz3pmcrk9A5sgadIU5J8lVObg32HFlQQ/s1600/sysdig4.png)
|
||||
|
||||
The boxes represent services that are aggregates of containers (the number in the upper right of each box tells you how many containers), and the lines represent communications between services and their latencies.
|
||||
|
||||
This kind of view provides yet another logical, instead of physical, view of how these application components are working together. From here I can understand service performance, relationships and underlying resource consumption (CPU in this example).
|
||||
|
||||
|
||||
### Metadata: love it, use it
|
||||
This is a pretty quick tour of metadata, but I hope it inspires you to spend a little time thinking about the relevance to your own system and how you could leverage it. Here we built a pretty simple example — apps and services — but imagine collecting metadata across your apps, environments, software components and cloud providers. You could quickly assess performance differences across any slice of this infrastructure effectively, all while Kubernetes is efficiently scheduling resource usage.
|
||||
|
||||
Get started with metadata for visualizing these resources today, and in a followup post we’ll talk about the power of adaptive alerting based on metadata.
|
||||
|
||||
_-- Apurva Davé is a closet Kubernetes fanatic, loves data, and oh yeah is also the VP of Marketing at Sysdig._
|
||||
|
|
@ -0,0 +1,117 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes 1.2 and simplifying advanced networking with Ingress "
|
||||
date: Friday, March 31, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor's note: This is the sixth post in a [series of in-depth posts](http://blog.kubernetes.io/2016/03/five-days-of-kubernetes-12.html) on what's new in Kubernetes 1.2._
|
||||
_Ingress is currently in beta and under active development._
|
||||
|
||||
In Kubernetes, Services and Pods have IPs only routable by the cluster network, by default. All traffic that ends up at an edge router is either dropped or forwarded elsewhere. In Kubernetes 1.2, we’ve made improvements to the Ingress object, to simplify allowing inbound connections to reach the cluster services. It can be configured to give services externally-reachable URLs, load balance traffic, terminate SSL, offer name based virtual hosting and lots more.
|
||||
|
||||
|
||||
### Ingress controllers
|
||||
Today, with containers or VMs, configuring a web server or load balancer is harder than it should be. Most web server configuration files are very similar. There are some applications that have weird little quirks that tend to throw a wrench in things, but for the most part, you can apply the same logic to them and achieve a desired result. In Kubernetes 1.2, the Ingress resource embodies this idea, and an Ingress controller is meant to handle all the quirks associated with a specific "class" of Ingress (be it a single instance of a load balancer, or a more complicated setup of frontends that provide GSLB, CDN, DDoS protection etc). An Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches the ApiServer's /ingresses endpoint for updates to the [Ingress resource](http://kubernetes.io/docs/user-guide/ingress/). Its job is to satisfy requests for ingress.
|
||||
|
||||
Your Kubernetes cluster must have exactly one Ingress controller that supports TLS for the following example to work. If you’re on a cloud-provider, first check the “kube-system” namespace for an Ingress controller RC. If there isn’t one, you can deploy the [nginx controller](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx), or [write your own](https://github.com/kubernetes/contrib/tree/master/ingress/controllers#writing-an-ingress-controller) in \< 100 lines of code.
|
||||
|
||||
Please take a minute to look over the known limitations of existing controllers (gce, nginx).
|
||||
|
||||
|
||||
### TLS termination and HTTP load-balancing
|
||||
Since the Ingress spans Services, it’s particularly suited for load balancing and centralized security configuration. If you’re familiar with the go programming language, Ingress is like [net/http’s “Server”](https://golang.org/pkg/net/http/#Server) for your entire cluster. The following example shows you how to configure TLS termination. Load balancing is not optional when dealing with ingress traffic, so simply creating the object will configure a load balancer.
|
||||
|
||||
First create a test Service. We’ll run a simple echo server for this example so you know exactly what’s going on. The source is [here](https://github.com/kubernetes/contrib/tree/master/ingress/echoheaders).
|
||||
```
|
||||
$ kubectl run echoheaders
|
||||
--image=gcr.io/google\_containers/echoserver:1.3 --port=8080
|
||||
$ kubectl expose deployment echoheaders --target-port=8080
|
||||
--type=NodePort
|
||||
```
|
||||
If you’re on a cloud-provider, make sure you can reach the Service from outside the cluster through its node port.
|
||||
|
||||
```
|
||||
$ NODE_IP=$(kubectl get node `kubectl get po -l run=echoheaders
|
||||
--template '{{range .items}}{{.spec.nodeName}}{{end}}'` --template
|
||||
'{{range $i, $n := .status.addresses}}{{if eq $n.type
|
||||
"ExternalIP"}}{{$n.address}}{{end}}{{end}}')
|
||||
$ NODE_PORT=$(kubectl get svc echoheaders --template '{{range $i, $e
|
||||
:= .spec.ports}}{{$e.nodePort}}{{end}}')
|
||||
$ curl $NODE_IP:$NODE_PORT
|
||||
```
|
||||
This is a sanity check that things are working as expected. If the last step hangs, you might need a [firewall rule](https://github.com/kubernetes/contrib/blob/master/ingress/controllers/gce/BETA_LIMITATIONS.md#creating-the-firewall-rule-for-glbc-health-checks).
|
||||
|
||||
Now let's create our TLS secret:
|
||||
```
|
||||
$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout
|
||||
|
||||
/tmp/tls.key -out /tmp/tls.crt -subj "/CN=echoheaders/O=echoheaders"
|
||||
|
||||
$ echo "
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: tls
|
||||
data:
|
||||
tls.crt: `base64 -w 0 /tmp/tls.crt`
|
||||
tls.key: `base64 -w 0 /tmp/tls.key`
|
||||
" | kubectl create -f
|
||||
```
|
||||
And the Ingress:
|
||||
|
||||
```
|
||||
$ echo "
|
||||
|
||||
apiVersion: extensions/v1beta1
|
||||
|
||||
kind: Ingress
|
||||
|
||||
metadata:
|
||||
|
||||
name: test
|
||||
|
||||
spec:
|
||||
|
||||
tls:
|
||||
|
||||
- secretName: tls
|
||||
backend:
|
||||
serviceName: echoheaders
|
||||
servicePort: 8080
|
||||
" | kubectl create -f -
|
||||
```
|
||||
You should get a load balanced IP soon:
|
||||
```
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS AGE
|
||||
test - echoheaders:8080 130.X.X.X 4m
|
||||
```
|
||||
And if you wait till the Ingress controller marks your backends as healthy, you should see requests to that IP on :80 getting redirected to :443 and terminated using the given TLS certificates.
|
||||
```
|
||||
$ curl 130.X.X.X
|
||||
\<html\>
|
||||
\<head\>\<title\>301 Moved Permanently\</title\>\</head\>\<body bgcolor="white"\>\<center\>\<h1\>301 Moved Permanently\</h1\>\</center\>
|
||||
```
|
||||
|
||||
```
|
||||
$ curl https://130.X.X.X -kCLIENT VALUES:client\_address=10.48.0.1command=GETreal path=/
|
||||
|
||||
|
||||
$ curl 130.X.X.X -Lk
|
||||
|
||||
CLIENT VALUES:client\_address=10.48.0.1command=GETreal path=/
|
||||
```
|
||||
### Future work
|
||||
You can read more about the [Ingress API](http://kubernetes.io/docs/user-guide/ingress/) or controllers by following the links. The Ingress is still in beta, and we would love your input to grow it. You can contribute by writing controllers or evolving the API. All things related to the meaning of the word “[ingress](https://www.google.com/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=ingress%20meaning)” are in scope, this includes DNS, different TLS modes, SNI, load balancing at layer 4, content caching, more algorithms, better health checks; the list goes on.
|
||||
|
||||
There are many ways to participate. If you’re particularly interested in Kubernetes and networking, you’ll be interested in:
|
||||
|
||||
- Our [Networking slack channel ](https://kubernetes.slack.com/messages/sig-network/)
|
||||
- Our [Kubernetes Networking Special Interest Group](https://groups.google.com/forum/#!forum/kubernetes-sig-network) email list
|
||||
- The Big Data “Special Interest Group,” which meets biweekly at 3pm (15h00) Pacific Time at [SIG-Networking hangout](https://zoom.us/j/5806599998)
|
||||
|
||||
And of course for more information about the project in general, go to [www.kubernetes.io](http://kubernetes.io/)
|
||||
|
||||
-- _Prashanth Balasubramanian, Software Engineer_
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes 1.2: Even more performance upgrades, plus easier application deployment and management "
|
||||
date: Friday, March 17, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Today we released Kubernetes 1.2. This release represents significant improvements for large organizations building distributed systems. Now with over 680 unique contributors to the project, this release represents our largest yet.
|
||||
|
||||
From the beginning, our mission has been to make building distributed systems easy and accessible for all. With the Kubernetes 1.2 release we’ve made strides towards our goal by increasing scale, decreasing latency and overall simplifying the way applications are deployed and managed. Now, developers at organizations of all sizes can build production scale apps more easily than ever before.
|
||||
|
||||
### What’s new:
|
||||
|
||||
- **Significant scale improvements**. Increased cluster scale by 400% to 1,000 nodes and 30,000 containers per cluster.
|
||||
- **Simplified application deployment and management**.
|
||||
|
||||
- Dynamic Configuration (via the ConfigMap API) enables applications to pull their configuration when they run rather than packaging it in at build time.
|
||||
- Turnkey Deployments (via the Beta Deployment API) let you declare your application and Kubernetes will do the rest. It handles versioning, multiple simultaneous rollouts, aggregating status across all pods, maintaining application availability and rollback.
|
||||
- **Automated cluster management** :
|
||||
|
||||
- Improved reliability through cross-zone failover and multi-zone scheduling
|
||||
- Simplified One-Pod-Per-Node Applications (via the Beta DaemonSet API) allows you to schedule a service (such as a logging agent) that runs one, and only one, pod per node.
|
||||
- TLS and L7 support (via the Beta Ingress API) provides a straightforward way to integrate into custom networking environments by supporting TLS for secure communication and L7 for http-based traffic routing.
|
||||
- Graceful Node Shutdown (aka Node Drain) takes care of transitioning pods off a node and allowing it to be shut down cleanly.
|
||||
- Custom Metrics for Autoscaling now supports custom metrics, allowing you to specify a set of signals to indicate autoscaling pods.
|
||||
- **New GUI** allows you to get started quickly and enables the same functionality found in the CLI for a more approachable and discoverable interface.
|
||||
|
||||
[](https://1.bp.blogspot.com/-_xwIlw1gJo4/VusiOuHRzCI/AAAAAAAAA3s/NDN91tgdypQE7iBjzTCWlO7vzfDNt_guw/s1600/k8-1.2-release.png)
|
||||
|
||||
- **And many more**. For a complete list of updates, see the [release notes on github](https://github.com/kubernetes/kubernetes/releases/tag/v1.2.0).
|
||||
|
||||
#### Community
|
||||
|
||||
All these improvements would not be possible without our enthusiastic and global community. The momentum is astounding. We’re seeing over 400 pull requests per week, a 50% increase since the previous 1.1 release. There are meetups and conferences discussing Kubernetes nearly every day, on top of the 85 Kubernetes related [meetup groups](http://www.meetup.com/topics/kubernetes/) around the world. We’ve also seen significant participation in the community in the form of Special Interest Groups, with 18 active SIGs that cover topics from AWS and OpenStack to big data and scalability, to get involved [join or start a new SIG](https://github.com/kubernetes/kubernetes/wiki/Special-Interest-Groups-(SIGs)). Lastly, we’re proud that Kubernetes is the first project to be accepted to the Cloud Native Computing Foundation (CNCF), read more about the announcement [here](https://cncf.io/news/announcement/2016/03/cloud-native-computing-foundation-accepts-kubernetes-first-hosted-projec-0).
|
||||
|
||||
|
||||
|
||||
#### Documentation
|
||||
|
||||
With Kubernetes 1.2 comes a relaunch of our website at [kubernetes.io](http://kubernetes.io/). We’ve slimmed down the docs contribution process so that all you have to do is fork/clone and send a PR. And the site works the same whether you’re staging it on your laptop, on github.io, or viewing it in production. It’s a pure GitHub Pages project; no scripts, no plugins.
|
||||
|
||||
|
||||
|
||||
From now on, our docs are at a new repo: [https://github.com/kubernetes/kubernetes.github.io](https://github.com/kubernetes/kubernetes.github.io)
|
||||
|
||||
|
||||
|
||||
To entice you even further to contribute, we’re also announcing our new bounty program. For every “bounty bug” you address with a merged pull request, we offer the listed amount in credit for Google Cloud Platform services. Just look for [bugs labeled “Bounty” in the new repo](https://github.com/kubernetes/kubernetes.github.io/issues?q=is%3Aissue+is%3Aopen+label%3ABounty) for more details.
|
||||
|
||||
|
||||
|
||||
#### Roadmap
|
||||
|
||||
All of our work is done in the open, to learn the latest about the project [join the weekly community meeting](https://groups.google.com/forum/#!forum/kubernetes-community-video-chat) or [watch a recorded hangout](https://www.youtube.com/playlist?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ). In keeping with our major release schedule of every three to four months, here are just a few items that are in development for [next release and beyond](https://github.com/kubernetes/kubernetes/wiki/Release-1.3):
|
||||
|
||||
- Improved stateful application support (aka Pet Set)
|
||||
- Cluster Federation (aka Ubernetes)
|
||||
- Even more (more!) performance improvements
|
||||
- In-cluster IAM
|
||||
- Cluster autoscaling
|
||||
- Scheduled job
|
||||
- Public dashboard that allows for nightly test runs across multiple cloud providers
|
||||
- Lots, lots more!
|
||||
Kubernetes 1.2 is available for download at [get.k8s.io](http://get.k8s.io/) and via the open source repository hosted on [GitHub](https://github.com/kubernetes/kubernetes). To get started with Kubernetes try our new [Hello World app](http://kubernetes.io/docs/hellonode/).
|
||||
|
||||
|
||||
|
||||
#### Connect
|
||||
|
||||
We’d love to hear from you and see you participate in this growing community:
|
||||
|
||||
- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)
|
||||
- Post questions (or answer questions) on [Stackoverflow](https://stackoverflow.com/questions/tagged/kubernetes)
|
||||
- Connect with the community on [Slack](http://slack.kubernetes.io/)
|
||||
- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates
|
||||
|
||||
Thank you for your support!
|
||||
|
||||
|
||||
|
||||
- _David Aronchick, Senior Product Manager for Kubernetes, Google_
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes Community Meeting Notes - 20160225 "
|
||||
date: Wednesday, March 01, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
##### February 25th - Redspread demo, 1.2 update and planning 1.3, newbie introductions, SIG-networking and a shout out to CoreOS blog post.
|
||||
|
||||
The Kubernetes contributing community meets most Thursdays at 10:00PT to discuss the project's status via videoconference. Here are the notes from the latest meeting.
|
||||
|
||||
Note taker: [Ilan Rabinovich]
|
||||
* Quick call out for sharing presentations/slides [JBeda]
|
||||
* Demo (10 min):[ Redspread][1] [Mackenzie Burnett, Dan Gillespie]
|
||||
* 1.2 Release Watch [T.J. Goltermann]
|
||||
* currently about 80 issues in the queue that need to be addressed before branching.
|
||||
* currently looks like March 7th may slip to later in the week, but up in the air until flakey tests are resolved.
|
||||
* non-1.2 changes may be delayed in review/merging until 1.2 stabilization work completes.
|
||||
* 1.3 release planning
|
||||
* Newbie Introductions
|
||||
* SIG Reports -
|
||||
* Networking [Tim Hockin]
|
||||
* Scale [Bob Wise]
|
||||
* meeting last Friday went very well. Discussed charter AND a working deployment
|
||||
* moved meeting to Thursdays @ 1 (so in 3 hours!)
|
||||
* Rob is posting a Cluster Ops announce on TheNewStack to recruit more members
|
||||
* GSoC participation -- no application submitted. [Sarah Novotny]
|
||||
* Brian Grant has offered to review PRs that need attention for 1.2
|
||||
* Dynamic Provisioning
|
||||
* Currently overlaps a bit with the ubernetes work
|
||||
* PR in progress.
|
||||
* Should work in 1.2, but being targeted more in 1.3
|
||||
* Next meeting is March 3rd.
|
||||
* Demo from Weave on Kubernetes Anywhere
|
||||
* Another Kubernetes 1.2 update
|
||||
* Update from CNCF update
|
||||
* 1.3 commitments from google
|
||||
* No meeting on March 10th.
|
||||
|
||||
To get involved in the Kubernetes community consider joining our [Slack channel][2], taking a look at the [Kubernetes project][3] on GitHub, or join the [Kubernetes-dev Google group][4]. If you're really excited, you can do all of the above and join us for the next community conversation — March 3rd, 2016. Please add yourself or a topic you want to know about to the [agenda][5] and get a calendar invitation by joining [this group][6].
|
||||
|
||||
The full recording is available on YouTube in the growing archive of [Kubernetes Community Meetings][7]. _\-- Kubernetes Community_
|
||||
|
||||
[1]: https://redspread.com/
|
||||
[2]: http://slack.k8s.io/
|
||||
[3]: https://github.com/kubernetes/
|
||||
[4]: https://groups.google.com/forum/#!forum/kubernetes-dev
|
||||
[5]: https://docs.google.com/document/d/1VQDIAB0OqiSjIHI8AWMvSdceWhnz56jNpZrLs6o7NJY/edit#
|
||||
[6]: https://groups.google.com/forum/#!forum/kubernetes-community-video-chat
|
||||
[7]: https://www.youtube.com/playlist?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ
|
||||
|
|
@ -0,0 +1,87 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Kubernetes in the Enterprise with Fujitsu’s Cloud Load Control "
|
||||
date: Saturday, March 11, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Today’s guest post is by Florian Walker, Product Manager at Fujitsu and working on Cloud Load Control, an offering focused on the usage of Kubernetes in an enterprise context. Florian tells us what potential Fujitsu sees in Kubernetes, and how they make it accessible to enterprises.
|
||||
|
||||
Earlier this year, Fujitsu released its Kubernetes-based offering Fujitsu ServerView [Cloud Load Control](http://www.fujitsu.com/software/clc/) (CLC) to the public. Some might be surprised since Fujitsu’s reputation is not necessarily related to software development, but rather to hardware manufacturing and IT services. As a long-time member of the Linux foundation and founding member of the Open Container Initiative and the Cloud Native Computing Foundation, Fujitsu does not only build software, but is committed to open source software, and contributes to several projects, including Kubernetes. But we not only believe in Kubernetes as an open source project, we also chose it as the core of our offering, because it provides the best balance of feature set, resource requirements and complexity to run distributed applications at scale.
|
||||
|
||||
Today, we want to take you on a short tour explaining the background of our offering, why we think Kubernetes is the right fit for your customers and what value Cloud Load Control provides on top of it.
|
||||
**A long long time ago…**
|
||||
|
||||
In mid 2014 we looked at the challenges enterprises are facing in the context of digitization, where traditional enterprises experience that more and more competitors from the IT sector are pushing into the core of their markets. A big part of Fujitsu’s customers are such traditional businesses, so we considered how we could help them and came up with three basic principles:
|
||||
|
||||
-
|
||||
Decouple applications from infrastructure - Focus on where the value for the customer is: the application.
|
||||
-
|
||||
Decompose applications - Build applications from smaller, loosely coupled parts. Enable reconfiguration of those parts depending on the needs of the business. Also encourage innovation by low-cost experiments.
|
||||
-
|
||||
Automate everything - Fight the increasing complexity of the first two points by introducing a high degree of automation.
|
||||
|
||||
We found that Linux containers themselves cover the first point and touch the second. But at this time there was little support for creating distributed applications and running them managed automatically. We found Kubernetes as the missing piece.
|
||||
**Not a free lunch**
|
||||
|
||||
The general approach of Kubernetes in managing containerized workload is convincing, but as we looked at it with the eyes of customers, we realized that it’s not a free lunch. Many customers are medium-sized companies whose core business is often bound to strict data protection regulations. The top three requirements we identified are:
|
||||
|
||||
-
|
||||
On-premise deployments (with the option for hybrid scenarios)
|
||||
-
|
||||
Efficient operations as part of a (much) bigger IT infrastructure
|
||||
-
|
||||
Enterprise-grade support, potentially on global scale
|
||||
|
||||
We created Cloud Load Control with these requirements in mind. It is basically a distribution of Kubernetes targeted for on-premise use, primarily focusing on operational aspects of container infrastructure. We are committed to work with the community, and contribute all relevant changes and extensions upstream to the Kubernetes project.
|
||||
**On-premise deployments**
|
||||
|
||||
As Kubernetes core developer Tim Hockin often puts it in his [talks](https://speakerdeck.com/thockin), Kubernetes is "a story with two parts" where setting up a Kubernetes cluster is not the easy part and often challenging due to variations in infrastructure. This is in particular true when it comes to production-ready deployments of Kubernetes. In the public cloud space, a customer could choose a service like Google Container Engine (GKE) to do this job. Since customers have less options on-premise, often they have to consider the deployment by themselves.
|
||||
|
||||
Cloud Load Control addresses these issues. It enables customers to reliably and readily provision a production grade Kubernetes clusters on their own infrastructure, with the following benefits:
|
||||
|
||||
-
|
||||
Proven setup process, lowers risk of problems while setting up the cluster
|
||||
-
|
||||
Reduction of provisioning time to minutes
|
||||
-
|
||||
Repeatable process, relevant especially for large, multi-tenant environments
|
||||
|
||||
Cloud Load Control delivers these benefits for a range of platforms, starting from selected OpenStack distributions in the first versions of Cloud Load Control, and successively adding more platforms depending on customer demand. We are especially excited about the option to remove the virtualization layer and support Kubernetes bare-metal on Fujitsu servers in the long run. By removing a layer of complexity, the total cost to run the system would be decreased and the missing hypervisor would increase performance.
|
||||
|
||||
Right now we are in the process of contributing a generic provider to set up Kubernetes on OpenStack. As a next step in driving multi-platform support, Docker-based deployment of Kubernetes seems to be crucial. We plan to contribute to this feature to ensure it is going to be Beta in Kubernetes 1.3.
|
||||
**Efficient operations**
|
||||
|
||||
Reducing operation costs is the target of any organization providing IT infrastructure. This can be achieved by increasing the efficiency of operations and helping operators to get their job done. Considering large-scale container infrastructures, we found it is important to differentiate between two types of operations:
|
||||
|
||||
-
|
||||
Platform-oriented, relates to the overall infrastructure, often including various systems, one of which might be Kubernetes.
|
||||
-
|
||||
Application-oriented, focusses rather on a single, or a small set of applications deployed on Kubernetes.
|
||||
|
||||
Kubernetes is already great for the application-oriented part. Cloud Load Control was created to help platform-oriented operators to efficiently manage Kubernetes as part of the overall infrastructure and make it easy to execute Kubernetes tasks relevant to them.
|
||||
|
||||
The first version of Cloud Load Control provides a user interface integrated in the OpenStack Horizon dashboard which enables the Platform ops to create and manage their Kubernetes clusters.
|
||||
|
||||

|
||||
|
||||
Clusters are treated as first-class citizens of OpenStack. Their creation is as simple as the creation of a virtual machine. Operators do not need to learn a new system or method of provisioning, and the self-service approach enables large organizations to rapidly provide the Kubernetes infrastructure to their tenants.
|
||||
|
||||
An intuitive UI is crucial for the simplification of operations. This is why we heavily contributed to the [Kubernetes Dashboard](https://github.com/kubernetes/dashboard) project and ship it in Cloud Load Control. Especially for operators who don’t know the Kubernetes CLI by heart, because they have to care about other systems too, a great UI is perfectly suitable to get typical operational tasks done, such as checking the health of the system or deploying a new application.
|
||||
|
||||
Monitoring is essential. With the dashboard, it is possible to get insights on cluster level. To ensure that the OpenStack operators have a deep understanding of their platform, we will soon add an integration with [Monasca](https://wiki.openstack.org/wiki/Monasca), OpenStack’s monitoring-as-a-service project, so metrics of Kubernetes can be analyzed together with OpenStack metrics from a single point of access.
|
||||
**Quality and enterprise-grade support**
|
||||
|
||||
As a Japanese company, quality and customer focus have the highest priority in every product and service we ship. This is where the actual value of Cloud Load Control comes from: it provides a specific version of the open source software which has been intensively tested and hardened to ensure stable operations on a particular set of platforms.
|
||||
|
||||
Acknowledging that container technology and Kubernetes is new territory for a lot of enterprises, expert assistance is the key for setting up and running a production-grade container infrastructure. Cloud Load Control comes with a support service leveraging Fujitsu’s proven support structure. This enables support also for customers operating Kubernetes in different regions of the world, like Europe and Japan, as part of the same offering.
|
||||
**Conclusion**
|
||||
|
||||
2014 seems to be light years away, we believe the decision for Kubernetes was the right one. It is built from the ground-up to enable the creation of container-based, distributed applications, and best supports this use case.
|
||||
|
||||
With Cloud Load Control, we’re excited to enable enterprises to run Kubernetes in production environments and to help their operators to efficiently use it, so DevOps teams can build awesome applications on top of it.
|
||||
|
||||
|
||||
|
||||
_-- Florian Walker, Product Manager, FUJITSU_
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Scaling neural network image classification using Kubernetes with TensorFlow Serving "
|
||||
date: Thursday, March 23, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
In 2011, Google developed an internal deep learning infrastructure called [DistBelief](http://research.google.com/pubs/pub40565.html), which allowed Googlers to build ever larger [neural networks](https://en.wikipedia.org/wiki/Artificial_neural_network) and scale training to thousands of cores. Late last year, Google [introduced TensorFlow](http://googleresearch.blogspot.com/2015/11/tensorflow-googles-latest-machine_9.html), its second-generation machine learning system. TensorFlow is general, flexible, portable, easy-to-use and, most importantly, developed with the open source community.
|
||||
|
||||
[](https://4.bp.blogspot.com/-PDRpnk823Ps/VvHJH3vIyKI/AAAAAAAAA4g/adIWZPfa2W4ObtIaWNbhpl8UyIwk9R7xg/s1600/tensorflowserving-4.png)
|
||||
|
||||
The process of introducing machine learning into your product involves creating and training a model on your dataset, and then pushing the model to production to serve requests. In this blog post, we’ll show you how you can use [Kubernetes](http://kubernetes.io/) with [TensorFlow Serving](http://googleresearch.blogspot.com/2016/02/running-your-models-in-production-with.html), a high performance, open source serving system for machine learning models, to meet the scaling demands of your application.
|
||||
|
||||
Let’s use image classification as an [example](https://tensorflow.github.io/serving/serving_inception). Suppose your application needs to be able to correctly identify an image across a set of categories. For example, given the cute puppy image below, your system should classify it as a retriever.
|
||||
|
||||
| [](https://3.bp.blogspot.com/-rUuOetJfoLc/VvHJHgDYusI/AAAAAAAAA4c/qO9xhVk4iH8EhrSqt3eZbqNGVQXH5fmCg/s1600/tensorflowserving-2.png) |
|
||||
| Image via [Wikipedia](https://commons.wikimedia.org/wiki/File:Golde33443.jpg) |
|
||||
|
||||
You can implement image classification with TensorFlow using the [Inception-v3 model](http://googleresearch.blogspot.com/2016/03/train-your-own-image-classifier-with.html) trained on the data from the [ImageNet dataset](http://www.image-net.org/). This dataset contains images and their labels, which allows the TensorFlow learner to train a model that can be used by your application in production.
|
||||
|
||||
[](https://4.bp.blogspot.com/-oaJYNPqiqIc/VvHJH2Z19cI/AAAAAAAAA4k/xq8m0kqRIOUewTZLDvzjPh6YLHG4MxdSQ/s1600/tensorflowserving-1.png)
|
||||
Once the model is trained and [exported](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/session_bundle/exporter.py), [TensorFlow Serving](https://tensorflow.github.io/serving/) uses the model to perform inference — predictions based on new data presented by its clients. In our example, clients submit image classification requests over [gRPC](http://www.grpc.io/), a high performance, open source RPC framework from Google.
|
||||
|
||||
[](https://4.bp.blogspot.com/-g2S3V47h7BY/VvHJIkBlTiI/AAAAAAAAA4o/wISpFzB6kvIZxJHlnmM7-XYzZYl1YFfDA/s1600/tensorflowserving-5.png)
|
||||
|
||||
Inference can be very resource intensive. Our server executes the following TensorFlow graph to process every classification request it receives. The Inception-v3 model has over 27 million parameters and runs 5.7 billion floating point operations per inference.
|
||||
|
||||
| [](https://2.bp.blogspot.com/-Gcb6gxzqDkE/VvHJHE7yD3I/AAAAAAAAA4Y/4EZD83OV_8goqodV2pcaQKYeinokf9UuA/s1600/tensorflowserving-3.png) |
|
||||
| Schematic diagram of Inception-v3 |
|
||||
|
||||
Fortunately, this is where Kubernetes can help us. Kubernetes distributes inference request processing across a cluster using its [External Load Balancer](http://kubernetes.io/docs/user-guide/load-balancer/). Each [pod](http://kubernetes.io/docs/user-guide/pods/) in the cluster contains a [TensorFlow Serving Docker image](https://tensorflow.github.io/serving/docker) with the TensorFlow Serving-based gRPC server and a trained Inception-v3 model. The model is represented as a [set of files](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/session_bundle/README.md) describing the shape of the TensorFlow graph, model weights, assets, and so on. Since everything is neatly packaged together, we can dynamically scale the number of replicated pods using the [Kubernetes Replication Controller](http://kubernetes.io/docs/user-guide/replication-controller/operations/) to keep up with the service demands.
|
||||
|
||||
To help you try this out yourself, we’ve written a [step-by-step tutorial](https://tensorflow.github.io/serving/serving_inception), which shows you how to create the TensorFlow Serving Docker container to serve the Inception-v3 image classification model, configure a Kubernetes cluster and run classification requests against it. We hope this will make it easier for you to integrate machine learning into your own applications and scale it with Kubernetes! To learn more about TensorFlow Serving, check out [tensorflow.github.io/serving](http://tensorflow.github.io/serving).
|
||||
|
||||
- _Fangwei Li, Software Engineer, Google_
|
||||
|
|
@ -0,0 +1,115 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " State of the Container World, February 2016 "
|
||||
date: Wednesday, March 01, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Hello, and welcome to the second installment of the Kubernetes state of the container world survey. At the beginning of February we sent out a survey about people’s usage of containers, and wrote about the [results from the January survey](http://blog.kubernetes.io/2016/02/state-of-container-world-january-2016.html). Here we are again, as before, while we try to reach a large and representative set of respondents, this survey was publicized across the social media account of myself and others on the Kubernetes team, so I expect some pro-container and Kubernetes bias in the data. We continue to try to get as large an audience as possible, and in that vein, please go and take the [March survey](https://docs.google.com/a/google.com/forms/d/1hlOEyjuN4roIbcAAUbDhs7xjNMoM8r-hqtixf6zUsp4/viewform) and share it with your friends and followers everywhere! Without further ado, the numbers...
|
||||
|
||||
## Containers continue to gain ground
|
||||
|
||||
In January, 71% of respondents were currently using containers, in February, 89% of respondents were currently using containers. The percentage of users not even considering containers also shrank from 4% in January to a surprising 0% in February. We’ll see if that holds consistent in March. Likewise, the usage of containers continued to march across the dev/canary/prod lifecycle. In all parts of the lifecycle, container usage increased:
|
||||
|
||||
|
||||
-
|
||||
Development: 80% -\> 88%
|
||||
-
|
||||
Test: 67% -\> 72%
|
||||
-
|
||||
Pre production: 41% -\> 55%
|
||||
-
|
||||
Production: 50% -\> 62%
|
||||
What is striking in this is that pre-production growth continued, even as workloads were clearly transitioned into true production. Likewise the share of people considering containers for production rose from 78% in January to 82% in February. Again we’ll see if the trend continues into March.
|
||||
|
||||
## Container and cluster sizes
|
||||
|
||||
We asked some new questions in the survey too, around container and cluster sizes, and there were some interesting numbers:
|
||||
|
||||
How many containers are you running?
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
How many machines are you running containers on?
|
||||
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
So while container usage continues to grow, the size and scope continue to be quite modest, with more than 50% of users running fewer than 50 containers on fewer than 10 machines.
|
||||
|
||||
## Things stay the same
|
||||
|
||||
Across the orchestration space, things seemed pretty consistent between January and February (Kubernetes is quite popular with folks (54% -\> 57%), though again, please see the note at the top about the likely bias in our survey population). Shell scripts likewise are also quite popular and holding steady. You all certainly love your Bash (don’t worry, we do too ;)
|
||||
Likewise people continue to use cloud services both in raw IaaS form (10% on GCE, 30% on EC2, 2% on Azure) as well as cloud container services (16% for GKE, 11% on ECS, 1% on ACS). Though the most popular deployment target by far remains your laptop/desktop at ~53%.
|
||||
|
||||
## Raw data
|
||||
|
||||
As always, the complete raw data is available in a spreadsheet [here](https://docs.google.com/spreadsheets/d/126nnv9Q9avxDvC82irJGUDK3UODokILZOQe5X_WB9VQ/edit?usp=sharing).
|
||||
|
||||
## Conclusions
|
||||
|
||||
Containers continue to gain in popularity and usage. The world of orchestration is somewhat stabilizing, and cloud services continue to be a common place to run containers, though your laptop is even more popular.
|
||||
|
||||
And if you are just getting started with containers (or looking to move beyond your laptop) please visit us at [kubernetes.io](http://kubernetes.io/) and [Google Container Engine](https://cloud.google.com/container-engine/). ‘Till next month, please get your friends, relatives and co-workers to take our [March survey](https://docs.google.com/a/google.com/forms/d/1hlOEyjuN4roIbcAAUbDhs7xjNMoM8r-hqtixf6zUsp4/viewform)!
|
||||
|
||||
|
||||
|
||||
Thanks!
|
||||
|
||||
_-- Brendan Burns, Software Engineer, Google_
|
||||
|
|
@ -0,0 +1,138 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Using Spark and Zeppelin to process big data on Kubernetes 1.2 "
|
||||
date: Thursday, March 30, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor's note: this is the fifth post in a [series of in-depth posts](http://blog.kubernetes.io/2016/03/five-days-of-kubernetes-12.html) on what's new in Kubernetes 1.2 _
|
||||
|
||||
With big data usage growing exponentially, many Kubernetes customers have expressed interest in running [Apache Spark](http://spark.apache.org/) on their Kubernetes clusters to take advantage of the portability and flexibility of containers. Fortunately, with Kubernetes 1.2, you can now have a platform that runs Spark and Zeppelin, and your other applications side-by-side.
|
||||
|
||||
|
||||
### Why Zeppelin?
|
||||
[Apache Zeppelin](https://zeppelin.incubator.apache.org/) is a web-based notebook that enables interactive data analytics. As one of its backends, Zeppelin connects to Spark. Zeppelin allows the user to interact with the Spark cluster in a simple way, without having to deal with a command-line interpreter or a Scala compiler.
|
||||
|
||||
|
||||
### Why Kubernetes?
|
||||
There are many ways to run Spark outside of Kubernetes:
|
||||
|
||||
|
||||
- You can run it using dedicated resources, in Standalone mode
|
||||
- You can run it on a [YARN](https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/YARN.html) cluster, co-resident with Hadoop and HDFS
|
||||
- You can run it on a [Mesos](http://mesos.apache.org/) cluster alongside other Mesos applications
|
||||
|
||||
|
||||
|
||||
So why would you run Spark on Kubernetes?
|
||||
|
||||
|
||||
- A single, unified interface to your cluster: Kubernetes can manage a broad range of workloads; no need to deal with YARN/HDFS for data processing and a separate container orchestrator for your other applications.
|
||||
- Increased server utilization: share nodes between Spark and cloud-native applications. For example, you may have a streaming application running to feed a streaming Spark pipeline, or a nginx pod to serve web traffic — no need to statically partition nodes.
|
||||
- Isolation between workloads: Kubernetes' [Quality of Service](https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/proposals/resource-qos.md) mechanism allows you to safely co-schedule batch workloads like Spark on the same nodes as latency-sensitive servers.
|
||||
|
||||
|
||||
|
||||
### Launch Spark
|
||||
For this demo, we’ll be using [Google Container Engine](https://cloud.google.com/container-engine/) (GKE), but this should work anywhere you have installed a Kubernetes cluster. First, create a Container Engine cluster with storage-full scopes. These Google Cloud Platform scopes will allow the cluster to write to a private Google Cloud Storage Bucket (we’ll get to why you need that later):
|
||||
|
||||
```
|
||||
$ gcloud container clusters create spark --scopes storage-full
|
||||
--machine-type n1-standard-4
|
||||
```
|
||||
_Note_: We’re using n1-standard-4 (which are larger than the default node size) to demonstrate some features of Horizontal Pod Autoscaling. However, Spark works just fine on the default node size of n1-standard-1.
|
||||
|
||||
After the cluster’s created, you’re ready to launch Spark on Kubernetes using the config files in the Kubernetes GitHub repo:
|
||||
|
||||
```
|
||||
$ git clone https://github.com/kubernetes/kubernetes.git
|
||||
$ kubectl create -f kubernetes/examples/spark
|
||||
```
|
||||
`‘kubernetes/examples/spark’` is a directory, so this command tells kubectl to create all of the Kubernetes objects defined in all of the YAML files in that directory. You don’t have to clone the entire repository, but it makes the steps of this demo just a little easier.
|
||||
|
||||
The pods (especially Apache Zeppelin) are somewhat large, so it may take some time for Docker to pull the images. Once everything is running, you should see something similar to the following:
|
||||
|
||||
```
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
spark-master-controller-v4v4y 1/1 Running 0 21h
|
||||
spark-worker-controller-7phix 1/1 Running 0 21h
|
||||
spark-worker-controller-hq9l9 1/1 Running 0 21h
|
||||
spark-worker-controller-vwei5 1/1 Running 0 21h
|
||||
zeppelin-controller-t1njl 1/1 Running 0 21h
|
||||
```
|
||||
You can see that Kubernetes is running one instance of Zeppelin, one Spark master and three Spark workers.
|
||||
|
||||
|
||||
### Set up the Secure Proxy to Zeppelin
|
||||
Next you’ll set up a secure proxy from your local machine to Zeppelin, so you can access the Zeppelin instance running in the cluster from your machine. (Note: You’ll need to change this command to the actual Zeppelin pod that was created on your cluster.)
|
||||
|
||||
```
|
||||
$ kubectl port-forward zeppelin-controller-t1njl 8080:8080
|
||||
```
|
||||
This establishes a secure link to the Kubernetes cluster and pod (`zeppelin-controller-t1njl`) and then forwards the port in question (8080) to local port 8080, which will allow you to use Zeppelin safely.
|
||||
|
||||
|
||||
### Now that I have Zeppelin up and running, what do I do with it?
|
||||
For our example, we’re going to show you how to build a simple movie recommendation model. This is based on the code [on the Spark website](http://spark.apache.org/docs/1.5.2/mllib-collaborative-filtering.html), modified slightly to make it interesting for Kubernetes.
|
||||
|
||||
Now that the secure proxy is up, visit [http://localhost:8080/](http://localhost:8080/). You should see an intro page like this:
|
||||
|
||||
|
||||
[](https://1.bp.blogspot.com/-rk6iWAauxGM/VvwPoE25QFI/AAAAAAAAA6c/NOBZzIWfTYEqJin-tWY1zrePPOqr3TZWQ/s1600/Spark2.png)
|
||||
|
||||
|
||||
Click “Import note,” give it an arbitrary name (e.g. “Movies”), and click “Add from URL.” For a URL, enter:
|
||||
|
||||
```
|
||||
https://gist.githubusercontent.com/zmerlynn/875fed0f587d12b08ec9/raw/6
|
||||
eac83e99caf712482a4937800b17bbd2e7b33c4/movies.json
|
||||
```
|
||||
Then click “Import Note.” This will give you a ready-made Zeppelin note for this demo. You should now have a “Movies” notebook (or whatever you named it). If you click that note, you should see a screen similar to this:
|
||||
|
||||
|
||||
[](https://2.bp.blogspot.com/-qyjtrUpXisg/VvwPvSPnWNI/AAAAAAAAA6g/Son7C2yWolk28KLSy63acGPnuFGjJEoeg/s1600/Spark1.png)
|
||||
|
||||
You can now click the Play button, near the top-right of the PySpark code block, and you’ll create a new, in-memory movie recommendation model! In the Spark application model, Zeppelin acts as a [Spark Driver Program](https://spark.apache.org/docs/1.5.2/cluster-overview.html), interacting with the Spark cluster master to get its work done. In this case, the driver program that’s running in the Zeppelin pod fetches the data and sends it to the Spark master, which farms it out to the workers, which crunch out a movie recommendation model using the code from the driver. With a larger data set in Google Cloud Storage (GCS), it would be easy to pull the data from GCS as well. In the next section, we’ll talk about how to save your data to GCS.
|
||||
|
||||
|
||||
### Working with Google Cloud Storage (Optional)
|
||||
For this demo, we’ll be using Google Cloud Storage, which will let us store our model data beyond the life of a single pod. Spark for Kubernetes is built with the [Google Cloud Storage](https://cloud.google.com/storage/) connector built-in. As long as you can access your data from a virtual machine in the Google Container Engine project where your Kubernetes nodes are running, you can access your data with the GCS connector on the Spark image.
|
||||
|
||||
If you want, you can change the variables at the top of the note so that the example will actually save and restore a model for the movie recommendation engine — just point those variables at a GCS bucket that you have access to. If you want to create a GCS bucket, you can do something like this on the command line:
|
||||
|
||||
```
|
||||
$ gsutil mb gs://my-spark-models
|
||||
```
|
||||
You’ll need to change this URI to something that is unique for you. This will create a bucket that you can use in the example above.
|
||||
|
||||
**Note** : Computing the model and saving it is much slower than computing the model and throwing it away. This is expected. However, if you plan to reuse a model, it’s faster to compute the model and save it and then restore it each time you want to use it, rather than throw away and recompute the model each time.
|
||||
|
||||
### Using Horizontal Pod Autoscaling with Spark (Optional)
|
||||
Spark is somewhat elastic to workers coming and going, which means we have an opportunity: we can use [Kubernetes Horizontal Pod Autoscaling](http://kubernetes.io/docs/user-guide/horizontal-pod-autoscaling/) to scale-out the Spark worker pool automatically, setting a target CPU threshold for the workers and a minimum/maximum pool size. This obviates the need for having to configure the number of worker replicas manually.
|
||||
|
||||
Create the Autoscaler like this (note: if you didn’t change the machine type for the cluster, you probably want to limit the --max to something smaller):
|
||||
|
||||
```
|
||||
$ kubectl autoscale --min=1 --cpu-percent=80 --max=10 \
|
||||
rc/spark-worker-controller
|
||||
```
|
||||
To see the full effect of autoscaling, wait for the replication controller to settle back to one replica. Use `‘kubectl get rc’` and wait for the “replicas” column on spark-worker-controller to fall back to 1.
|
||||
|
||||
The workload we ran before ran too quickly to be terribly interesting for HPA. To change the workload to actually run long enough to see autoscaling become active, change the “rank = 100” line in the code to “rank = 200.” After you hit play, the Spark worker pool should rapidly increase to 20 pods. It will take up to 5 minutes after the job completes before the worker pool falls back down to one replica.
|
||||
|
||||
|
||||
### Conclusion
|
||||
In this article, we showed you how to run Spark and Zeppelin on Kubernetes, as well as how to use Google Cloud Storage to store your Spark model and how to use Horizontal Pod Autoscaling to dynamically size your Spark worker pool.
|
||||
|
||||
This is the first in a series of articles we’ll be publishing on how to run big data frameworks on Kubernetes — so stay tuned!
|
||||
|
||||
Please join our community and help us build the future of Kubernetes! There are many ways to participate. If you’re particularly interested in Kubernetes and big data, you’ll be interested in:
|
||||
|
||||
- Our [Big Data slack channel](https://kubernetes.slack.com/messages/sig-big-data/)
|
||||
- Our [Kubernetes Big Data Special Interest Group email list](https://groups.google.com/forum/#!forum/kubernetes-sig-big-data)
|
||||
- The Big Data “Special Interest Group,” which meets every Monday at 1pm (13h00) Pacific Time at [SIG-Big-Data hangout ](https://plus.google.com/hangouts/_/google.com/big-data)
|
||||
And of course for more information about the project in general, go to [www.kubernetes.io](http://www.kubernetes.io/).
|
||||
|
||||
-- _Zach Loafman, Software Engineer, Google_
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Adding Support for Kubernetes in Rancher "
|
||||
date: Saturday, April 08, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Today’s guest post is written by Darren Shepherd, Chief Architect at Rancher Labs, an open-source software platform for managing containers._
|
||||
|
||||
Over the last year, we’ve seen a tremendous increase in the number of companies looking to leverage containers in their software development and IT organizations. To achieve this, organizations have been looking at how to build a centralized container management capability that will make it simple for users to get access to containers, while centralizing visibility and control with the IT organization. In 2014 we started the open-source Rancher project to address this by building a management platform for containers.
|
||||
|
||||
Recently we shipped Rancher v1.0. With this latest release, [Rancher](http://www.rancher.com/), an open-source software platform for managing containers, now supports Kubernetes as a container orchestration framework when creating environments. Now, launching a Kubernetes environment with Rancher is fully automated, delivering a functioning cluster in just 5-10 minutes.
|
||||
|
||||
We created Rancher to provide organizations with a complete management platform for containers. As part of that, we’ve always supported deploying Docker environments natively using the Docker API and Docker Compose. Since its inception, we’ve been impressed with the operational maturity of Kubernetes, and with this release, we’re making it possible to deploy a variety of container orchestration and scheduling frameworks within the same management platform.
|
||||
|
||||
Adding Kubernetes gives users access to one of the fastest growing platforms for deploying and managing containers in production. We’ll provide first-class Kubernetes support in Rancher going forward and continue to support native Docker deployments.
|
||||
|
||||
**Bringing Kubernetes to Rancher**
|
||||
|
||||
|
||||
{: .big-img}
|
||||
|
||||
Our platform was already extensible for a variety of different packaging formats, so we were optimistic about embracing Kubernetes. We were right, working with the Kubernetes project has been a fantastic experience as developers. The design of the project made this incredibly easy, and we were able to utilize plugins and extensions to build a distribution of Kubernetes that leveraged our infrastructure and application services. For instance, we were able to plug in Rancher’s software defined networking, storage management, load balancing, DNS and infrastructure management functions directly into Kubernetes, without even changing the code base.
|
||||
|
||||
|
||||
|
||||
Even better, we have been able to add a number of services around the core Kubernetes functionality. For instance, we implemented our popular [application catalog on top of Kubernetes](https://github.com/rancher/community-catalog/tree/master/kubernetes-templates). Historically we’ve used Docker Compose to define application templates, but with this release, we now support Kubernetes services, replication controllers and pods to deploy applications. With the catalog, users connect to a git repo and automate deployment and upgrade of an application deployed as Kubernetes services. Users then configure and deploy a complex multi-node enterprise application with one click of a button. Upgrades are fully automated as well, and pushed out centrally to users.
|
||||
|
||||
|
||||
|
||||
**Giving Back**
|
||||
|
||||
|
||||
|
||||
Like Kubernetes, Rancher is an open-source software project, free to use by anyone, and given to the community without any restrictions. You can find all of the source code, upcoming releases and issues for Rancher on [GitHub](http://www.github.com/rancher/rancher). We’re thrilled to be joining the Kubernetes community, and look forward to working with all of the other contributors. View a demo of the new Kubernetes support in Rancher [here](http://rancher.com/kubernetes/).
|
||||
|
||||
|
||||
|
||||
_-- Darren Shepherd, Chief Architect, Rancher Labs_
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " SIG-UI: the place for building awesome user interfaces for Kubernetes "
|
||||
date: Thursday, April 20, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor’s note: This week we’re featuring [Kubernetes Special Interest Groups](https://github.com/kubernetes/kubernetes/wiki/Special-Interest-Groups-(SIGs)); Today’s post is by the SIG-UI team describing their mission and showing the cool projects they work on._
|
||||
|
||||
Kubernetes has been handling production workloads for a long time now (see [case studies](http://kubernetes.io/#talkToUs)). It runs on public, private and hybrid clouds as well as bare metal. It can handle all types of workloads (web serving, batch and mixed) and enable [zero-downtime rolling updates](https://www.youtube.com/watch?v=9C6YeyyUUmI). It abstracts service discovery, load balancing and storage so that applications running on Kubernetes aren’t restricted to a specific cloud provider or environment.
|
||||
|
||||
The abundance of features that Kubernetes offers is fantastic, but implementing a user-friendly, easy-to-use user interface is quite challenging. How shall all the features be presented to users? How can we gradually expose the Kubernetes concepts to newcomers, while empowering experts? There are lots of other challenges like these that we’d like to solve. This is why we created a special interest group for Kubernetes user interfaces.
|
||||
|
||||
**Meet SIG-UI: the place for building awesome user interfaces for Kubernetes**
|
||||
The SIG UI mission is simple: we want to radically improve the user experience of all Kubernetes graphical user interfaces. Our goal is to craft UIs that are used by devs, ops and resource managers across their various environments, that are simultaneously intuitive enough for newcomers to Kubernetes to understand and use.
|
||||
|
||||
SIG UI members have been independently working on a variety of UIs for Kubernetes. So far, the projects we’ve seen have been either custom internal tools coupled to their company workflows, or specialized API frontends. We have realized that there is a need for a universal UI that can be used standalone or be a standard base for custom vendors. That’s how we started the [Dashboard UI](http://github.com/kubernetes/dashboard) project. Version 1.0 has been recently released and is included with Kubernetes as a cluster addon. The Dashboard project was recently featured in a [talk at KubeCon EU](https://www.youtube.com/watch?v=sARH5zQhovE), and we have ambitious plans for the future!
|
||||
|
||||
|  |
|
||||
| Dashboard UI v1.0 home screen showing applications running in a Kubernetes cluster. |
|
||||
|
||||
|
||||
Since the initial release of the Dashboard UI we have been thinking hard about what to do next and what users of UIs for Kubernetes think about our plans. We’ve had many internal discussions on this topic, but most importantly, reached out directly to our users. We created a questionnaire asking a few demographic questions as well as questions for prioritizing use cases. We received more than 200 responses from a wide spectrum of user types, which in turn helped to shape the Dashboard UI’s [current roadmap](https://github.com/kubernetes/dashboard/blob/master/docs/devel/roadmap.md). Our members from LiveWyer summarised the results in a [nice infographic](http://static.lwy.io/img/kubernetes_dashboard_infographic.png).
|
||||
|
||||
**Connect with us**
|
||||
|
||||
We believe that collaboration is the key to SIG UI success, so we invite everyone to connect with us. Whether you’re a Kubernetes user who wants to provide feedback, develop your own UIs, or simply want to collaborate on the Dashboard UI project, feel free to get in touch. There are many ways you can contact us:
|
||||
|
||||
- Email us at the [sig-ui mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-ui)
|
||||
- Chat with us on the [Kubernetes Slack](http://slack.k8s.io/): #[sig-ui channel](https://kubernetes.slack.com/messages/sig-ui/)
|
||||
- Join our meetings: biweekly on Wednesdays 9AM PT (US friendly) and weekly 10AM CET (Europe friendly). See the [SIG-UI calendar](https://calendar.google.com/calendar/embed?src=google.com_52lm43hc2kur57dgkibltqc6kc%40group.calendar.google.com&ctz=Europe/Warsaw) for details.
|
||||
|
||||
_-- Piotr Bryk, Software Engineer, Google_
|
||||
|
|
@ -0,0 +1,203 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Configuration management with Containers "
|
||||
date: Tuesday, April 04, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor’s note: this is our seventh post in a [series of in-depth posts](http://blog.kubernetes.io/2016/03/five-days-of-kubernetes-12.html) on what's new in Kubernetes 1.2_
|
||||
|
||||
A [good practice](http://12factor.net/config) when writing applications is to separate application code from configuration. We want to enable application authors to easily employ this pattern within Kubernetes. While the Secrets API allows separating information like credentials and keys from an application, no object existed in the past for ordinary, non-secret configuration. In [Kubernetes 1.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md/#v120), we've added a new API resource called ConfigMap to handle this type of configuration data.
|
||||
|
||||
|
||||
#### **The basics of ConfigMap**
|
||||
The ConfigMap API is simple conceptually. From a data perspective, the ConfigMap type is just a set of key-value pairs. Applications are configured in different ways, so we need to be flexible about how we let users store and consume configuration data. There are three ways to consume a ConfigMap in a pod:
|
||||
|
||||
|
||||
- Command line arguments
|
||||
- Environment variables
|
||||
- Files in a volume
|
||||
|
||||
These different methods lend themselves to different ways of modeling the data being consumed. To be as flexible as possible, we made ConfigMap hold both fine- and/or coarse-grained data. Further, because applications read configuration settings from both environment variables and files containing configuration data, we built ConfigMap to support either method of access. Let’s take a look at an example ConfigMap that contains both types of configuration:
|
||||
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
|
||||
kind: ConfigMap
|
||||
|
||||
metadata:
|
||||
|
||||
  name: example-configmap
|
||||
|
||||
data:
|
||||
|
||||
# property-like keys
|
||||
|
||||
game-properties-file-name: game.properties
|
||||
|
||||
ui-properties-file-name: ui.properties
|
||||
|
||||
# file-like keys
|
||||
|
||||
game.properties: |
|
||||
|
||||
enemies=aliens
|
||||
|
||||
lives=3
|
||||
|
||||
enemies.cheat=true
|
||||
|
||||
enemies.cheat.level=noGoodRotten
|
||||
|
||||
secret.code.passphrase=UUDDLRLRBABAS
|
||||
|
||||
secret.code.allowed=true
|
||||
|
||||
secret.code.lives=30
|
||||
|
||||
ui.properties: |
|
||||
|
||||
color.good=purple
|
||||
|
||||
color.bad=yellow
|
||||
|
||||
allow.textmode=true
|
||||
|
||||
how.nice.to.look=fairlyNice
|
||||
```
|
||||
|
||||
|
||||
Users that have used Secrets will find it easy to begin using ConfigMap — they’re very similar. One major difference in these APIs is that Secret values are stored as byte arrays in order to support storing binaries like SSH keys. In JSON and YAML, byte arrays are serialized as base64 encoded strings. This means that it’s not easy to tell what the content of a Secret is from looking at the serialized form. Since ConfigMap is intended to hold only configuration information and not binaries, values are stored as strings, and thus are readable in the serialized form.
|
||||
|
||||
|
||||
|
||||
We want creating ConfigMaps to be as flexible as storing data in them. To create a ConfigMap object, we’ve added a kubectl command called `kubectl create configmap` that offers three different ways to specify key-value pairs:
|
||||
|
||||
|
||||
- Specify literal keys and value
|
||||
- Specify an individual file
|
||||
- Specify a directory to create keys for each file
|
||||
|
||||
|
||||
|
||||
These different options can be mixed, matched, and repeated within a single command:
|
||||
|
||||
```
|
||||
$ kubectl create configmap my-config \
|
||||
|
||||
--from-literal=literal-key=literal-value \
|
||||
|
||||
--from-file=ui.properties \
|
||||
    --from-file=path/to/config/dir
|
||||
```
|
||||
Consuming ConfigMaps is simple and will also be familiar to users of Secrets. Here’s an example of a Deployment that uses the ConfigMap above to run an imaginary game server:
|
||||
|
||||
```
|
||||
apiVersion: extensions/v1beta1
|
||||
|
||||
kind: Deployment
|
||||
|
||||
metadata:
|
||||
|
||||
name: configmap-example-deployment
|
||||
|
||||
labels:
|
||||
|
||||
name: configmap-example-deployment
|
||||
|
||||
spec:
|
||||
|
||||
replicas: 1
|
||||
|
||||
selector:
|
||||
|
||||
matchLabels:
|
||||
|
||||
name: configmap-example
|
||||
|
||||
template:
|
||||
|
||||
metadata:
|
||||
|
||||
labels:
|
||||
|
||||
name: configmap-example
|
||||
|
||||
spec:
|
||||
|
||||
containers:
|
||||
|
||||
- name: game-container
|
||||
|
||||
image: imaginarygame
|
||||
|
||||
command: ["game-server", "--config-dir=/etc/game/cfg"]
|
||||
|
||||
env:
|
||||
|
||||
# consume the property-like keys in environment variables
|
||||
|
||||
- name: GAME\_PROPERTIES\_NAME
|
||||
|
||||
valueFrom:
|
||||
|
||||
configMapKeyRef:
|
||||
|
||||
name: example-configmap
|
||||
|
||||
key: game-properties-file-name
|
||||
|
||||
- name: UI\_PROPERTIES\_NAME
|
||||
|
||||
valueFrom:
|
||||
|
||||
configMapKeyRef:
|
||||
|
||||
name: example-configmap
|
||||
|
||||
key: ui-properties-file-name
|
||||
|
||||
volumeMounts:
|
||||
|
||||
- name: config-volume
|
||||
|
||||
mountPath: /etc/game
|
||||
|
||||
volumes:
|
||||
|
||||
# consume the file-like keys of the configmap via volume plugin
|
||||
|
||||
- name: config-volume
|
||||
|
||||
configMap:
|
||||
|
||||
name: example-configmap
|
||||
|
||||
items:
|
||||
|
||||
- key: ui.properties
|
||||
|
||||
path: cfg/ui.properties
|
||||
|
||||
- key: game.properties
|
||||
|
||||
path: cfg/game.properties
|
||||
restartPolicy: Never
|
||||
```
|
||||
In the above example, the Deployment uses keys of the ConfigMap via two of the different mechanisms available. The property-like keys of the ConfigMap are used as environment variables to the single container in the Deployment template, and the file-like keys populate a volume. For more details, please see the [ConfigMap docs](http://kubernetes.io/docs/user-guide/configmap/).
|
||||
|
||||
We hope that these basic primitives are easy to use and look forward to seeing what people build with ConfigMaps. Thanks to the community members that provided feedback about this feature. Special thanks also to Tamer Tas who made a great contribution to the proposal and implementation of ConfigMap.
|
||||
|
||||
If you’re interested in Kubernetes and configuration, you’ll want to participate in:
|
||||
|
||||
- Our Configuration [Slack channel](https://kubernetes.slack.com/messages/sig-configuration/)
|
||||
- Our [Kubernetes Configuration Special Interest Group](https://groups.google.com/forum/#!forum/kubernetes-sig-config) email list
|
||||
- The Configuration “Special Interest Group,” which meets weekly on Wednesdays at 10am (10h00) Pacific Time at [SIG-Config hangout](https://hangouts.google.com/hangouts/_/google.com/kube-sig-config)
|
||||
|
||||
|
||||
|
||||
And of course for more information about the project in general, go to [www.kubernetes.io](http://www.kubernetes.io/) and follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio).
|
||||
|
||||
-- _Paul Morie, Senior Software Engineer, Red Hat_
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Container survey results - March 2016 "
|
||||
date: Saturday, April 08, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Last month, we had our third installment of our container survey and today we look at the results. (raw data is available [here](https://docs.google.com/spreadsheets/d/13356w6I2xxKnmjblFSsKGVANZGGlX2yFMzb8eOIe2Oo/edit?usp=sharing))
|
||||
|
||||
|
||||
Looking at the headline number, “how many people are using containers” we see a decrease in the number of people currently using containers from 89% to 80%. Obviously, we can’t be certain of the cause of this decrease, but it’s my belief that the previous number was artificially high due to sampling biases and we did a better job getting a broader reach of participants in the March survey and so the March numbers more accurately represent what is going on in the world.
|
||||
|
||||
|
||||
Along the lines of getting an unbiased sample, I’m excited to announce that going forward, we will be partnering with [The New Stack](http://thenewstack.io/) and the [Cloud Native Compute Foundation](http://cncf.io/) to publicize and distribute this container survey. This partnership will enable us to reach a broader audience than we are reaching and thus obtain a significantly more unbiased sample and representative portrayal of current container usage. I’m really excited about this collaboration!
|
||||
|
||||
|
||||
But without further ado, more on the data.
|
||||
|
||||
|
||||
For the rest of the numbers, the March survey shows steady continuation of the numbers that we saw in February. Most of the container usage is still in Development and Testing, though a solid majority (60%) are using it for production as well. For the remaining folks using containers there continues to be a plan to bring containers to production as the “I am planning to” number for production use matches up nearly identically with the numbers for people currently in testing.
|
||||
|
||||
|
||||
Physical and virtual machines continue to be the most popular places to deploy containers, though the March survey shows a fairly substantial drop (48% -\> 35%) in people deploying to physical machines.
|
||||
|
||||
|
||||
Likewise hosted container services show growth, with nearly every service showing some growth. [Google Container Engine](https://cloud.google.com/container-engine/) continues to be the most popular in the survey, followed by the [Amazon EC2 Container Service](https://aws.amazon.com/ecs/). It will be interesting to see how those numbers change as we move to the New Stack survey.
|
||||
|
||||
|
||||
Finally, [Kubernetes](http://kubernetes.io/) is still the favorite for container manager, with [Bash scripts](http://tldp.org/HOWTO/Bash-Prog-Intro-HOWTO.html) still in second place. As with the container service provider numbers I’ll be quite interested to see what this looks like with a broader sample set.
|
||||
|
||||
|
||||
Finally, the absolute use of containers appears to be ticking up. The number of people running more than 250 containers has grown from 12% to nearly 20%. And the number of people running containers on 50 or more machines has grown from 10% to 18%.
|
||||
|
||||
As always, the raw data is available for you to analyze [here](https://docs.google.com/spreadsheets/d/13356w6I2xxKnmjblFSsKGVANZGGlX2yFMzb8eOIe2Oo/edit?usp=sharing).
|
||||
|
||||
--Brendan Burns, Software Engineer, Google
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Introducing the Kubernetes OpenStack Special Interest Group "
|
||||
date: Saturday, April 22, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor’s note: This week we’re featuring [Kubernetes Special Interest Groups](https://github.com/kubernetes/kubernetes/wiki/Special-Interest-Groups-(SIGs)); Today’s post is by the SIG-OpenStack team about their mission to facilitate ideas between the OpenStack and Kubernetes communities. _
|
||||
|
||||
|
||||
|
||||
The community around the Kubernetes project includes a number of Special Interest Groups (SIGs) for the purposes of facilitating focused discussions relating to important subtopics between interested contributors. Today we would like to highlight the [Kubernetes OpenStack SIG](https://github.com/kubernetes/kubernetes/wiki/SIG-Openstack) focused on the interaction between [Kubernetes](http://kubernetes.io/) and [OpenStack](http://www.openstack.org/), the Open Source cloud computing platform.
|
||||
|
||||
There are two high level scenarios that are being discussed in the SIG:
|
||||
|
||||
|
||||
- Using Kubernetes to manage containerized workloads running on top of OpenStack
|
||||
- Using Kubernetes to manage containerized OpenStack services themselves
|
||||
|
||||
In both cases the intent is to help facilitate the cross-pollination of ideas between the growing Kubernetes and OpenStack communities. The OpenStack community itself includes a number of projects broadly aimed at assisting with both of these use cases including:
|
||||
|
||||
|
||||
- [Kolla](http://governance.openstack.org/reference/projects/kolla.html) - Provides OpenStack service containers and deployment tooling for operating OpenStack clouds.
|
||||
- [Kuryr](http://governance.openstack.org/reference/projects/kuryr.html) - Provides bridges between container networking/storage framework models and OpenStack infrastructure services.
|
||||
- [Magnum](http://governance.openstack.org/reference/projects/magnum.html) - Provides containers as a service for OpenStack.
|
||||
- [Murano](http://governance.openstack.org/reference/projects/murano.html) - Provides an Application Catalog service for OpenStack including support for Kubernetes itself, and for containerized applications, managed by Kubernetes.
|
||||
|
||||
|
||||
There are also a number of example templates available to assist with using the OpenStack Orchestration service ([Heat](http://governance.openstack.org/reference/projects/heat.html)) to deploy and configure either Kubernetes itself or offerings built around Kubernetes such as [OpenShift](https://github.com/redhat-openstack/openshift-on-openstack/). While each of these approaches has their own pros and cons the common theme is the ability, or potential ability, to use Kubernetes and where available leverage deeper integration between it and the OpenStack services themselves.
|
||||
|
||||
|
||||
|
||||
Current SIG participants represent a broad array of organizations including but not limited to: CoreOS, eBay, GoDaddy, Google, IBM, Intel, Mirantis, OpenStack Foundation, Rackspace, Red Hat, Romana, Solinea, VMware.
|
||||
|
||||
|
||||
|
||||
The SIG is currently working on [collating information](https://docs.google.com/document/d/1wNl_xcITKwzUsFNRu5npUTJuh9pbJAdzzpG6Cd2Fcp0/edit?ts=57033dd6) about these approaches to help Kubernetes users navigate the OpenStack ecosystem along with feedback on which approaches to the requirements presented work best for operators.
|
||||
|
||||
|
||||
|
||||
**Kubernetes at OpenStack Summit Austin**
|
||||
|
||||
|
||||
|
||||
The [OpenStack Summit](https://www.openstack.org/summit/austin-2016/) is in Austin from April 25th to 29th and is packed with sessions related to containers and container management using Kubernetes. If you plan on joining us in Austin you can review the [schedule](https://www.openstack.org/summit/austin-2016/summit-schedule/) online where you will find a number of sessions, both in the form of presentations and hands on workshops, relating to [Kubernetes](https://www.openstack.org/summit/austin-2016/summit-schedule/global-search?t=Kubernetes) and [containerization](https://www.openstack.org/summit/austin-2016/summit-schedule/global-search?t=containers) at large. Folks from the Kubernetes OpenStack SIG are particularly keen to get the thoughts of operators in the “[Ops: Containers on OpenStack](https://www.openstack.org/summit/austin-2016/summit-schedule/events/9500)” and “[Ops: OpenStack in Containers](https://www.openstack.org/summit/austin-2016/summit-schedule/events/9501)” working sessions.
|
||||
|
||||
|
||||
|
||||
Kubernetes community experts will also be on hand in the Container Expert Lounge to answer your burning questions. You can find the lounge on the 4th floor of the Austin Convention Center.
|
||||
|
||||
|
||||
|
||||
Follow [@kubernetesio](https://twitter.com/kubernetesio) and [#OpenStackSummit](https://twitter.com/search?q=%23openstacksummit) to keep up with the latest updates on Kubernetes at OpenStack Summit throughout the week.
|
||||
|
||||
**Connect With Us**
|
||||
|
||||
If you’re interested in Kubernetes and OpenStack, there are several ways to participate:
|
||||
|
||||
|
||||
- Email us at the [SIG-OpenStack mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-openstack)
|
||||
- Chat with us on the [Kubernetes Slack](http://slack.k8s.io/): [#sig-openstack channel](https://kubernetes.slack.com/messages/sig-openstack/) and #openstack-kubernetes on freenode
|
||||
- Join our meeting occurring every second Tuesday at 2 PM PDT; attend via the zoom videoconference found in our [meeting notes](https://docs.google.com/document/d/1iAQ3LSF_Ky6uZdFtEZPD_8i6HXeFxIeW4XtGcUJtPyU/edit#).
|
||||
|
||||
|
||||
|
||||
_-- Steve Gordon, Principal Product Manager at Red Hat, and Ihor Dvoretskyi, OpenStack Operations Engineer at Mirantis_
|
||||
|
|
@ -0,0 +1,171 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " SIG-Networking: Kubernetes Network Policy APIs Coming in 1.3 "
|
||||
date: Tuesday, April 18, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor’s note: This week we’re featuring [Kubernetes Special Interest Groups](https://github.com/kubernetes/kubernetes/wiki/Special-Interest-Groups-(SIGs)); Today’s post is by the Network-SIG team describing network policy APIs coming in 1.3 - policies for security, isolation and multi-tenancy._
|
||||
|
||||
The [Kubernetes network SIG](https://kubernetes.slack.com/messages/sig-network/) has been meeting regularly since late last year to work on bringing network policy to Kubernetes and we’re starting to see the results of this effort.
|
||||
|
||||
One problem many users have is that the open access network policy of Kubernetes is not suitable for applications that need more precise control over the traffic that accesses a pod or service. Today, this could be a multi-tier application where traffic is only allowed from a tier’s neighbor. But as new Cloud Native applications are built by composing microservices, the ability to control traffic as it flows among these services becomes even more critical.
|
||||
|
||||
In most IaaS environments (both public and private) this kind of control is provided by allowing VMs to join a ‘security group’ where traffic to members of the group is defined by a network policy or Access Control List (ACL) and enforced by a network packet filter.
|
||||
|
||||
The Network SIG started the effort by identifying [specific use case scenarios](https://docs.google.com/document/d/1blfqiH4L_fpn33ZrnQ11v7LcYP0lmpiJ_RaapAPBbNU/edit?pref=2&pli=1#) that require basic network isolation for enhanced security. Getting the API right for these simple and common use cases is important because they are also the basis for the more sophisticated network policies necessary for multi-tenancy within Kubernetes.
|
||||
|
||||
From these scenarios several possible approaches were considered and a minimal [policy specification](https://docs.google.com/document/d/1qAm-_oSap-f1d6a-xRTj6xaH1sYQBfK36VyjB5XOZug/edit) was defined. The basic idea is that if isolation were enabled on a per namespace basis, then specific pods would be selected where specific traffic types would be allowed.
|
||||
|
||||
The simplest way to quickly support this experimental API is in the form of a ThirdPartyResource extension to the API Server, which is possible today in Kubernetes 1.2.
|
||||
|
||||
If you’re not familiar with how this works, the Kubernetes API can be extended by defining ThirdPartyResources that create a new API endpoint at a specified URL.
|
||||
|
||||
#### third-party-res-def.yaml
|
||||
```
|
||||
kind: ThirdPartyResource
|
||||
|
||||
apiVersion: extensions/v1beta1
|
||||
|
||||
metadata:
|
||||
|
||||
name: network-policy.net.alpha.kubernetes.io
|
||||
|
||||
description: "Network policy specification"
|
||||
|
||||
versions:
|
||||
|
||||
- name: v1alpha1
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl create -f third-party-res-def.yaml
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
This will create an API endpoint (one for each namespace):
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
/net.alpha.kubernetes.io/v1alpha1/namespace/default/networkpolicys/
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Third party network controllers can now listen on these endpoints and react as necessary when resources are created, modified or deleted. _Note: With the upcoming release of Kubernetes 1.3 - when the Network Policy API is released in beta form - there will be no need to create a ThirdPartyResource API endpoint as shown above._
|
||||
|
||||
|
||||
|
||||
Network isolation is off by default so that all pods can communicate as they normally do. However, it’s important to know that once network isolation is enabled, all traffic to all pods, in all namespaces is blocked, which means that enabling isolation is going to change the behavior of your pods.
|
||||
|
||||
|
||||
|
||||
Network isolation is enabled by defining the _network-isolation_ annotation on namespaces as shown below:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
net.alpha.kubernetes.io/network-isolation: [on | off]
|
||||
```
|
||||
|
||||
|
||||
|
||||
Once network isolation is enabled, explicit network policies **must be applied** to enable pod communication.
|
||||
|
||||
A policy specification can be applied to a namespace to define the details of the policy as shown below:
|
||||
|
||||
|
||||
|
||||
```
|
||||
POST /apis/net.alpha.kubernetes.io/v1alpha1/namespaces/tenant-a/networkpolicys/
|
||||
|
||||
|
||||
{
|
||||
|
||||
"kind": "NetworkPolicy",
|
||||
|
||||
"metadata": {
|
||||
|
||||
"name": "pol1"
|
||||
|
||||
},
|
||||
|
||||
"spec": {
|
||||
|
||||
"allowIncoming": {
|
||||
|
||||
"from": [
|
||||
|
||||
{ "pods": { "segment": "frontend" } }
|
||||
|
||||
],
|
||||
|
||||
"toPorts": [
|
||||
|
||||
{ "port": 80, "protocol": "TCP" }
|
||||
|
||||
]
|
||||
|
||||
},
|
||||
|
||||
"podSelector": { "segment": "backend" }
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
In this example, the ‘ **tenant-a** ’ namespace would get policy ‘ **pol1** ’ applied as indicated. Specifically, pods with the **segment** label ‘ **backend** ’ would allow TCP traffic on port 80 from pods with the **segment** label ‘ **frontend** ’ to be received.
|
||||
|
||||
|
||||
|
||||
Today, [Romana](http://romana.io/), [OpenShift](https://www.openshift.com/), [OpenContrail](http://www.opencontrail.org/) and [Calico](http://projectcalico.org/) support network policies applied to namespaces and pods. Cisco and VMware are working on implementations as well. Both Romana and Calico demonstrated these capabilities with Kubernetes 1.2 recently at KubeCon. You can watch their presentations here: [Romana](https://www.youtube.com/watch?v=f-dLKtK6qCs) ([slides](http://www.slideshare.net/RomanaProject/kubecon-london-2016-ronana-cloud-native-sdn)), [Calico](https://www.youtube.com/watch?v=p1zfh4N4SX0) ([slides](http://www.slideshare.net/kubecon/kubecon-eu-2016-secure-cloudnative-networking-with-project-calico)).
|
||||
|
||||
|
||||
|
||||
**How does it work?**
|
||||
|
||||
|
||||
|
||||
Each solution has its own specific implementation details. Today, they rely on some kind of on-host enforcement mechanism, but future implementations could also be built that apply policy on a hypervisor, or even directly by the network itself.
|
||||
|
||||
|
||||
|
||||
External policy control software (specifics vary across implementations) will watch the new API endpoint for pods being created and/or new policies being applied. When an event occurs that requires policy configuration, the listener will recognize the change and a controller will respond by configuring the interface and applying the policy. The diagram below shows an API listener and policy controller responding to updates by applying a network policy locally via a host agent. The network interface on the pods is configured by a CNI plugin on the host (not shown).
|
||||
|
||||
|
||||
|
||||
{: .big-img}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
If you’ve been holding back on developing applications with Kubernetes because of network isolation and/or security concerns, these new network policies go a long way to providing the control you need. No need to wait until Kubernetes 1.3 since network policy is available now as an experimental API enabled as a ThirdPartyResource.
|
||||
|
||||
|
||||
|
||||
If you’re interested in Kubernetes and networking, there are several ways to participate - join us at:
|
||||
|
||||
- Our [Networking slack channel](https://kubernetes.slack.com/messages/sig-network/)
|
||||
- Our [Kubernetes Networking Special Interest Group](https://groups.google.com/forum/#!forum/kubernetes-sig-network) email list
|
||||
|
||||
|
||||
The Networking “Special Interest Group,” which meets bi-weekly at 3pm (15h00) Pacific Time at [SIG-Networking hangout](https://zoom.us/j/5806599998).
|
||||
|
||||
|
||||
_--Chris Marino, Co-Founder, Pani Networks_
|
||||
|
|
@ -0,0 +1,124 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " How to deploy secure, auditable, and reproducible Kubernetes clusters on AWS "
|
||||
date: Friday, April 15, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
_Today’s guest post is written by Colin Hom, infrastructure engineer at [CoreOS](https://coreos.com/), the company delivering Google’s Infrastructure for Everyone Else (#GIFEE) and running the world's containers securely on CoreOS Linux, Tectonic and Quay._
|
||||
|
||||
_Join us at [CoreOS Fest Berlin](https://coreos.com/fest/), the Open Source Distributed Systems Conference, and learn more about CoreOS and Kubernetes._
|
||||
|
||||
At CoreOS, we're all about deploying Kubernetes in production at scale. Today we are excited to share a tool that makes deploying Kubernetes on Amazon Web Services (AWS) a breeze. Kube-aws is a tool for deploying auditable and reproducible Kubernetes clusters to AWS, currently used by CoreOS to spin up production clusters.
|
||||
|
||||
Today you might be putting the Kubernetes components together in a more manual way. With this helpful tool, Kubernetes is delivered in a streamlined package to save time, minimize interdependencies and quickly create production-ready deployments.
|
||||
|
||||
A simple templating system is leveraged to generate cluster configuration as a set of declarative configuration templates that can be version controlled, audited and re-deployed. Since the entirety of the provisioning is by [AWS CloudFormation](https://aws.amazon.com/cloudformation/) and cloud-init, there’s no need for external configuration management tools on your end. Batteries included!
|
||||
|
||||
To skip the talk and go straight to the project, check out [the latest release of kube-aws](https://github.com/coreos/coreos-kubernetes/releases), which supports Kubernetes 1.2.x. To get your cluster running, [check out the documentation](https://coreos.com/kubernetes/docs/latest/kubernetes-on-aws.html).
|
||||
|
||||
**Why kube-aws? Security, auditability and reproducibility**
|
||||
|
||||
Kube-aws is designed with three central goals in mind.
|
||||
|
||||
|
||||
**Secure** : TLS assets are encrypted via the [AWS Key Management Service (KMS)](https://aws.amazon.com/kms/) before being embedded in the CloudFormation JSON. By managing [IAM policy](http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) for the KMS key independently, an operator can decouple operational access to the CloudFormation stack from access to the TLS secrets.
|
||||
|
||||
|
||||
|
||||
**Auditable** : kube-aws is built around the concept of cluster assets. These configuration and credential assets represent the complete description of the cluster. Since KMS is used to encrypt TLS assets, you can feel free to check your unencrypted stack JSON into version control as well!
|
||||
|
||||
|
||||
|
||||
**Reproducible** : The _--export_ option packs your parameterized cluster definition into a single JSON file which defines a CloudFormation stack. This file can be version controlled and submitted directly to the CloudFormation API via existing deployment tooling, if desired.
|
||||
|
||||
|
||||
**How to get started with kube-aws**
|
||||
|
||||
|
||||
|
||||
On top of this foundation, kube-aws implements features that make Kubernetes deployments on AWS easier to manage and more flexible. Here are some examples.
|
||||
|
||||
|
||||
|
||||
**Route53 Integration** : Kube-aws can manage your cluster DNS records as part of the provisioning process.
|
||||
|
||||
|
||||
|
||||
cluster.yaml
|
||||
```
|
||||
externalDNSName: my-cluster.kubernetes.coreos.com
|
||||
|
||||
createRecordSet: true
|
||||
|
||||
hostedZone: kubernetes.coreos.com
|
||||
|
||||
recordSetTTL: 300
|
||||
```
|
||||
|
||||
|
||||
|
||||
**Existing VPC Support** : Deploy your cluster to an existing VPC.
|
||||
|
||||
|
||||
|
||||
cluster.yaml
|
||||
|
||||
|
||||
```
|
||||
vpcId: vpc-xxxxx
|
||||
|
||||
routeTableId: rtb-xxxxx
|
||||
```
|
||||
|
||||
|
||||
|
||||
**Validation** : Kube-aws supports validation of cloud-init and CloudFormation definitions, along with any external resources that the cluster stack will integrate with. For example, here’s a cloud-config with a misspelled parameter:
|
||||
|
||||
|
||||
|
||||
userdata/cloud-config-worker
|
||||
|
||||
|
||||
```
|
||||
#cloud-config
|
||||
|
||||
coreos:
|
||||
|
||||
flannel:
|
||||
interrface: $private\_ipv4
|
||||
etcd\_endpoints: {{ .ETCDEndpoints }}
|
||||
```
|
||||
|
||||
|
||||
|
||||
$ kube-aws validate
|
||||
|
||||
|
||||
\> Validating UserData...
|
||||
Error: cloud-config validation errors:
|
||||
UserDataWorker: line 4: warning: unrecognized key "interrface"
|
||||
|
||||
|
||||
|
||||
To get started, check out the [kube-aws documentation](https://coreos.com/kubernetes/docs/latest/kubernetes-on-aws.html).
|
||||
|
||||
|
||||
**Future Work**
|
||||
|
||||
As always, the goal with kube-aws is to make deployments that are production ready. While we use kube-aws in production on AWS today, this project is pre-1.0 and there are a number of areas in which kube-aws needs to evolve.
|
||||
|
||||
**Fault tolerance** : At CoreOS we believe Kubernetes on AWS is a potent platform for fault-tolerant and self-healing deployments. In the upcoming weeks, kube-aws will be rising to a new challenge: surviving the [Chaos Monkey](https://github.com/Netflix/SimianArmy/wiki/Chaos-Monkey) – control plane and all!
|
||||
|
||||
**Zero-downtime updates** : Updating CoreOS nodes and Kubernetes components can be done without downtime and without interdependency with the correct instance replacement strategy.
|
||||
|
||||
A [github issue](https://github.com/coreos/coreos-kubernetes/issues/340) tracks the work towards this goal. We look forward to seeing you get involved with the project by filing issues or contributing directly.
|
||||
|
||||
|
||||
_Learn more about Kubernetes and meet the community at [CoreOS Fest Berlin](https://coreos.com/fest/) - May 9-10, 2016_
|
||||
|
||||
|
||||
|
||||
_– Colin Hom, infrastructure engineer, CoreOS_
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " SIG-ClusterOps: Promote operability and interoperability of Kubernetes clusters "
|
||||
date: Tuesday, April 19, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor’s note: This week we’re featuring [Kubernetes Special Interest Groups](https://github.com/kubernetes/kubernetes/wiki/Special-Interest-Groups-(SIGs)); Today’s post is by the SIG-ClusterOps team whose mission is to promote operability and interoperability of Kubernetes clusters -- to listen, help & escalate._
|
||||
|
||||
We think Kubernetes is an awesome way to run applications at scale! Unfortunately, there's a bootstrapping problem: we need good ways to build secure & reliable scale environments around Kubernetes. While some parts of the platform administration leverage the platform (cool!), there are fundamental operational topics that need to be addressed and questions (like upgrade and conformance) that need to be answered.
|
||||
|
||||
**Enter Cluster Ops SIG – the community members who work under the platform to keep it running.**
|
||||
|
||||
Our objective for Cluster Ops is to be a person-to-person community first, and a source of opinions, documentation, tests and scripts second. That means we dedicate significant time and attention to simply comparing notes about what is working and discussing real operations. Those interactions give us data to form opinions. It also means we can use real-world experiences to inform the project.
|
||||
|
||||
We aim to become the forum for operational review and feedback about the project. For Kubernetes to succeed, operators need to have a significant voice in the project by weekly participation and collecting survey data. We're not trying to create a single opinion about ops, but we do want to create a coordinated resource for collecting operational feedback for the project. As a single recognized group, operators are more accessible and have a bigger impact.
|
||||
|
||||
**What about real world deliverables?**
|
||||
|
||||
We've got plans for tangible results too. We’re already driving toward concrete deliverables like reference architectures, tool catalogs, community deployment notes and conformance testing. Cluster Ops wants to become the clearing house for operational resources. We're going to do it based on real world experience and battle tested deployments.
|
||||
|
||||
**Connect with us.**
|
||||
|
||||
Cluster Ops can be hard work – don't do it alone. We're here to listen, to help when we can and escalate when we can't. Join the conversation at:
|
||||
|
||||
|
||||
- Chat with us on the [Cluster Ops Slack channel](https://kubernetes.slack.com/messages/sig-cluster-ops/)
|
||||
- Email us at the [Cluster Ops SIG email list](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-ops)
|
||||
|
||||
The Cluster Ops Special Interest Group meets weekly at 13:00PT on Thursdays, you can join us via the [video hangout](https://plus.google.com/hangouts/_/google.com/sig-cluster-ops) and see latest [meeting notes](https://docs.google.com/document/d/1IhN5v6MjcAUrvLd9dAWtKcGWBWSaRU8DNyPiof3gYMY/edit) for agendas and topics covered.
|
||||
|
||||
|
||||
|
||||
_--Rob Hirschfeld, CEO, RackN_
|
||||
|
|
@ -0,0 +1,146 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Using Deployment objects with Kubernetes 1.2 "
|
||||
date: Friday, April 01, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor's note: this is the seventh post in a [series of in-depth posts](http://blog.kubernetes.io/2016/03/five-days-of-kubernetes-12.html) on what's new in Kubernetes 1.2_
|
||||
|
||||
Kubernetes has made deploying and managing applications very straightforward, with most actions a single API or command line away, including rolling out new applications, canary testing and upgrading. So why would we need Deployments?
|
||||
|
||||
Deployment objects automate deploying and rolling updating applications. Compared with kubectl rolling-update, Deployment API is much faster, is declarative, is implemented server-side and has more features (for example, you can rollback to any previous revision even after the rolling update is done).
|
||||
|
||||
In today’s blogpost, we’ll cover how to use Deployments to:
|
||||
|
||||
1. Deploy/rollout an application
|
||||
2. Update the application declaratively and progressively, without a service outage
|
||||
3. Rollback to a previous revision, if something’s wrong when you’re deploying/updating the application
|
||||
|
||||
[](https://4.bp.blogspot.com/-M9Xc21XYtLA/Vv7ImzURFxI/AAAAAAAACg0/jlHU3nJ-qYwC74DMiD-joaDPqQfebj3-g/s1600/image03.gif)
|
||||
|
||||
Without further ado, let’s start playing around with Deployments!
|
||||
|
||||
|
||||
### Getting started
|
||||
If you want to try this example, basically you’ll need 3 things:
|
||||
|
||||
1. **A running Kubernetes cluster** : If you don’t already have one, check the [Getting Started guides](http://kubernetes.io/docs/getting-started-guides/) for a list of solutions on a range of platforms, from your laptop, to VMs on a cloud provider, to a rack of bare metal servers.
|
||||
2. **Kubectl, the Kubernetes CLI** : If you see a URL response after running kubectl cluster-info, you’re ready to go. Otherwise, follow the [instructions](http://kubernetes.io/docs/user-guide/prereqs/) to install and configure kubectl; or the [instructions for hosted solutions](https://cloud.google.com/container-engine/docs/before-you-begin) if you have a Google Container Engine cluster.
|
||||
3. The [configuration files for this demo](https://github.com/kubernetes/kubernetes.github.io/tree/master/docs/user-guide/update-demo).
|
||||
If you choose not to run this example yourself, that’s okay. Just watch this [video](https://youtu.be/eigalYy0v4w) to see what’s going on in each step.
|
||||
|
||||
|
||||
### Diving in
|
||||
The configuration files contain a static website. First, we want to start serving its static content. From the root of the Kubernetes repository, run:
|
||||
```
|
||||
$ kubectl proxy --www=docs/user-guide/update-demo/local/ &
|
||||
```
|
||||
Starting to serve on …
|
||||
|
||||
This runs a proxy on the default port 8001. You may now visit [http://localhost:8001/static/](http://localhost:8001/static/) to see the demo website (and it should be a blank page for now). Now we want to run an app and show it on the website.
|
||||
```
|
||||
$ kubectl run update-demo
|
||||
--image=gcr.io/google\_containers/update-demo:nautilus --port=80 -l name=update-demo
|
||||
|
||||
deployment “update-demo” created
|
||||
```
|
||||
This deploys 1 replica of an app with the image “update-demo:nautilus” and you can see it visually on [http://localhost:8001/static/](http://localhost:8001/static/).1
|
||||
|
||||
|
||||
|
||||
[](https://3.bp.blogspot.com/-EYXhcEK1upw/Vv7JL4rOAtI/AAAAAAAACg4/uy9oKePGjA82xPHhX6ak2_NiHPZ3FU8gw/s1600/deployment-API-5.png)
|
||||
|
||||
The card showing on the website represents a Kubernetes pod, with the pod’s name (ID), status, image, and labels.
|
||||
|
||||
|
||||
### Getting bigger
|
||||
Now we want more copies of this app!
|
||||
$ kubectl scale deployment/update-demo --replicas=4
|
||||
deployment "update-demo" scaled
|
||||
|
||||
|
||||
|
||||
[](https://1.bp.blogspot.com/-6YXQqogAGcY/Vv7JnU7g_FI/AAAAAAAAChE/00pqgQvUXkcgjPzi7NfDnSSRJeBUHFaGQ/s1600/deployment-API-2.png)
|
||||
|
||||
### Updating your application
|
||||
How about updating the app?
|
||||
```
|
||||
$ kubectl edit deployment/update-demo
|
||||
|
||||
This opens up your default editor, and you can update the deployment on the fly. Find .spec.template.spec.containers[0].image and change nautilus to kitty. Save the file, and you’ll see:
|
||||
|
||||
deployment "update-demo" edited
|
||||
```
|
||||
You’re now updating the image of this app from “update-demo:nautilus” to “update-demo:kitty”. Deployments allow you to update the app progressively, without a service outage.
|
||||
|
||||
|
||||
[](https://2.bp.blogspot.com/-x4FmFXdzw30/Vv7KAAQ21wI/AAAAAAAAChM/QWv8Y03lIsU4JBqjE3XFQU2EtzZgogylA/s1600/deployment-API-3.png)
|
||||
|
||||
After a while, you’ll find the update seems stuck. What happened?
|
||||
|
||||
### Debugging your rollout
|
||||
If you look closer, you’ll find that the pods with the new “kitty” tagged image stay pending. The Deployment automatically stops the rollout if it’s failing. Let’s look at one of the new pods to see what happened:
|
||||
```
|
||||
$ kubectl describe pod/update-demo-1326485872-a4key
|
||||
```
|
||||
Looking at the events of this pod, you’ll notice that Kubernetes failed to pull the image because the “kitty” tag wasn’t found:
|
||||
|
||||
Failed to pull image "gcr.io/google\_containers/update-demo:kitty": Tag kitty not found in repository gcr.io/google\_containers/update-demo
|
||||
|
||||
### Rolling back
|
||||
Ok, now we want to undo the changes and then take our time to figure out which image tag we should use.
|
||||
```
|
||||
$ kubectl rollout undo deployment/update-demo
|
||||
deployment "update-demo" rolled back
|
||||
```
|
||||
|
||||
|
||||
[](https://1.bp.blogspot.com/-6YXQqogAGcY/Vv7JnU7g_FI/AAAAAAAAChE/00pqgQvUXkcgjPzi7NfDnSSRJeBUHFaGQ/s1600/deployment-API-2.png)
|
||||
|
||||
Everything’s back to normal, phew!
|
||||
|
||||
To learn more about rollback, visit [rolling back a Deployment](http://kubernetes.io/docs/user-guide/deployments/#rolling-back-a-deployment).
|
||||
|
||||
### Updating your application (for real)
|
||||
After a while, we finally figure out that the right image tag is “kitten”, instead of “kitty”. Now change .spec.template.spec.containers[0].image tag from “nautilus” to “kitten”.
|
||||
```
|
||||
$ kubectl edit deployment/update-demo
|
||||
deployment "update-demo" edited
|
||||
```
|
||||
|
||||
|
||||
[](https://4.bp.blogspot.com/-u7qPUSQOMLE/Vv7JndUqKaI/AAAAAAAAChA/jHoysiDbnNQU2prPJn19ZFOtLiatzPsMg/s1600/deployment-API-1.png)
|
||||
|
||||
Now you see there are 4 cute kittens on the demo website, which means we’ve updated the app successfully! If you want to know the magic behind this, look closer at the Deployment:
|
||||
```
|
||||
$ kubectl describe deployment/update-demo
|
||||
```
|
||||
|
||||
|
||||
[](https://1.bp.blogspot.com/-3U1OTNqdz1s/Vv7Kfw4uGYI/AAAAAAAAChU/CgF6Mv5J6b8_lANXkpEIFytRGo9x0Bn_A/s1600/deployment-API-6.png)
|
||||
|
||||
From the events section, you’ll find that the Deployment is managing another resource called [Replica Set](http://kubernetes.io/docs/user-guide/replicasets/), each controls the number of replicas of a different pod template. The Deployment enables progressive rollout by scaling up and down Replica Sets of new and old pod templates.
|
||||
|
||||
### Conclusion
|
||||
Now, you’ve learned the basic use of Deployment objects:
|
||||
|
||||
1. Deploy an app with a Deployment, using kubectl run
|
||||
2. Updating the app by updating the Deployment with kubectl edit
|
||||
3. Rolling back to a previously deployed app with kubectl rollout undo
|
||||
But there’s so much more in Deployment that this article didn’t cover! To discover more, continue reading [Deployment’s introduction](http://kubernetes.io/docs/user-guide/deployments/).
|
||||
|
||||
**_Note:_** _In Kubernetes 1.2, Deployment (beta release) is now feature-complete and enabled by default. For those of you who have tried Deployment in Kubernetes 1.1, please **delete all Deployment 1.1 resources** (including the Replication Controllers and Pods they manage) before trying out Deployments in 1.2. This is necessary because we made some non-backward-compatible changes to the API._
|
||||
|
||||
If you’re interested in Kubernetes and configuration, you’ll want to participate in:
|
||||
|
||||
- Our Configuration [slack channel](https://kubernetes.slack.com/messages/sig-configuration/)
|
||||
- Our [Kubernetes Configuration Special Interest Group](https://groups.google.com/forum/#!forum/kubernetes-sig-config) email list
|
||||
- The Configuration “Special Interest Group,” which meets weekly on Wednesdays at 10am (10h00) Pacific Time at [SIG-Config hangout](https://hangouts.google.com/hangouts/_/google.com/kube-sig-config)
|
||||
And of course for more information about the project in general, go to [www.kubernetes.io](http://www.kubernetes.io/).
|
||||
|
||||
-- _Janet Kuo, Software Engineer, Google_
|
||||
|
||||
|
||||
**1** “kubectl run” outputs the type and name of the resource(s) it creates. In 1.2, it now creates a deployment resource. You can use that in subsequent commands, such as "kubectl get deployment ", or "kubectl expose deployment ". If you want to write a script to do that automatically, in a forward-compatible manner, use "-o name" flag with "kubectl run", and it will generate short output "deployments/", which can also be used on subsequent command lines. The "--generator" flag can be used with "kubectl run" to generate other types of resources, for example, set it to "run/v1" to create a Replication Controller, which was the default in 1.1 and 1.0, and to "run-pod/v1" to create a Pod, such as for --restart=Never pods.
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " CoreOS Fest 2016: CoreOS and Kubernetes Community meet in Berlin (& San Francisco) "
|
||||
date: Tuesday, May 03, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
[CoreOS Fest 2016](https://coreos.com/fest/) will bring together the container and open source distributed systems community, including many thought leaders in the Kubernetes space. It is the second annual CoreOS community conference, held for the first time in Berlin on May 9th and 10th. CoreOS believes Kubernetes is the container orchestration component to deliver GIFEE (Google’s Infrastructure for Everyone Else).
|
||||
|
||||
At this year’s CoreOS Fest, there are tracks dedicated to Kubernetes where you’ll hear about various topics ranging from Kubernetes performance and scalability, continuous delivery and Kubernetes, rktnetes, stackanetes and more. In addition, there will be a variety of talks, from introductory workshops to deep-dives into all things containers and related software.
|
||||
|
||||
Don’t miss these great speaker sessions at the conference in **Berlin** :
|
||||
|
||||
|
||||
- [Kubernetes Performance & Scalability Deep-Dive](https://coreosfest2016.sched.org/event/6ckp/kubernetes-performance-scalability-deep-dive?iframe=no&w=i:100;&sidebar=yes&bg=no) by Filip Grzadkowski, Senior Software Engineer at Google
|
||||
- [Launching a complex application in a Kubernetes cloud](http://coreosfest2016.sched.org/event/6T0b/launching-a-complex-application-in-a-kubernetes-cloud) by Thomas Fricke and Jannis Rake-Revelant, Operations & Infrastructure Lead, immmr Gmbh (a service developed by the Deutsche Telekom’s R&D department)
|
||||
- [I have Kubernetes, now what?](https://coreosfest2016.sched.org/event/6db3/i-have-kubernetes-now-what?iframe=no&w=i:100;&sidebar=yes&bg=no) by Gabriel Monroy, CTO of Engine Yard and creator of Deis
|
||||
- [When rkt meets Kubernetes: a troubleshooting tale](https://coreosfest2016.sched.org/event/6YGg/when-rkt-meets-kubernetes-a-troubleshooting-tale?iframe=no&w=i:100;&sidebar=yes&bg=no) by Luca Marturana, Software Engineer at Sysdig
|
||||
- [Use Kubernetes to deploy telecom applications](https://coreosfest2016.sched.org/event/6eSE/use-kubernetes-to-deploy-telecom-applications?iframe=no&w=i:100;&sidebar=yes&bg=no) by Victor Hu, Senior Engineer at Huawei Technologies
|
||||
- [Continuous Delivery, Kubernetes and You](https://coreosfest2016.sched.org/event/6qCs/continuous-delivery-kubernetes-and-you?iframe=no&w=i:100;&sidebar=yes&bg=no) by Micha Hernandez van Leuffen, CEO and founder of Wercker
|
||||
- [#GIFEE, More Containers, More Problems](https://coreosfest2016.sched.org/event/6YJl/gifee-more-containers-more-problems?iframe=no&w=i:100;&sidebar=yes&bg=no) by Ed Rooth, Head of Tectonic at CoreOS
|
||||
- [Kubernetes Access Control with dex](https://coreosfest2016.sched.org/event/6YH4/kubernetes-access-control-with-dex?iframe=no&w=i:100;&sidebar=yes&bg=no) by Eric Chiang, Software Engineer at CoreOS
|
||||
|
||||
If you can’t make it to Berlin, Kubernetes is also a focal point at the **CoreOS Fest [San Francisco](https://www.eventbrite.com/e/coreos-fest-san-francisco-satellite-event-tickets-22705108591)**[**satellite event**](https://www.eventbrite.com/e/coreos-fest-san-francisco-satellite-event-tickets-22705108591), a one day event dedicated to CoreOS and Kubernetes. In fact, Tim Hockin, senior staff engineer at Google and one of the creators of Kubernetes, will be kicking off the day with a keynote dedicated to Kubernetes updates.
|
||||
|
||||
**San Francisco** sessions dedicated to Kubernetes include:
|
||||
|
||||
|
||||
- Tim Hockin’s keynote address, Senior Staff Engineer at Google
|
||||
- When rkt meets Kubernetes: a troubleshooting tale by Loris Degioanni, CEO of Sysdig
|
||||
- rktnetes: what's new with container runtimes and Kubernetes by Derek Gonyeo, Software Engineer at CoreOS
|
||||
- Magical Security Sprinkles: Secure, Resilient Microservices on CoreOS and Kubernetes by Oliver Gould, CTO of Buoyant
|
||||
|
||||
**Kubernetes Workshop in SF** : [Getting Started with Kubernetes](https://www.eventbrite.com/e/getting-started-with-kubernetes-tickets-25180552711), hosted at Google San Francisco office (345 Spear St - 7th floor) by Google Developer Program Engineers Carter Morgan and Bill Prin on Tuesday May 10th from 9:00am to 1:00pm, lunch will be served afterwards. Limited seats, please [RSVP for free here](https://www.eventbrite.com/e/getting-started-with-kubernetes-tickets-25180552711).
|
||||
|
||||
**Get your tickets** :
|
||||
|
||||
- [CoreOS Fest - Berlin](https://ti.to/coreos/coreos-fest-2016/en), at the [Berlin Congress Center](https://www.google.com/maps/place/bcc+Berlin+Congress+Center+GmbH/@52.5206732,13.4165195,15z/data=!4m2!3m1!1s0x0:0xd2a15220241f2080) ([hotel option](http://www.parkinn-berlin.de/))
|
||||
- satellite event in [San Francisco](https://www.eventbrite.com/e/coreos-fest-san-francisco-satellite-event-tickets-22705108591), at the [111 Minna Gallery](https://www.google.com/maps/place/111+Minna+Gallery/@37.7873222,-122.3994124,15z/data=!4m2!3m1!1s0x0:0xb55875af8c0ca88b?sa=X&ved=0ahUKEwjZ8cPLtL7MAhVQ5GMKHa8bCM4Q_BIIdjAN)
|
||||
|
||||
Learn more at: [coreos.com/fest/](https://coreos.com/fest/) and on Twitter [@CoreOSFest](https://twitter.com/coreosfest) #CoreOSFest
|
||||
|
||||
|
||||
_-- Sarah Novotny, Kubernetes Community Manager_
|
||||
|
|
@ -0,0 +1,203 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Hypernetes: Bringing Security and Multi-tenancy to Kubernetes "
|
||||
date: Tuesday, May 24, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Today’s guest post is written by Harry Zhang and Pengfei Ni, engineers at HyperHQ, describing a new hypervisor based container called HyperContainer_
|
||||
|
||||
While many developers and security professionals are comfortable with Linux containers as an effective boundary, many users need a stronger degree of isolation, particularly for those running in a multi-tenant environment. Sadly, today, those users are forced to run their containers inside virtual machines, even one VM per container.
|
||||
|
||||
Unfortunately, this results in the loss of many of the benefits of a cloud-native deployment: slow startup time of VMs; a memory tax for every container; low utilization resulting in wasting resources.
|
||||
|
||||
In this post, we will introduce HyperContainer, a hypervisor based container and see how it naturally fits into the Kubernetes design, and enables users to serve their customers directly with virtualized containers, instead of wrapping them inside of full blown VMs.
|
||||
|
||||
**HyperContainer**
|
||||
|
||||
[HyperContainer](http://hypercontainer.io/) is a hypervisor-based container, which allows you to launch Docker images with standard hypervisors (KVM, Xen, etc.). As an open-source project, HyperContainer consists of an [OCI](https://github.com/opencontainers/runtime-spec) compatible runtime implementation, named [runV](https://github.com/hyperhq/runv/), and a management daemon named [hyperd](https://github.com/hyperhq/hyperd). The idea behind HyperContainer is quite straightforward: to combine the best of both virtualization and container.
|
||||
|
||||
We can consider containers as two parts (as Kubernetes does). The first part is the container runtime, where HyperContainer uses virtualization to achieve execution isolation and resource limitation instead of namespaces and cgroups. The second part is the application data, where HyperContainer leverages Docker images. So in HyperContainer, virtualization technology makes it possible to build a fully isolated sandbox with an independent guest kernel (so things like `top` and /proc all work), but from a developer’s view, it’s portable and behaves like a standard container.
|
||||
|
||||
**HyperContainer as Pod**
|
||||
|
||||
The interesting part of HyperContainer is not only that it is secure enough for multi-tenant environments (such as a public cloud), but also how well it fits into the Kubernetes philosophy.
|
||||
|
||||
One of the most important concepts in Kubernetes is Pods. The design of Pods is a lesson learned ([Borg paper section 8.1](http://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/43438.pdf)) from real world workloads, where in many cases people want an atomic scheduling unit composed of multiple containers (please check this [example](https://github.com/kubernetes/kubernetes/tree/master/examples/javaweb-tomcat-sidecar) for further information). In the context of Linux containers, a Pod wraps and encapsulates several containers into a logical group. But in HyperContainer, the hypervisor serves as a natural boundary, and Pods are introduced as first-class objects:
|
||||
|
||||
|
||||
|
||||
{: .big-img}
|
||||
|
||||
|
||||
|
||||
HyperContainer wraps a Pod of light-weight application containers and exposes the container interface at Pod level. Inside the Pod, a minimalist Linux kernel called HyperKernel is booted. This HyperKernel is built with a tiny Init service called HyperStart. It will act as the PID 1 process and creates the Pod, setup Mount namespace, and launch apps from the loaded images.
|
||||
|
||||
|
||||
|
||||
This model works nicely with Kubernetes. The integration of HyperContainer with Kubernetes, as we indicated in the title, is what makes up the [Hypernetes](https://github.com/hyperhq/hypernetes) project.
|
||||
|
||||
|
||||
|
||||
**Hypernetes**
|
||||
|
||||
|
||||
|
||||
One of the best parts of Kubernetes is that it is designed to support multiple container runtimes, meaning users are not locked-in to a single vendor. We are very pleased to announce that we have already begun working with the Kubernetes team to integrate HyperContainer into Kubernetes upstream. This integration involves:
|
||||
|
||||
1. container runtime optimizing and refactoring
|
||||
2. new client-server mode runtime interface
|
||||
3. containerd integration to support runV
|
||||
|
||||
The OCI standard and kubelet’s multiple runtime architecture make this integration much easier even though HyperContainer is not based on Linux container technology stack.
|
||||
|
||||
|
||||
|
||||
On the other hand, in order to run HyperContainers in a multi-tenant environment, we also created a new network plugin and modified an existing volume plugin. Since Hypernetes runs each Pod in its own VM, it can make use of your existing IaaS layer technologies for multi-tenant network and persistent volumes. The current Hypernetes implementation uses standard OpenStack components.
|
||||
|
||||
|
||||
|
||||
Below we go into further details about how all those above are implemented.
|
||||
|
||||
|
||||
|
||||
**Identity and Authentication**
|
||||
|
||||
|
||||
|
||||
In Hypernetes we chose [Keystone](http://docs.openstack.org/developer/keystone/) to manage different tenants and perform identification and authentication for tenants during any administrative operation. Since Keystone comes from the OpenStack ecosystem, it works seamlessly with the network and storage plugins we used in Hypernetes.
|
||||
|
||||
|
||||
|
||||
**Multi-tenant Network Model**
|
||||
|
||||
|
||||
|
||||
For a multi-tenant container cluster, each tenant needs to have strong network isolation from each other tenant. In Hypernetes, each tenant has its own Network. Instead of configuring a new network using OpenStack, which is complex, with Hypernetes, you just create a Network object like below.
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Network
|
||||
metadata:
|
||||
name: net1
|
||||
spec:
|
||||
tenantID: 065f210a2ca9442aad898ab129426350
|
||||
subnets:
|
||||
subnet1:
|
||||
cidr: 192.168.0.0/24
|
||||
gateway: 192.168.0.1
|
||||
```
|
||||
|
||||
|
||||
Note that the tenantID is supplied by Keystone. This yaml will automatically create a new Neutron network with a default router and a subnet 192.168.0.0/24.
|
||||
|
||||
|
||||
|
||||
A Network controller will be responsible for the life-cycle management of any Network instance created by the user. This Network can be assigned to one or more Namespaces, and any Pods belonging to the same Network can reach each other directly through IP address.
|
||||
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: ns1
|
||||
spec:
|
||||
network: net1
|
||||
```
|
||||
|
||||
|
||||
If a Namespace does not have a Network spec, it will use the default Kubernetes network model instead, including the default kube-proxy. So if a user creates a Pod in a Namespace with an associated Network, Hypernetes will follow the [Kubernetes Network Plugin Model](http://kubernetes.io/docs/admin/network-plugins/) to set up a Neutron network for this Pod. Here is a high level example:
|
||||
|
||||
|
||||
|
||||
{: HyperContainer wraps a Pod of li.big-img}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Hypernetes uses a standalone gRPC handler named kubestack to translate the Kubernetes Pod request into the Neutron network API. Moreover, kubestack is also responsible for handling another important networking feature: a multi-tenant Service proxy.
|
||||
|
||||
|
||||
|
||||
In a multi-tenant environment, the default iptables-based kube-proxy can not reach the individual Pods, because they are isolated into different networks. Instead, Hypernetes uses a [built-in HAproxy in every HyperContainer](https://github.com/hyperhq/hyperd/blob/2072dd8e28a02a25ae6a819f81029b47a579e683/servicediscovery/servicediscovery.go) as the portal. This HAproxy will proxy all the Service instances in the namespace of that Pod. Kube-proxy will be responsible for updating these backend servers by following the standard OnServiceUpdate and OnEndpointsUpdate processes, so that users will not notice any difference. A downside of this method is that HAproxy has to listen to some specific ports which may conflict with user’s containers. That’s why we are planning to use LVS to replace this proxy in the next release.
|
||||
|
||||
|
||||
|
||||
With the help of the Neutron based network plugin, the Hypernetes Service is able to provide an OpenStack load balancer, just like how the “external” load balancer does on GCE. When user creates a Service with external IPs, an OpenStack load balancer will be created and endpoints will be automatically updated through the kubestack workflow above.
|
||||
|
||||
|
||||
|
||||
**Persistent Storage**
|
||||
|
||||
|
||||
|
||||
When considering storage, we are actually building a tenant-aware persistent volume in Kubernetes. The reason we decided not to use existing Cinder volume plugin of Kubernetes is that its model does not work in the virtualization case. Specifically:
|
||||
|
||||
|
||||
|
||||
The Cinder volume plugin requires OpenStack as the Kubernetes provider.
|
||||
|
||||
The OpenStack provider will find which VM the target Pod is running on
|
||||
|
||||
Cinder volume plugin will mount a Cinder volume to a path inside the host VM of Kubernetes.
|
||||
|
||||
The kubelet will bind mount this path as a volume into containers of target Pod.
|
||||
|
||||
|
||||
|
||||
But in Hypernetes, things become much simpler. Thanks to the physical boundary of Pods, HyperContainer can mount Cinder volumes directly as block devices into Pods, just like a normal VM. This mechanism eliminates extra time to query Nova to find out the VM of target Pod in the existing Cinder volume workflow listed above.
|
||||
|
||||
|
||||
|
||||
The current implementation of the Cinder plugin in Hypernetes is based on Ceph RBD backend, and it works the same as all other Kubernetes volume plugins, one just needs to remember to create the Cinder volume (referenced by volumeID below) beforehand.
|
||||
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
volumeMounts:
|
||||
- name: nginx-persistent-storage
|
||||
mountPath: /var/lib/nginx
|
||||
volumes:
|
||||
- name: nginx-persistent-storage
|
||||
cinder:
|
||||
volumeID: 651b2a7b-683e-47e1-bdd6-e3c62e8f91c0
|
||||
fsType: ext4
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
So when the user provides a Pod yaml with a Cinder volume, Hypernetes will check if kubelet is using the Hyper container runtime. If so, the Cinder volume can be mounted directly to the Pod without any extra path mapping. Then the volume metadata will be passed to the Kubelet RunPod process as part of HyperContainer spec. Done!
|
||||
|
||||
|
||||
|
||||
Thanks to the plugin model of Kubernetes network and volume, we can easily build our own solutions above for HyperContainer though it is essentially different from the traditional Linux container. We also plan to propose these solutions to Kubernetes upstream by following the CNI model and volume plugin standard after the runtime integration is completed.
|
||||
|
||||
We believe all of these [open source projects](https://github.com/hyperhq/) are important components of the container ecosystem, and their growth depends greatly on the open source spirit and technical vision of the Kubernetes team.
|
||||
|
||||
|
||||
|
||||
**Conclusion**
|
||||
|
||||
|
||||
|
||||
This post introduces some of the technical details about HyperContainer and the Hypernetes project. We hope that people will be interested in this new category of secure container and its integration with Kubernetes. If you are looking to try out Hypernetes and HyperContainer, we have just announced the public beta of our new secure container cloud service ([Hyper\_](https://hyper.sh/)), which is built on these technologies. But even if you are running on-premise, we believe that Hypernetes and HyperContainer will let you run Kubernetes in a more secure way.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_~Harry Zhang and Pengfei Ni, engineers at HyperHQ_
|
||||
|
|
@ -0,0 +1,107 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Bringing End-to-End Kubernetes Testing to Azure (Part 1) "
|
||||
date: Tuesday, June 06, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
_Today’s guest post is by Travis Newhouse, Chief Architect at AppFormix, writing about their experiences bringing Kubernetes to Azure._
|
||||
|
||||
At [AppFormix](http://www.appformix.com/), continuous integration testing is part of our culture. We see many benefits to running end-to-end tests regularly, including minimizing regressions and ensuring our software works together as a whole. To ensure a high quality experience for our customers, we require the ability to run end-to-end testing not just for our application, but for the entire orchestration stack. Our customers are adopting Kubernetes as their container orchestration technology of choice, and they demand choice when it comes to where their containers execute, from private infrastructure to public providers, including Azure. After several weeks of work, we are pleased to announce we are contributing a nightly, continuous integration job that executes e2e tests on the Azure platform. After running the e2e tests each night for only a few weeks, we have already found and fixed two issues in Kubernetes. We hope our contribution of an e2e job will help the community maintain support for the Azure platform as Kubernetes evolves.
|
||||
|
||||
|
||||
|
||||
In this blog post, we describe the journey we took to implement deployment scripts for the Azure platform. The deployment scripts are a prerequisite to the e2e test job we are contributing, as the scripts make it possible for our e2e test job to test the latest commits to the Kubernetes master branch. In a subsequent blog post, we will describe details of the e2e tests that will help maintain support for the Azure platform, and how to contribute federated e2e test results to the Kubernetes project.
|
||||
|
||||
|
||||
|
||||
**BACKGROUND**
|
||||
|
||||
While Kubernetes is designed to operate on any IaaS, and [solution guides](http://kubernetes.io/docs/getting-started-guides/#table-of-solutions) exist for many platforms including [Google Compute Engine](http://kubernetes.io/docs/getting-started-guides/gce/), [AWS](http://kubernetes.io/docs/getting-started-guides/aws/), [Azure](http://kubernetes.io/docs/getting-started-guides/coreos/azure/), and [Rackspace](http://kubernetes.io/docs/getting-started-guides/rackspace/), the Kubernetes project refers to these as “versioned distros,” as they are only tested against a particular binary release of Kubernetes. On the other hand, “development distros” are used daily by automated, e2e tests for the latest Kubernetes source code, and serve as gating checks to code submission.
|
||||
|
||||
|
||||
|
||||
When we first surveyed existing support for Kubernetes on Azure, we found documentation for running Kubernetes on Azure using CoreOS and Weave. The documentation includes [scripts for deployment](http://kubernetes.io/docs/getting-started-guides/coreos/azure/), but the scripts do not conform to the cluster/kube-up.sh framework for automated cluster creation required by a “development distro.” Further, there did not exist a continuous integration job that utilized the scripts to validate Kubernetes using the end-to-end test scenarios (those found in test/e2e in the Kubernetes repository).
|
||||
|
||||
|
||||
|
||||
With some additional investigation into the project history (side note: git log --all --grep='azure' --oneline was quite helpful), we discovered that there previously existed a set of scripts that integrated with the cluster/kube-up.sh framework. These scripts were discarded on October 16, 2015 ([commit 8e8437d](https://github.com/kubernetes/kubernetes/pull/15790)) because the scripts hadn’t worked since before Kubernetes version 1.0. With these commits as a starting point, we set out to bring the scripts up to date, and create a supported continuous integration job that will aid continued maintenance.
|
||||
|
||||
|
||||
|
||||
**CLUSTER DEPLOYMENT SCRIPTS**
|
||||
|
||||
To setup a Kubernetes cluster with Ubuntu VMs on Azure, we followed the groundwork laid by the previously abandoned commit, and tried to leverage the existing code as much as possible. The solution uses SaltStack for deployment and OpenVPN for networking between the master and the minions. SaltStack is also used for configuration management by several other solutions, such as AWS, GCE, Vagrant, and Vsphere. Resurrecting the discarded commit was a starting point, but we soon realized several key elements that needed attention:
|
||||
|
||||
- Install Docker and Kubernetes on the nodes using SaltStack
|
||||
- Configure authentication for services
|
||||
- Configure networking
|
||||
|
||||
The cluster setup scripts ensure Docker is installed, copy the Kubernetes Docker images to the master and minions nodes, and load the images. On the master node, SaltStack launches kubelet, which in turn launches the following Kubernetes services running in containers: kube-api-server, kube-scheduler, and kube-controller-manager. On each of the minion nodes, SaltStack launches kubelet, which starts kube-proxy.
|
||||
|
||||
|
||||
|
||||
Kubernetes services must authenticate when communicating with each other. For example, minions register with the kube-api service on the master. On the master node, scripts generate a self-signed certificate and key that kube-api uses for TLS. Minions are configured to skip verification of the kube-api’s (self-signed) TLS certificate. We configure the services to use username and password credentials. The username and password are generated by the cluster setup scripts, and stored in the kubeconfig file on each node.
|
||||
|
||||
|
||||
|
||||
Finally, we implemented the networking configuration. To keep the scripts parameterized and minimize assumptions about the target environment, the scripts create a new Linux bridge device (cbr0), and ensure that all containers use that interface to access the network. To configure networking, we use OpenVPN to establish tunnels between master and minion nodes. For each minion, we reserve a /24 subnet to use for its pods. Azure assigned each node its own IP address. We also added the necessary routing table entries for this bridge to use OpenVPN interfaces. This is required to ensure pods in different hosts can communicate with each other. The routes on the master and minion are the following:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
###### master
|
||||
```
|
||||
Destination Gateway Genmask Flags Metric Ref Use Iface
|
||||
|
||||
10.8.0.0 10.8.0.2 255.255.255.0 UG 0 0 0 tun0
|
||||
|
||||
10.8.0.2 0.0.0.0 255.255.255.255 UH 0 0 0 tun0
|
||||
|
||||
10.244.1.0 10.8.0.2 255.255.255.0 UG 0 0 0 tun0
|
||||
|
||||
10.244.2.0 10.8.0.2 255.255.255.0 UG 0 0 0 tun0
|
||||
|
||||
172.18.0.0 0.0.0.0 255.255.0.0 U 0 0 0 cbr0
|
||||
```
|
||||
|
||||
###### minion-1
|
||||
```
|
||||
10.8.0.0 10.8.0.5 255.255.255.0 UG 0 0 0 tun0
|
||||
|
||||
10.8.0.5 0.0.0.0 255.255.255.255 UH 0 0 0 tun0
|
||||
|
||||
10.244.1.0 0.0.0.0 255.255.255.0 U 0 0 0 cbr0
|
||||
|
||||
10.244.2.0 10.8.0.5 255.255.255.0 UG 0 0 0 tun0
|
||||
```
|
||||
|
||||
###### minion-2
|
||||
```
|
||||
10.8.0.0 10.8.0.9 255.255.255.0 UG 0 0 0 tun0
|
||||
|
||||
10.8.0.9 0.0.0.0 255.255.255.255 UH 0 0 0 tun0
|
||||
|
||||
10.244.1.0 10.8.0.9 255.255.255.0 UG 0 0 0 tun0
|
||||
|
||||
10.244.2.0 0.0.0.0 255.255.255.0 U 0 0 0 cbr0
|
||||
```
|
||||
|
||||
[](https://3.bp.blogspot.com/-U2KYWNzJpFI/V3QMYbKRX8I/AAAAAAAAAks/SqEvCDJHJ8QtbB9hJVM8WAkFuAUlrFl8ACLcB/s1600/Kubernetes%2BBlog%2BPost%2B-%2BKubernetes%2Bon%2BAzure%2B%2528Part%2B1%2529.png) |
|
||||
| Figure 1 - OpenVPN network configuration |
|
||||
|
||||
**FUTURE WORK** With the deployment scripts implemented, a subset of e2e test cases are passing on the Azure platform. Nightly results are published to the [Kubernetes test history dashboard](http://storage.googleapis.com/kubernetes-test-history/static/index.html). Weixu Zhuang made a [pull request](https://github.com/kubernetes/kubernetes/pull/21207) on Kubernetes GitHub, and we are actively working with the Kubernetes community to merge the Azure cluster deployment scripts necessary for a nightly e2e test job. The deployment scripts provide a minimal working environment for Kubernetes on Azure. There are several next steps to continue the work, and we hope the community will get involved to achieve them.
|
||||
|
||||
- Only a subset of the e2e scenarios are passing because some cloud provider interfaces are not yet implemented for Azure, such as load balancer and instance information. To this end, we seek community input and help to define an Azure implementation of the cloudprovider interface (pkg/cloudprovider/). These interfaces will enable features such as Kubernetes pods being exposed to the external network and cluster DNS.
|
||||
- Azure has new APIs for interacting with the service. The submitted scripts currently use the Azure Service Management APIs, [which are deprecated](https://azure.microsoft.com/en-us/documentation/articles/azure-classic-rm/). The Azure Resource Manager APIs should be used in the deployment scripts.
|
||||
The team at AppFormix is pleased to contribute support for Azure to the Kubernetes community. We look forward to feedback about how we can work together to improve Kubernetes on Azure.
|
||||
|
||||
|
||||
|
||||
_Editor's Note: Want to _contribute to_ Kubernetes, get involved [here](https://github.com/kubernetes/kubernetes/issues?q=is%3Aopen+is%3Aissue+label%3Ahelp-wanted). Have your own Kubernetes story you’d like to tell, [let us know](https://docs.google.com/a/google.com/forms/d/1cHiRdmBCEmUH9ekHY2G-KDySk5YXRzALHcMNgzwXtPM/viewform)!_
|
||||
|
||||
|
||||
Part II is available [here](http://blog.kubernetes.io/2016/07/bringing-end-to-end-kubernetes-testing-to-azure-2.html).
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Container Design Patterns "
|
||||
date: Wednesday, June 21, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Kubernetes automates deployment, operations, and scaling of applications, but our goals in the Kubernetes project extend beyond system management -- we want Kubernetes to help developers, too. Kubernetes should make it easy for them to write the distributed applications and services that run in cloud and datacenter environments. To enable this, Kubernetes defines not only an API for administrators to perform management actions, but also an API for containerized applications to interact with the management platform.
|
||||
|
||||
Our work on the latter is just beginning, but you can already see it manifested in a few features of Kubernetes. For example:
|
||||
|
||||
|
||||
- The “[graceful termination](http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_podspec)” mechanism provides a callback into the container a configurable amount of time before it is killed (due to a rolling update, node drain for maintenance, etc.). This allows the application to cleanly shut down, e.g. persist in-memory state and cleanly conclude open connections.
|
||||
- [Liveness and readiness probes](http://kubernetes.io/docs/user-guide/production-pods/#liveness-and-readiness-probes-aka-health-checks) check a configurable application HTTP endpoint (other probe types are supported as well) to determine if the container is alive and/or ready to receive traffic. The response determines whether Kubernetes will restart the container, include it in the load-balancing pool for its Service, etc.
|
||||
- [ConfigMap](http://kubernetes.io/docs/user-guide/configmap/) allows applications to read their configuration from a Kubernetes resource rather than using command-line flags.
|
||||
|
||||
More generally, we see Kubernetes enabling a new generation of design patterns, similar to [object oriented design patterns](https://en.wikipedia.org/wiki/Object-oriented_programming#Design_patterns), but this time for containerized applications. That design patterns would emerge from containerized architectures is not surprising -- containers provide many of the same benefits as software objects, in terms of modularity/packaging, abstraction, and reuse. Even better, because containers generally interact with each other via HTTP and widely available data formats like JSON, the benefits can be provided in a language-independent way.
|
||||
|
||||
This week Kubernetes co-founder Brendan Burns is presenting a [**paper**](https://www.usenix.org/conference/hotcloud16/workshop-program/presentation/burns) outlining our thoughts on this topic at the [8th Usenix Workshop on Hot Topics in Cloud Computing](https://www.usenix.org/conference/hotcloud16) (HotCloud ‘16), a venue where academic researchers and industry practitioners come together to discuss ideas at the forefront of research in private and public cloud technology. The paper describes three classes of patterns: management patterns (such as those described above), patterns involving multiple cooperating containers running on the same node, and patterns involving containers running across multiple nodes. We don’t want to spoil the fun of reading the paper, but we will say that you’ll see that the [Pod](http://kubernetes.io/docs/user-guide/pods/) abstraction is a key enabler for the last two types of patterns.
|
||||
|
||||
As the Kubernetes project continues to bring our decade of experience with [Borg](https://queue.acm.org/detail.cfm?id=2898444) to the open source community, we aim not only to make application deployment and operations at scale simple and reliable, but also to make it easy to create “cloud-native” applications in the first place. Our work on documenting our ideas around design patterns for container-based services, and Kubernetes’s enabling of such patterns, is a first step in this direction. We look forward to working with the academic and practitioner communities to identify and codify additional patterns, with the aim of helping containers fulfill the promise of bringing increased simplicity and reliability to the entire software lifecycle, from development, to deployment, to operations.
|
||||
|
||||
To learn more about the Kubernetes project visit [kubernetes.io](http://kubernetes.io/) or chat with us on Slack at [slack.kubernetes.io](http://slack.kubernetes.io/).
|
||||
|
||||
_-- Brendan Burns and David Oppenheimer, Software Engineers, Google_
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " The Illustrated Children's Guide to Kubernetes "
|
||||
date: Friday, June 09, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Kubernetes is an open source project with a growing community. We love seeing the ways that our community innovates inside and on top of Kubernetes. Deis is an excellent example of a company that understands the strategic impact of strong container orchestration. They contribute directly to the project; in associated subprojects; and, delightfully, with a creative endeavor to help our user community understand more about what Kubernetes is. Want to contribute to Kubernetes? One way is to get involved [here](https://github.com/kubernetes/kubernetes/issues?q=is%3Aopen+is%3Aissue+label%3Ahelp-wanted) and help us with code. But, please don’t consider that the only way to contribute. This little adventure that Deis takes us on is an example of how open source isn’t only code._
|
||||
|
||||
_Have your own Kubernetes story you’d like to tell, [let us know](https://docs.google.com/a/google.com/forms/d/1cHiRdmBCEmUH9ekHY2G-KDySk5YXRzALHcMNgzwXtPM/viewform)!_
|
||||
_-- @sarahnovotny Community Wonk, Kubernetes project._
|
||||
|
||||
_Guest post is by Beau Vrolyk, CEO of Deis, the open source Kubernetes-native PaaS._
|
||||
|
||||
Over at [Deis](https://deis.com/), we’ve been busy building open source tools for Kubernetes. We’re just about to finish up moving our easy-to-use application platform to Kubernetes and couldn’t be happier with the results. In the Kubernetes project we’ve found not only a growing and vibrant community but also a well-architected system, informed by years of experience running containers at scale.
|
||||
|
||||
But that’s not all! As we’ve decomposed, ported, and reborn our PaaS as a Kubernetes citizen; we found a need for tools to help manage all of the ephemera that comes along with building and running Kubernetes-native applications. The result has been open sourced as [Helm](https://github.com/kubernetes/helm) and we’re excited to see increasing adoption and growing excitement around the project.
|
||||
|
||||
There’s fun in the Deis offices too -- we like to add some character to our architecture diagrams and pull requests. This time, literally. Meet Phippy--the intrepid little PHP app--and her journey to Kubernetes. What better way to talk to your parents, friends, and co-workers about this Kubernetes thing you keep going on about, than a little story time. We give to you The Illustrated Children's Guide to Kubernetes, conceived of and narrated by our own Matt Butcher and lovingly illustrated by Bailey Beougher. Join the fun on YouTube and tweet [@opendeis](https://twitter.com/Opendeis) to win your own copy of the book or a squishy little Phippy of your own.
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Steering an Automation Platform at Wercker with Kubernetes "
|
||||
date: Saturday, July 15, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor’s note: today’s guest post is by Andy Smith, the CTO of Wercker, sharing how Kubernetes helps them save time and speed up development. _
|
||||
|
||||
At [Wercker](http://wercker.com/) we run millions of containers that execute our users’ CI/CD jobs. The vast majority of them are ephemeral and only last as long as builds, tests and deploys take to run, the rest are ephemeral, too -- aren't we all --, but tend to last a bit longer and run our infrastructure. As we are running many containers across many nodes, we were in need of a highly scalable scheduler that would make our lives easier, and as such, decided to implement Kubernetes.
|
||||
|
||||
Wercker is a container-centric automation platform that helps developers build, test and deploy their applications. We support any number of pipelines, ranging from building code, testing API-contracts between microservices, to pushing containers to registries, and deploying to schedulers. All of these pipeline jobs run inside Docker containers and each artifact can be a Docker container.
|
||||
|
||||
And of course we use Wercker to build Wercker, and deploy itself onto Kubernetes!
|
||||
|
||||
**Overview**
|
||||
|
||||
Because we are a platform for running multi-service cloud-native code we've made many design decisions around isolation. On the base level we use [CoreOS](http://coreos.com/) and [cloud-init](https://coreos.com/os/docs/latest/cloud-config.html) to bootstrap a cluster of heterogeneous nodes which I have named Patricians, Peasants, as well as controller nodes that don't have a cool name and are just called Controllers. Maybe we should switch to Constables.
|
||||
|
||||
|
||||
{: .big-img}
|
||||
|
||||
|
||||
|
||||
|
||||
Patrician nodes are where the bulk of our infrastructure runs. These nodes have appropriate network interfaces to communicate with our backend services as well as be routable by various load balancers. This is where our logging is aggregated and sent off to logging services, our many microservices for reporting and processing the results of job runs, and our many microservices for handling API calls.
|
||||
|
||||
|
||||
|
||||
On the other end of the spectrum are the Peasant nodes where the public jobs are run. Public jobs consist of worker pods reading from a job queue and dynamically generating new runner pods to handle execution of the job. The job itself is an incarnation of our open source [CLI tool](http://github.com/wercker/wercker), the same one you can run on your laptop with Docker installed. These nodes have very limited access to the rest of the infrastructure and the containers the jobs themselves run in are even further isolated.
|
||||
|
||||
|
||||
|
||||
Controllers are controllers, I bet ours look exactly the same as yours.
|
||||
|
||||
|
||||
|
||||
**Dynamic Pods**
|
||||
|
||||
Our heaviest use of the Kubernetes API is definitely our system of creating dynamic pods to serve as the runtime environment for our actual job execution. After pulling job descriptions from the queue we define a new pod containing all the relevant environment for checking out code, managing a cache, executing a job and uploading artifacts. We launch the pod, monitor its progress, and destroy it when the job is done.
|
||||
|
||||
|
||||
|
||||
**Ingresses**
|
||||
|
||||
In order to provide a backend for HTTP API calls and allow self-registration of handlers we make use of the Ingress system in Kubernetes. It wasn't the clearest thing to set up, but reading through enough of the [nginx example](http://blog.kubernetes.io/2016/03/Kubernetes-1.2-and-simplifying-advanced-networking-with-Ingress.html) eventually got us to a good spot where it is easy to connect services to the frontend.
|
||||
|
||||
|
||||
|
||||
**Upcoming Features in 1.3**
|
||||
|
||||
|
||||
|
||||
While we generally treat all of our pods and containers as ephemeral and expect rapid restarts on failures, we are looking forward to Pet Sets and Init Containers as ways to optimize some of our processes. We are also pleased with official support for [Minikube](https://github.com/kubernetes/minikube) coming along as it improves our local testing and development.
|
||||
|
||||
|
||||
|
||||
**Conclusion**
|
||||
|
||||
|
||||
|
||||
Kubernetes saves us the non-trivial task of managing many, many containers across many nodes. It provides a robust API and tooling for introspecting these containers, and it includes much built in support for logging, metrics, monitoring and debugging. Service discovery and networking alone saves us so much time and speeds development immensely.
|
||||
|
||||
Cheers to you Kubernetes, keep up the good work :)
|
||||
|
||||
|
||||
|
||||
_-- Andy Smith, CTO, Wercker_
|
||||
|
|
@ -0,0 +1,412 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Autoscaling in Kubernetes "
|
||||
date: Wednesday, July 12, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor’s note: this post is part of a [series of in-depth articles](http://blog.kubernetes.io/2016/07/five-days-of-kubernetes-1.3.html) on what's new in Kubernetes 1.3_
|
||||
|
||||
Customers using Kubernetes respond to end user requests quickly and ship software faster than ever before. But what happens when you build a service that is even more popular than you planned for, and run out of compute? In [Kubernetes 1.3](http://blog.kubernetes.io/2016/07/kubernetes-1.3-bridging-cloud-native-and-enterprise-workloads.html), we are proud to announce that we have a solution: autoscaling. On [Google Compute Engine](https://cloud.google.com/compute/) (GCE) and [Google Container Engine](https://cloud.google.com/container-engine/) (GKE) (and coming soon on [AWS](https://aws.amazon.com/)), Kubernetes will automatically scale up your cluster as soon as you need it, and scale it back down to save you money when you don’t.
|
||||
|
||||
|
||||
### Benefits of Autoscaling
|
||||
|
||||
To understand better where autoscaling would provide the most value, let’s start with an example. Imagine you have a 24/7 production service with a load that is variable in time, where it is very busy during the day in the US, and relatively low at night. Ideally, we would want the number of nodes in the cluster and the number of pods in deployment to dynamically adjust to the load to meet end user demand. The new Cluster Autoscaling feature together with Horizontal Pod Autoscaler can handle this for you automatically.
|
||||
|
||||
|
||||
### Setting Up Autoscaling on GCE
|
||||
|
||||
The following instructions apply to GCE. For GKE please check the autoscaling section in cluster operations manual available [here](https://cloud.google.com/container-engine/docs/clusters/operations#create_a_cluster_with_autoscaling).
|
||||
|
||||
Before we begin, we need to have an active GCE project with Google Cloud Monitoring, Google Cloud Logging and Stackdriver enabled. For more information on project creation, please read our [Getting Started Guide](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/gce.md#prerequisites). We also need to download a recent version of Kubernetes project (version [v1.3.0](http://v1.3.0/) or later).
|
||||
|
||||
First, we set up a cluster with Cluster Autoscaler turned on. The number of nodes in the cluster will start at 2, and autoscale up to a maximum of 5. To implement this, we’ll export the following environment variables:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
export NUM_NODES=2
|
||||
|
||||
export KUBE_AUTOSCALER_MIN_NODES=2
|
||||
|
||||
export KUBE_AUTOSCALER_MAX_NODES=5
|
||||
|
||||
export KUBE\_ENABLE\_CLUSTER\_AUTOSCALER=true
|
||||
```
|
||||
|
||||
|
||||
and start the cluster by running:
|
||||
|
||||
|
||||
|
||||
```
|
||||
./cluster/kube-up.sh
|
||||
```
|
||||
|
||||
|
||||
|
||||
The kube-up.sh script creates a cluster together with Cluster Autoscaler add-on. The autoscaler will try to add new nodes to the cluster if there are pending pods which could schedule on a new node.
|
||||
|
||||
|
||||
|
||||
Let’s see our cluster, it should have two nodes:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl get nodes
|
||||
|
||||
NAME STATUS AGE
|
||||
|
||||
kubernetes-master Ready,SchedulingDisabled 2m
|
||||
|
||||
kubernetes-minion-group-de5q Ready 2m
|
||||
|
||||
kubernetes-minion-group-yhdx Ready 1m
|
||||
```
|
||||
|
||||
|
||||
|
||||
#### Run & Expose PHP-Apache Server
|
||||
|
||||
|
||||
|
||||
To demonstrate autoscaling we will use a custom docker image based on php-apache server. The image can be found [here](https://github.com/kubernetes/kubernetes/blob/8caeec429ee1d2a9df7b7a41b21c626346b456fb/docs/user-guide/horizontal-pod-autoscaling/image). It defines [index.php](https://github.com/kubernetes/kubernetes/blob/8caeec429ee1d2a9df7b7a41b21c626346b456fb/docs/user-guide/horizontal-pod-autoscaling/image/index.php) page which performs some CPU intensive computations.
|
||||
|
||||
|
||||
|
||||
First, we’ll start a deployment running the image and expose it as a service:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl run php-apache \
|
||||
|
||||
--image=gcr.io/google\_containers/hpa-example \
|
||||
|
||||
--requests=cpu=500m,memory=500M --expose --port=80
|
||||
|
||||
service "php-apache" created
deployment "php-apache" created
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Now, we will wait some time and verify that both the deployment and the service were correctly created and are running:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl get deployment
|
||||
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
|
||||
php-apache 1 1 1 1 49s
|
||||
|
||||
|
||||
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
|
||||
php-apache-2046965998-z65jn 1/1 Running 0 30s
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
We may now check that php-apache server works correctly by calling wget with the service's address:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl run -i --tty service-test --image=busybox /bin/sh
|
||||
Hit enter for command prompt
|
||||
$ wget -q -O- http://php-apache.default.svc.cluster.local
|
||||
|
||||
OK!
|
||||
```
|
||||
|
||||
|
||||
|
||||
#### Starting Horizontal Pod Autoscaler
|
||||
|
||||
|
||||
Now that the deployment is running, we will create a Horizontal Pod Autoscaler for it. To create it, we will use kubectl autoscale command, which looks like this:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
This defines a Horizontal Pod Autoscaler that maintains between 1 and 10 replicas of the Pods controlled by the php-apache deployment we created in the first step of these instructions. Roughly speaking, the horizontal autoscaler will increase and decrease the number of replicas (via the deployment) so as to maintain an average CPU utilization across all Pods of 50% (since each pod requests 500 milli-cores by [kubectl run](https://github.com/kubernetes/kubernetes/blob/8caeec429ee1d2a9df7b7a41b21c626346b456fb/docs/user-guide/horizontal-pod-autoscaling/README.md#kubectl-run), this means average CPU usage of 250 milli-cores). See [here](https://github.com/kubernetes/kubernetes/blob/8caeec429ee1d2a9df7b7a41b21c626346b456fb/docs/design/horizontal-pod-autoscaler.md#autoscaling-algorithm) for more details on the algorithm.
|
||||
|
||||
|
||||
|
||||
We may check the current status of autoscaler by running:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl get hpa
|
||||
|
||||
NAME REFERENCE TARGET CURRENT MINPODS MAXPODS AGE
|
||||
|
||||
php-apache Deployment/php-apache/scale 50% 0% 1 10 14s
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Please note that the current CPU consumption is 0% as we are not sending any requests to the server (the CURRENT column shows the average across all the pods controlled by the corresponding replication controller).
|
||||
|
||||
#### Raising the Load
|
||||
|
||||
|
||||
Now, we will see how our autoscalers (Cluster Autoscaler and Horizontal Pod Autoscaler) react on the increased load of the server. We will start two infinite loops of queries to our server (please run them in different terminals):
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl run -i --tty load-generator --image=busybox /bin/sh
|
||||
Hit enter for command prompt
|
||||
$ while true; do wget -q -O- http://php-apache.default.svc.cluster.local; done
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
We need to wait a moment (about one minute) for stats to propagate. Afterwards, we will examine status of Horizontal Pod Autoscaler:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl get hpa
|
||||
|
||||
NAME REFERENCE TARGET CURRENT MINPODS MAXPODS AGE
|
||||
|
||||
php-apache Deployment/php-apache/scale 50% 310% 1 10 2m
|
||||
|
||||
|
||||
|
||||
$ kubectl get deployment php-apache
|
||||
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
|
||||
php-apache 7 7 7 3 4m
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Horizontal Pod Autoscaler has increased the number of pods in our deployment to 7. Let’s now check, if all the pods are running:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
jsz@jsz-desk2:~/k8s-src$ kubectl get pods
|
||||
|
||||
php-apache-2046965998-3ewo6 0/1 Pending 0 1m
|
||||
|
||||
php-apache-2046965998-8m03k 1/1 Running 0 1m
|
||||
|
||||
php-apache-2046965998-ddpgp 1/1 Running 0 5m
|
||||
|
||||
php-apache-2046965998-lrik6 1/1 Running 0 1m
|
||||
|
||||
php-apache-2046965998-nj465 0/1 Pending 0 1m
|
||||
|
||||
php-apache-2046965998-tmwg1 1/1 Running 0 1m
|
||||
|
||||
php-apache-2046965998-xkbw1 0/1 Pending 0 1m
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
As we can see, some pods are pending. Let’s describe one of pending pods to get the reason of the pending state:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl describe pod php-apache-2046965998-3ewo6
|
||||
|
||||
Name: php-apache-2046965998-3ewo6
|
||||
|
||||
Namespace: default
|
||||
|
||||
...
|
||||
|
||||
Events:
|
||||
|
||||
FirstSeen From SubobjectPath Type Reason Message
|
||||
|
||||
|
||||
|
||||
1m {default-scheduler } Warning FailedScheduling pod (php-apache-2046965998-3ewo6) failed to fit in any node
|
||||
|
||||
fit failure on node (kubernetes-minion-group-yhdx): Insufficient CPU
|
||||
|
||||
fit failure on node (kubernetes-minion-group-de5q): Insufficient CPU
|
||||
|
||||
|
||||
|
||||
1m {cluster-autoscaler } Normal TriggeredScaleUp pod triggered scale-up, mig: kubernetes-minion-group, sizes (current/new): 2/3
|
||||
```
|
||||
|
||||
|
||||
|
||||
The pod is pending as there was no CPU in the system for it. We see there’s a TriggeredScaleUp event connected with the pod. It means that the pod triggered reaction of Cluster Autoscaler and a new node will be added to the cluster. Now we’ll wait for the reaction (about 3 minutes) and list all nodes:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl get nodes
|
||||
|
||||
NAME STATUS AGE
|
||||
|
||||
kubernetes-master Ready,SchedulingDisabled 9m
|
||||
|
||||
kubernetes-minion-group-6z5i Ready 43s
|
||||
|
||||
kubernetes-minion-group-de5q Ready 9m
|
||||
|
||||
kubernetes-minion-group-yhdx Ready 9m
|
||||
```
|
||||
|
||||
|
||||
|
||||
As we see a new node kubernetes-minion-group-6z5i was added by Cluster Autoscaler. Let’s verify that all pods are now running:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl get pods
|
||||
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
|
||||
php-apache-2046965998-3ewo6 1/1 Running 0 3m
|
||||
|
||||
php-apache-2046965998-8m03k 1/1 Running 0 3m
|
||||
|
||||
php-apache-2046965998-ddpgp 1/1 Running 0 7m
|
||||
|
||||
php-apache-2046965998-lrik6 1/1 Running 0 3m
|
||||
|
||||
php-apache-2046965998-nj465 1/1 Running 0 3m
|
||||
|
||||
php-apache-2046965998-tmwg1 1/1 Running 0 3m
|
||||
|
||||
php-apache-2046965998-xkbw1 1/1 Running 0 3m
|
||||
```
|
||||
|
||||
|
||||
|
||||
After the node addition all php-apache pods are running!
|
||||
|
||||
|
||||
|
||||
#### Stop Load
|
||||
|
||||
|
||||
We will finish our example by stopping the user load. We’ll terminate both infinite while loops sending requests to the server and verify the result state:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl get hpa
|
||||
|
||||
NAME REFERENCE TARGET CURRENT MINPODS MAXPODS AGE
|
||||
|
||||
php-apache Deployment/php-apache/scale 50% 0% 1 10 16m
|
||||
|
||||
|
||||
|
||||
$ kubectl get deployment php-apache
|
||||
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
|
||||
php-apache 1 1 1 1 14m
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
As we see, in the presented case CPU utilization dropped to 0, and the number of replicas dropped to 1.
|
||||
|
||||
|
||||
|
||||
After deleting pods most of the cluster resources are unused. Scaling the cluster down may take more time than scaling up because Cluster Autoscaler makes sure that the node is really not needed so that short periods of inactivity (due to pod upgrade etc) won’t trigger node deletion (see [cluster autoscaler doc](https://github.com/kubernetes/kubernetes.github.io/blob/release-1.3/docs/admin/cluster-management.md#cluster-autoscaling)). After approximately 10-12 minutes you can verify that the number of nodes in the cluster dropped:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ kubectl get nodes
|
||||
|
||||
NAME STATUS AGE
|
||||
|
||||
kubernetes-master Ready,SchedulingDisabled 37m
|
||||
|
||||
kubernetes-minion-group-de5q Ready 36m
|
||||
|
||||
kubernetes-minion-group-yhdx Ready 36m
|
||||
```
|
||||
|
||||
|
||||
|
||||
The number of nodes in our cluster is now two again as node kubernetes-minion-group-6z5i was removed by Cluster Autoscaler.
|
||||
|
||||
|
||||
|
||||
### Other use cases
|
||||
|
||||
|
||||
|
||||
As we have shown, it is very easy to dynamically adjust the number of pods to the load using a combination of Horizontal Pod Autoscaler and Cluster Autoscaler.
|
||||
|
||||
|
||||
|
||||
However, Cluster Autoscaler alone can also be quite helpful whenever there are irregularities in the cluster load. For example, clusters related to development or continuous integration tests can be less needed on weekends or at night. Batch processing clusters may have periods when all jobs are over and new ones will only start in a couple of hours. Having machines that do nothing is a waste of money.
|
||||
|
||||
|
||||
|
||||
In all of these cases Cluster Autoscaler can reduce the number of unused nodes and give quite significant savings because you will only pay for these nodes that you actually need to run your pods. It also makes sure that you always have enough compute power to run your tasks.
|
||||
|
||||
|
||||
|
||||
_-- Jerzy Szczepkowski and Marcin Wielgus, Software Engineers, Google_
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Bringing End-to-End Kubernetes Testing to Azure (Part 2) "
|
||||
date: Tuesday, July 18, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor’s Note: Today’s guest post is Part II from a [series](http://blog.kubernetes.io/2016/06/bringing-end-to-end-testing-to-azure.html) by Travis Newhouse, Chief Architect at AppFormix, writing about their contributions to Kubernetes._
|
||||
|
||||
Historically, Kubernetes testing has been hosted by Google, running e2e tests on [Google Compute Engine](https://cloud.google.com/compute/) (GCE) and [Google Container Engine](https://cloud.google.com/container-engine/) (GKE). In fact, the gating checks for the submit-queue are a subset of tests executed on these test platforms. Federated testing aims to expand test coverage by enabling organizations to host test jobs for a variety of platforms and contribute test results to benefit the Kubernetes project. Members of the Kubernetes test team at Google and SIG-Testing have created a [Kubernetes test history dashboard](http://storage.googleapis.com/kubernetes-test-history/static/index.html) that publishes the results from all federated test jobs (including those hosted by Google).
|
||||
|
||||
In this blog post, we describe extending the e2e test jobs for Azure, and show how to contribute a federated test to the Kubernetes project.
|
||||
|
||||
**END-TO-END INTEGRATION TESTS FOR AZURE**
|
||||
|
||||
After successfully implementing [“development distro” scripts to automate deployment of Kubernetes on Azure](http://blog.kubernetes.io/2016/06/bringing-end-to-end-testing-to-azure.html), our next goal was to run e2e integration tests and share the results with the Kubernetes community.
|
||||
|
||||
We automated our workflow for executing e2e tests of Kubernetes on Azure by defining a nightly job in our private Jenkins server. Figure 2 shows the workflow that uses kube-up.sh to deploy Kubernetes on Ubuntu virtual machines running in Azure, then executes the e2e tests. On completion of the tests, the job uploads the test results and logs to a Google Cloud Storage directory, in a format that can be processed by the [scripts that produce the test history dashboard](https://github.com/kubernetes/test-infra/tree/master/jenkins/test-history). Our Jenkins job uses the hack/jenkins/e2e-runner.sh and hack/jenkins/upload-to-gcs.sh scripts to produce the results in the correct format.
|
||||
|
||||
|
||||
| {: .big-img} |
|
||||
| Figure 2 - Nightly test job workflow |
|
||||
|
||||
|
||||
**HOW TO CONTRIBUTE AN E2E TEST**
|
||||
|
||||
Throughout our work to create the Azure e2e test job, we have collaborated with members of [SIG-Testing](https://github.com/kubernetes/community/tree/master/sig-testing) to find a way to publish the results to the Kubernetes community. The results of this collaboration are documentation and a streamlined process to contribute results from a federated test job. The steps to contribute e2e test results can be summarized in 4 steps.
|
||||
|
||||
|
||||
1. Create a [Google Cloud Storage](https://cloud.google.com/storage/) bucket in which to publish the results.
|
||||
2. Define an automated job to run the e2e tests. By setting a few environment variables, hack/jenkins/e2e-runner.sh deploys Kubernetes binaries and executes the tests.
|
||||
3. Upload the results using hack/jenkins/upload-to-gcs.sh.
|
||||
4. Incorporate the results into the test history dashboard by submitting a pull-request with modifications to a few files in [kubernetes/test-infra](https://github.com/kubernetes/test-infra).
|
||||
|
||||
The federated tests documentation describes these steps in more detail. The scripts to run e2e tests and upload results simplifies the work to contribute a new federated test job. The specific steps to set up an automated test job and an appropriate environment in which to deploy Kubernetes are left to the reader’s preferences. For organizations using Jenkins, the jenkins-job-builder configurations for GCE and GKE tests may provide helpful examples.
|
||||
|
||||
**RETROSPECTIVE**
|
||||
|
||||
The e2e tests on Azure have been running for several weeks now. During this period, we have found two issues in Kubernetes. Weixu Zhuang immediately published fixes that have been merged into the Kubernetes master branch.
|
||||
|
||||
The first issue happened when we wanted to bring up the Kubernetes cluster using SaltStack on Azure using Ubuntu VMs. A commit (07d7cfd3) modified the OpenVPN certificate generation script to use a variable that was only initialized by scripts in the cluster/ubuntu. Strict checking on existence of parameters by the certificate generation script caused other platforms that use the script to fail (e.g. our changes to support Azure). We submitted a [pull-request that fixed the issue](https://github.com/kubernetes/kubernetes/pull/21357) by initializing the variable with a default value to make the certificate generation scripts more robust across all platform types.
|
||||
|
||||
The second [pull-request cleaned up an unused import](https://github.com/kubernetes/kubernetes/pull/22321) in the Daemonset unit test file. The import statement broke the unit tests with golang 1.4. Our nightly Jenkins job helped us find this error and we promptly pushed a fix for it.
|
||||
|
||||
**CONCLUSION AND FUTURE WORK**
|
||||
|
||||
The addition of a nightly e2e test job for Kubernetes on Azure has helped to define the process to contribute a federated test to the Kubernetes project. During the course of the work, we also saw the immediate benefit of expanding test coverage to more platforms when our Azure test job identified compatibility issues.
|
||||
|
||||
We want to thank Aaron Crickenberger, Erick Fejta, Joe Finney, and Ryan Hutchinson for their help to incorporate the results of our Azure e2e tests into the Kubernetes test history. If you’d like to get involved with testing to create a stable, high quality releases of Kubernetes, join us in the [Kubernetes Testing SIG (sig-testing)](https://github.com/kubernetes/community/tree/master/sig-testing).
|
||||
|
||||
|
||||
|
||||
|
||||
_--Travis Newhouse, Chief Architect at AppFormix_
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Citrix + Kubernetes = A Home Run "
|
||||
date: Friday, July 14, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
_Editor’s note: today’s guest post is by Mikko Disini, a Director of Product Management at Citrix Systems, sharing their collaboration experience on a Kubernetes integration._
|
||||
|
||||
Technical collaboration is like sports. If you work together as a team, you can go down the homestretch and pull through for a win. That’s our experience with the Google Cloud Platform team.
|
||||
|
||||
Recently, we approached Google Cloud Platform (GCP) to collaborate on behalf of Citrix customers and the broader enterprise market looking to migrate workloads. This migration required including the [NetScaler Docker load balancer](https://www.citrix.com/blogs/2016/06/20/the-best-docker-load-balancer-at-dockercon-in-seattle-this-week/), CPX, into Kubernetes nodes and resolving any issues with getting traffic into the CPX proxies.
|
||||
|
||||
**Why NetScaler and Kubernetes?**
|
||||
|
||||
|
||||
1. Citrix customers want the same Layer 4 to Layer 7 capabilities from NetScaler that they have on-prem as they move to the cloud as they begin deploying their container and microservices architecture with Kubernetes
|
||||
2. Kubernetes provides a proven infrastructure for running containers and VMs with automated workload delivery
|
||||
3. NetScaler CPX provides Layer 4 to Layer 7 services and highly efficient telemetry data to a logging and analytics platform, [NetScaler Management and Analytics System](https://www.citrix.com/blogs/2016/05/24/introducing-the-next-generation-netscaler-management-and-analytics-system/)
|
||||
|
||||
I wish all our experiences working together with a technical partner were as good as working with GCP. We had a list of issues to enable our use cases and were able to collaborate swiftly on a solution. To resolve these, GCP team offered in depth technical assistance, working with Citrix such that NetScaler CPX can spin up and take over as a client-side proxy running on each host.
|
||||
|
||||
Next, NetScaler CPX needed to be inserted in the data path of GCP ingress load balancer so that NetScaler CPX can spread traffic to front end web servers. The NetScaler team made modifications so that NetScaler CPX listens to API server events and configures itself to create a VIP, IP table rules and server rules to take ingress traffic and load balance across front end applications. Google Cloud Platform team provided feedback and assistance to verify modifications made to overcome the technical hurdles. Done!
|
||||
|
||||
NetScaler CPX use case is supported in [Kubernetes 1.3](http://blog.kubernetes.io/2016/07/kubernetes-1.3-bridging-cloud-native-and-enterprise-workloads.html). Citrix customers and the broader enterprise market will have the opportunity to leverage NetScaler with Kubernetes, thereby lowering the friction to move workloads to the cloud.
|
||||
|
||||
You can learn more about NetScaler CPX [here](https://www.citrix.com/networking/microservices.html).
|
||||
|
||||
|
||||
_-- Mikko Disini, Director of Product Management - NetScaler, Citrix Systems_
|
||||
|
|
@ -0,0 +1,344 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Cross Cluster Services - Achieving Higher Availability for your Kubernetes Applications "
|
||||
date: Friday, July 14, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
_Editor’s note: this post is part of a [series of in-depth articles](http://blog.kubernetes.io/2016/07/five-days-of-kubernetes-1.3.html) on what's new in Kubernetes 1.3_
|
||||
|
||||
As Kubernetes users scale their production deployments we’ve heard a clear desire to deploy services across zone, region, cluster and cloud boundaries. Services that span clusters provide geographic distribution, enable hybrid and multi-cloud scenarios and improve the level of high availability beyond single cluster multi-zone deployments. Customers who want their services to span one or more (possibly remote) clusters, need them to be reachable in a consistent manner from both within and outside their clusters.
|
||||
|
||||
In Kubernetes 1.3, our goal was to minimize the friction points and reduce the management/operational overhead associated with deploying a service with geographic distribution to multiple clusters. This post explains how to do this.
|
||||
|
||||
Note: Though the examples used here leverage Google Container Engine ([GKE](https://cloud.google.com/container-engine/)) to provision Kubernetes clusters, they work anywhere you want to deploy Kubernetes.
|
||||
|
||||
Let’s get started. The first step is to create Kubernetes clusters in 4 Google Cloud Platform (GCP) regions using GKE.
|
||||
|
||||
|
||||
- asia-east1-b
|
||||
- europe-west1-b
|
||||
- us-east1-b
|
||||
- us-central1-b
|
||||
|
||||
Let’s run the following commands to build the clusters:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
gcloud container clusters create gce-asia-east1 \
|
||||
|
||||
--scopes cloud-platform \
|
||||
|
||||
--zone asia-east1-b
|
||||
|
||||
gcloud container clusters create gce-europe-west1 \
|
||||
|
||||
--scopes cloud-platform \
|
||||
|
||||
--zone=europe-west1-b
|
||||
|
||||
gcloud container clusters create gce-us-east1 \
|
||||
|
||||
--scopes cloud-platform \
|
||||
|
||||
--zone=us-east1-b
|
||||
|
||||
gcloud container clusters create gce-us-central1 \
|
||||
|
||||
--scopes cloud-platform \
|
||||
|
||||
--zone=us-central1-b
|
||||
```
|
||||
|
||||
|
||||
Let’s verify the clusters are created:
|
||||
|
||||
|
||||
|
||||
```
|
||||
gcloud container clusters list
|
||||
|
||||
NAME ZONE MASTER\_VERSION MASTER\_IP NUM\_NODES STATUS
|
||||
gce-asia-east1 asia-east1-b 1.2.4 104.XXX.XXX.XXX 3 RUNNING
|
||||
gce-europe-west1 europe-west1-b 1.2.4 130.XXX.XX.XX 3 RUNNING
|
||||
gce-us-central1 us-central1-b 1.2.4 104.XXX.XXX.XX 3 RUNNING
|
||||
gce-us-east1 us-east1-b 1.2.4 104.XXX.XX.XXX 3 RUNNING
|
||||
```
|
||||
|
||||
|
||||
|
||||
[](https://lh6.googleusercontent.com/LEMtlOvr6i_iK1DwVmS-ltSKU5PmjrrN287sxwvyiGH-QLjOhF25RUjVTVt4IUo-0oGXvj8bxfRFCxTZa_5Qfv_LjxglshTxcnpm73E6Uy7MgVPTiI2GevdwAogHenZIb2S6A6lr)
|
||||
|
||||
|
||||
|
||||
The next step is to bootstrap the clusters and deploy the federation control plane on one of the clusters that has been provisioned. If you’d like to follow along, refer to Kelsey Hightower’s [tutorial](https://github.com/kelseyhightower/kubernetes-cluster-federation) which walks through the steps involved.
|
||||
|
||||
|
||||
|
||||
**Federated Services**
|
||||
|
||||
|
||||
|
||||
[Federated Services](https://github.com/kubernetes/kubernetes/blob/release-1.3/docs/design/federated-services.md) are directed to the Federation API endpoint and specify the desired properties of your service.
|
||||
|
||||
|
||||
|
||||
Once created, the Federated Service automatically:
|
||||
|
||||
- creates matching Kubernetes Services in every cluster underlying your cluster federation,
|
||||
- monitors the health of those service "shards" (and the clusters in which they reside), and
|
||||
- manages a set of DNS records in a public DNS provider (like Google Cloud DNS, or AWS Route 53), thus ensuring that clients of your federated service can seamlessly locate an appropriate healthy service endpoint at all times, even in the event of cluster, availability zone or regional outages.
|
||||
|
||||
Clients inside your federated Kubernetes clusters (i.e. Pods) will automatically find the local shard of the federated service in their cluster if it exists and is healthy, or the closest healthy shard in a different cluster if it does not.
|
||||
|
||||
|
||||
|
||||
Federations of Kubernetes Clusters can include clusters running in different cloud providers (e.g. GCP, AWS), and on-premise (e.g. on OpenStack). All you need to do is create your clusters in the appropriate cloud providers and/or locations, and register each cluster's API endpoint and credentials with your Federation API Server.
|
||||
|
||||
|
||||
|
||||
In our example, we have clusters created in 4 regions along with a federated control plane API deployed in one of our clusters, that we’ll be using to provision our service. See diagram below for visual representation.
|
||||
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
**Creating a Federated Service**
|
||||
|
||||
|
||||
|
||||
Let’s list out all the clusters in our federation:
|
||||
|
||||
|
||||
|
||||
```
|
||||
kubectl --context=federation-cluster get clusters
|
||||
|
||||
NAME STATUS VERSION AGE
|
||||
gce-asia-east1 Ready 1m
|
||||
gce-europe-west1 Ready 57s
|
||||
gce-us-central1 Ready 47s
|
||||
gce-us-east1 Ready 34s
|
||||
```
|
||||
|
||||
|
||||
|
||||
Let’s create a federated service object:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
kubectl --context=federation-cluster create -f services/nginx.yaml
|
||||
```
|
||||
|
||||
|
||||
|
||||
The '--context=federation-cluster' flag tells kubectl to submit the request to the Federation API endpoint, with the appropriate credentials. The federated service will automatically create and maintain matching Kubernetes services in all of the clusters underlying your federation.
|
||||
|
||||
|
||||
|
||||
You can verify this by checking in each of the underlying clusters, for example:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
kubectl --context=gce-asia-east1a get svc nginx
|
||||
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
nginx 10.63.250.98 104.199.136.89 80/TCP 9m
|
||||
```
|
||||
|
||||
|
||||
|
||||
The above assumes that you have a context named 'gce-asia-east1a' configured in your client for your cluster in that zone. The name and namespace of the underlying services will automatically match those of the federated service that you created above.
|
||||
|
||||
|
||||
|
||||
The status of your Federated Service will automatically reflect the real-time status of the underlying Kubernetes services, for example:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
kubectl --context=federation-cluster describe services nginx
|
||||
|
||||
Name: nginx
|
||||
Namespace: default
|
||||
Labels: run=nginx
|
||||
Selector: run=nginx
|
||||
Type: LoadBalancer
|
||||
IP:
|
||||
LoadBalancer Ingress: 104.XXX.XX.XXX, 104.XXX.XX.XXX, 104.XXX.XX.XXX, 104.XXX.XXX.XX
|
||||
Port: http 80/TCP
|
||||
Endpoints: \<none\>
|
||||
Session Affinity: None
|
||||
No events.
|
||||
```
|
||||
|
||||
|
||||
|
||||
The 'LoadBalancer Ingress' addresses of your federated service correspond with the 'LoadBalancer Ingress' addresses of all of the underlying Kubernetes services. For inter-cluster and inter-cloud-provider networking between service shards to work correctly, your services need to have an externally visible IP address. Service Type: LoadBalancer is typically used here.
|
||||
|
||||
|
||||
|
||||
Note also that we have not yet provisioned any backend Pods to receive the network traffic directed to these addresses (i.e. 'Service Endpoints'), so the federated service does not yet consider these to be healthy service shards, and has accordingly not yet added their addresses to the DNS records for this federated service.
|
||||
|
||||
|
||||
|
||||
**Adding Backend Pods**
|
||||
|
||||
|
||||
|
||||
To render the underlying service shards healthy, we need to add backend Pods behind them. This is currently done directly against the API endpoints of the underlying clusters (although in future the Federation server will be able to do all this for you with a single command, to save you the trouble). For example, to create backend Pods in our underlying clusters:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
for CLUSTER in asia-east1-a europe-west1-a us-east1-a us-central1-a
|
||||
do
|
||||
kubectl --context=$CLUSTER run nginx --image=nginx:1.11.1-alpine --port=80
|
||||
done
|
||||
```
|
||||
|
||||
|
||||
|
||||
**Verifying Public DNS Records**
|
||||
|
||||
|
||||
|
||||
Once the Pods have successfully started and begun listening for connections, Kubernetes in each cluster (via automatic health checks) will report them as healthy endpoints of the service in that cluster. The cluster federation will in turn consider each of these service 'shards' to be healthy, and place them in serving by automatically configuring corresponding public DNS records. You can use your preferred interface to your configured DNS provider to verify this. For example, if your Federation is configured to use Google Cloud DNS, and a managed DNS domain 'example.com':
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ gcloud dns managed-zones describe example-dot-com
|
||||
|
||||
creationTime: '2016-06-26T18:18:39.229Z'
|
||||
description: Example domain for Kubernetes Cluster Federation
|
||||
dnsName: example.com.
|
||||
id: '3229332181334243121'
|
||||
kind: dns#managedZone
|
||||
name: example-dot-com
|
||||
nameServers:
|
||||
- ns-cloud-a1.googledomains.com.
|
||||
- ns-cloud-a2.googledomains.com.
|
||||
- ns-cloud-a3.googledomains.com.
|
||||
- ns-cloud-a4.googledomains.com.
|
||||
|
||||
$ gcloud dns record-sets list --zone example-dot-com
|
||||
|
||||
NAME TYPE TTL DATA
|
||||
example.com. NS 21600 ns-cloud-e1.googledomains.com., ns-cloud-e2.googledomains.com.
|
||||
example.com. SOA 21600 ns-cloud-e1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 3600 1209600 300
|
||||
nginx.mynamespace.myfederation.svc.example.com. A 180 104.XXX.XXX.XXX, 130.XXX.XX.XXX, 104.XXX.XX.XXX, 104.XXX.XXX.XX
|
||||
nginx.mynamespace.myfederation.svc.us-central1-a.example.com. A 180 104.XXX.XXX.XXX
|
||||
nginx.mynamespace.myfederation.svc.us-central1.example.com.
|
||||
nginx.mynamespace.myfederation.svc.us-central1.example.com. A 180 104.XXX.XXX.XXX, 104.XXX.XXX.XXX, 104.XXX.XXX.XXX
|
||||
nginx.mynamespace.myfederation.svc.asia-east1-a.example.com. A 180 130.XXX.XX.XXX
|
||||
nginx.mynamespace.myfederation.svc.asia-east1.example.com.
|
||||
nginx.mynamespace.myfederation.svc.asia-east1.example.com. A 180 130.XXX.XX.XXX, 130.XXX.XX.XXX
|
||||
nginx.mynamespace.myfederation.svc.europe-west1.example.com. CNAME 180 nginx.mynamespace.myfederation.svc.example.com.
|
||||
... etc.
|
||||
```
|
||||
|
||||
|
||||
|
||||
Note: If your Federation is configured to use AWS Route53, you can use one of the equivalent AWS tools, for example:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
$ aws route53 list-hosted-zones
|
||||
|
||||
and
|
||||
|
||||
$ aws route53 list-resource-record-sets --hosted-zone-id Z3ECL0L9QLOVBX
|
||||
```
|
||||
|
||||
|
||||
|
||||
Whatever DNS provider you use, any DNS query tool (for example 'dig' or 'nslookup') will of course also allow you to see the records created by the Federation for you.
|
||||
|
||||
|
||||
|
||||
**Discovering a Federated Service from pods Inside your Federated Clusters**
|
||||
|
||||
|
||||
|
||||
By default, Kubernetes clusters come preconfigured with a cluster-local DNS server ('KubeDNS'), as well as an intelligently constructed DNS search path which together ensure that DNS queries like "myservice", "myservice.mynamespace", "bobsservice.othernamespace" etc issued by your software running inside Pods are automatically expanded and resolved correctly to the appropriate service IP of services running in the local cluster.
|
||||
|
||||
|
||||
|
||||
With the introduction of Federated Services and Cross-Cluster Service Discovery, this concept is extended to cover Kubernetes services running in any other cluster across your Cluster Federation, globally. To take advantage of this extended range, you use a slightly different DNS name (e.g. myservice.mynamespace.myfederation) to resolve federated services. Using a different DNS name also avoids having your existing applications accidentally traversing cross-zone or cross-region networks and you incurring perhaps unwanted network charges or latency, without you explicitly opting in to this behavior.
|
||||
|
||||
|
||||
|
||||
So, using our NGINX example service above, and the federated service DNS name form just described, let's consider an example: A Pod in a cluster in the us-central1-a availability zone needs to contact our NGINX service. Rather than use the service's traditional cluster-local DNS name ("nginx.mynamespace", which is automatically expanded to "nginx.mynamespace.svc.cluster.local") it can now use the service's Federated DNS name, which is "nginx.mynamespace.myfederation". This will be automatically expanded and resolved to the closest healthy shard of my NGINX service, wherever in the world that may be. If a healthy shard exists in the local cluster, that service's cluster-local (typically 10.x.y.z) IP address will be returned (by the cluster-local KubeDNS). This is exactly equivalent to non-federated service resolution.
|
||||
|
||||
|
||||
|
||||
If the service does not exist in the local cluster (or it exists but has no healthy backend pods), the DNS query is automatically expanded to "nginx.mynamespace.myfederation.svc.us-central1-a.example.com". Behind the scenes, this is finding the external IP of one of the shards closest to my availability zone. This expansion is performed automatically by KubeDNS, which returns the associated CNAME record. This results in a traversal of the hierarchy of DNS records in the above example, and ends up at one of the external IP's of the Federated Service in the local us-central1 region.
|
||||
|
||||
|
||||
|
||||
It is also possible to target service shards in availability zones and regions other than the ones local to a Pod by specifying the appropriate DNS names explicitly, and not relying on automatic DNS expansion. For example, "nginx.mynamespace.myfederation.svc.europe-west1.example.com" will resolve to all of the currently healthy service shards in Europe, even if the Pod issuing the lookup is located in the U.S., and irrespective of whether or not there are healthy shards of the service in the U.S. This is useful for remote monitoring and other similar applications.
|
||||
|
||||
|
||||
|
||||
**Discovering a Federated Service from Other Clients Outside your Federated Clusters**
|
||||
|
||||
|
||||
|
||||
For external clients, the automatic DNS expansion described above is no longer possible. External clients need to specify one of the fully qualified DNS names of the federated service, be that a zonal, regional or global name. For convenience reasons, it is often a good idea to manually configure additional static CNAME records in your service, for example:
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
eu.nginx.acme.com CNAME nginx.mynamespace.myfederation.svc.europe-west1.example.com.
|
||||
us.nginx.acme.com CNAME nginx.mynamespace.myfederation.svc.us-central1.example.com.
|
||||
nginx.acme.com CNAME nginx.mynamespace.myfederation.svc.example.com.
|
||||
```
|
||||
|
||||
|
||||
|
||||
That way your clients can always use the short form on the left, and always be automatically routed to the closest healthy shard on their home continent. All of the required failover is handled for you automatically by Kubernetes Cluster Federation.
|
||||
|
||||
|
||||
|
||||
**Handling Failures of Backend Pods and Whole Clusters**
|
||||
|
||||
|
||||
|
||||
Standard Kubernetes service cluster-IP's already ensure that non-responsive individual Pod endpoints are automatically taken out of service with low latency. The Kubernetes cluster federation system automatically monitors the health of clusters and the endpoints behind all of the shards of your federated service, taking shards in and out of service as required. Due to the latency inherent in DNS caching (the cache timeout, or TTL for federated service DNS records is configured to 3 minutes, by default, but can be adjusted), it may take up to that long for all clients to completely fail over to an alternative cluster in the case of catastrophic failure. However, given the number of discrete IP addresses which can be returned for each regional service endpoint (see e.g. us-central1 above, which has three alternatives) many clients will fail over automatically to one of the alternative IP's in less time than that given appropriate configuration.
|
||||
|
||||
|
||||
|
||||
**Community**
|
||||
|
||||
|
||||
|
||||
We'd love to hear feedback on Kubernetes Cross Cluster Services. To join the community:
|
||||
|
||||
- Post issues or feature requests on [GitHub](https://github.com/kubernetes/kubernetes/tree/master/federation)
|
||||
- Join us in the #federation channel on [Slack](https://kubernetes.slack.com/messages/sig-federation)
|
||||
- Participate in the [Cluster Federation SIG](https://groups.google.com/forum/#!forum/kubernetes-sig-federation)
|
||||
|
||||
|
||||
Please give Cross Cluster Services a try, and let us know how it goes!
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_-- Quinton Hoole, Engineering Lead, Google and Allan Naim, Product Manager, Google_
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Dashboard - Full Featured Web Interface for Kubernetes "
|
||||
date: Saturday, July 15, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
|
||||
_Editor’s note: this post is part of a [series of in-depth articles](http://blog.kubernetes.io/2016/07/five-days-of-kubernetes-1.3.html) on what's new in Kubernetes 1.3_
|
||||
|
||||
[Kubernetes Dashboard](http://github.com/kubernetes/dashboard) is a project that aims to bring a general purpose monitoring and operational web interface to the Kubernetes world. Three months ago we [released](http://blog.kubernetes.io/2016/04/building-awesome-user-interfaces-for-kubernetes.html) the first production ready version, and since then the dashboard has made massive improvements. In a single UI, you’re able to perform the majority of possible interactions with your Kubernetes clusters without ever leaving your browser. This blog post breaks down new features introduced in the latest release and outlines the roadmap for the future.
|
||||
|
||||
**Full-Featured Dashboard**
|
||||
|
||||
Thanks to a large number of contributions from the community and project members, we were able to deliver many new features for [Kubernetes 1.3 release](http://blog.kubernetes.io/2016/07/kubernetes-1.3-bridging-cloud-native-and-enterprise-workloads.html). We have been carefully listening to all the great feedback we have received from our users (see the [summary infographics](http://static.lwy.io/img/kubernetes_dashboard_infographic.png)) and addressed the highest priority requests and pain points.
|
||||
|
||||
The Dashboard UI now handles all workload resources. This means that no matter what workload type you run, it is visible in the web interface and you can do operational changes on it. For example, you can modify your stateful MySQL installation with [Pet Sets](http://kubernetes.io/docs/user-guide/petset/), do a rolling update of your web server with Deployments or install cluster monitoring with DaemonSets.
|
||||
|
||||
|
||||
|
||||
[{:.big-img} ](https://lh3.googleusercontent.com/p9bMGxPx4jE6_Z2KB-MktmyuAxyFst-bEk29M_Bn0Bj5ul7uzinH6u5WjHsMmqhGvBwlABZt06dwQ5qkBZiLq_EM1oddCmpwChvXDNXZypaS5l8uzkKuZj3PBUmzTQT4dgDxSXgz)
|
||||
|
||||
|
||||
|
||||
In addition to viewing resources, you can create, edit, update, and delete them. This feature enables many use cases. For example, you can kill a failed Pod, do a rolling update on a Deployment, or just organize your resources. You can also export and import YAML configuration files of your cloud apps and store them in a version control system.
|
||||
|
||||
|
||||
|
||||
{: .big-img}
|
||||
|
||||
|
||||
|
||||
The release includes a beta view of cluster nodes for administration and operational use cases. The UI lists all nodes in the cluster to allow for overview analysis and quick screening for problematic nodes. The details view shows all information about the node and links to pods running on it.
|
||||
|
||||
|
||||
|
||||
{: .big-img}
|
||||
|
||||
|
||||
|
||||
There are also many smaller scope new features that we shipped with the release, namely: support for namespaced resources, internationalization, performance improvements, and many bug fixes (find out more in the [release notes](https://github.com/kubernetes/dashboard/releases/tag/v1.1.0)). All these improvements result in a better and simpler user experience of the product.
|
||||
|
||||
|
||||
|
||||
**Future Work**
|
||||
|
||||
|
||||
|
||||
The team has ambitious plans for the future spanning across multiple use cases. We are also open to all feature requests, which you can post on our [issue tracker](https://github.com/kubernetes/dashboard/issues).
|
||||
|
||||
|
||||
|
||||
Here is a list of our focus areas for the following months:
|
||||
|
||||
- [Handle more Kubernetes resources](https://github.com/kubernetes/dashboard/issues/961) - To show all resources that a cluster user may potentially interact with. Once done, Dashboard can act as a complete replacement for CLI.
|
||||
- [Monitoring and troubleshooting](https://github.com/kubernetes/dashboard/issues/962) - To add resource usage statistics/graphs to the objects shown in Dashboard. This focus area will allow for actionable debugging and troubleshooting of cloud applications.
|
||||
- [Security, auth and logging in](https://github.com/kubernetes/dashboard/issues/964) - Make Dashboard accessible from networks external to a Cluster and work with custom authentication systems.
|
||||
|
||||
|
||||
|
||||
**Connect With Us**
|
||||
|
||||
|
||||
|
||||
We would love to talk with you and hear your feedback!
|
||||
|
||||
- Email us at the [SIG-UI mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-ui)
|
||||
- Chat with us on the Kubernetes Slack [#SIG-UI channel](https://kubernetes.slack.com/messages/sig-ui/)
|
||||
- Join our meetings: 4PM CEST. See the [SIG-UI calendar](https://calendar.google.com/calendar/embed?src=google.com_52lm43hc2kur57dgkibltqc6kc%40group.calendar.google.com&ctz=Europe/Warsaw) for details.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
_-- Piotr Bryk, Software Engineer, Google_
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
---
|
||||
permalink: /blog/:year/:month/:title
|
||||
layout: blog
|
||||
title: " Five Days of Kubernetes 1.3 "
|
||||
date: Tuesday, July 11, 2016
|
||||
pagination:
|
||||
enabled: true
|
||||
---
|
||||
Last week we [released Kubernetes 1.3](http://blog.kubernetes.io/2016/07/kubernetes-1.3-bridging-cloud-native-and-enterprise-workloads.html), two years from the day when the first Kubernetes commit was pushed to GitHub. Now 30,000+ commits later from over 800 contributors, this 1.3 release is jam-packed with updates driven by feedback from users.
|
||||
|
||||
While many new improvements and features have been added in the latest release, we’ll be highlighting several that stand out. Follow along and read these in-depth posts on what’s new and how we continue to make Kubernetes the best way to manage containers at scale.
|
||||
|
||||
|
||||
|
|
||||
Day 1
|
||||
|
|
||||
|
||||
\* [Minikube: easily run Kubernetes locally](http://blog.kubernetes.io/2016/07/minikube-easily-run-kubernetes-locally.html)
|
||||
\* [rktnetes: brings rkt container engine to Kubernetes](http://blog.kubernetes.io/2016/07/rktnetes-brings-rkt-container-engine-to-Kubernetes.html)
|
||||
|
|
||||
|
|
||||
Day 2
|
||||
|
|
||||
\* [Autoscaling in Kubernetes](http://blog.kubernetes.io/2016/07/autoscaling-in-kubernetes.html)
|
||||
\* _Partner post: [Kubernetes in Rancher, the further evolution](http://blog.kubernetes.io/2016/07/kubernetes-in-rancher-further-evolution.html)_
|
||||
|
|
||||
|
|
||||
Day 3
|
||||
|
|
||||
\* [Deploying thousand instances of Cassandra using Pet Set](http://blog.kubernetes.io/2016/07/thousand-instances-of-cassandra-using-kubernetes-pet-set.html)
|
||||
\* _Partner post: [Stateful Applications in Containers, by Diamanti](http://blog.kubernetes.io/2016/07/stateful-applications-in-containers-kubernetes.html)_
|
||||
|
|
||||
|
|
||||
Day 4
|
||||
|
|
||||
\* [Cross Cluster Services](http://blog.kubernetes.io/2016/07/cross-cluster-services.html)
|
||||
_\* Partner post: [Citrix and NetScaler CPX](http://blog.kubernetes.io/2016/07/Citrix-NetScaler-and-Kubernetes.html)_
|
||||
|
|
||||
|
|
||||
Day 5
|
||||
|
|
||||
\* [Dashboard - Full Featured Web Interface for Kubernetes](http://blog.kubernetes.io/2016/07/dashboard-web-interface-for-kubernetes.html)
|
||||
\* _Partner post: [Steering an Automation Platform at Wercker with Kubernetes](http://blog.kubernetes.io/2016/07/automation-platform-at-wercker-with-kubernetes.html)_
|
||||
|
|
||||
|
|
||||
Bonus
|
||||
|
|
||||
\* [Updates to Performance and Scalability](http://blog.kubernetes.io/2016/07/kubernetes-updates-to-performance-and-scalability-in-1.3.html)
|
||||
|
|
||||
|
||||
|
||||
|
||||
**Connect**
|
||||
|
||||
|
||||
We’d love to hear from you and see you participate in this growing community:
|
||||
|
||||
- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)
|
||||
- Post questions (or answer questions) on [Stackoverflow](https://stackoverflow.com/questions/tagged/kubernetes)
|
||||
- Connect with the community on [Slack](http://slack.kubernetes.io/)
|
||||
- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue