From ff12cf1a0ba18ae613063fbea135dfc2ae96d5f6 Mon Sep 17 00:00:00 2001 From: Ilya Dmitrichenko Date: Mon, 4 Apr 2016 17:35:06 +0100 Subject: [PATCH 1/2] coreos/azure: The provisioning code has been moved into an external repository --- .../coreos/azure/addons/skydns-rc.yaml | 99 ------ .../coreos/azure/addons/skydns-svc.yaml | 20 -- .../coreos/azure/azure-login.js | 3 - .../kubernetes-cluster-etcd-node-template.yml | 19 -- .../coreos/azure/create-kubernetes-cluster.js | 15 - .../coreos/azure/destroy-cluster.js | 7 - .../coreos/azure/expose_guestbook_app_port.sh | 29 -- .../coreos/azure/lib/azure_wrapper.js | 286 ------------------ .../coreos/azure/lib/cloud_config.js | 58 ---- .../azure/lib/deployment_logic/kubernetes.js | 77 ----- .../coreos/azure/lib/util.js | 33 -- .../coreos/azure/scale-kubernetes-cluster.js | 10 - 12 files changed, 656 deletions(-) delete mode 100644 docs/getting-started-guides/coreos/azure/addons/skydns-rc.yaml delete mode 100644 docs/getting-started-guides/coreos/azure/addons/skydns-svc.yaml delete mode 100755 docs/getting-started-guides/coreos/azure/azure-login.js delete mode 100644 docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml delete mode 100755 docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js delete mode 100755 docs/getting-started-guides/coreos/azure/destroy-cluster.js delete mode 100755 docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh delete mode 100644 docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js delete mode 100644 docs/getting-started-guides/coreos/azure/lib/cloud_config.js delete mode 100644 docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js delete mode 100644 docs/getting-started-guides/coreos/azure/lib/util.js delete mode 100755 docs/getting-started-guides/coreos/azure/scale-kubernetes-cluster.js diff --git a/docs/getting-started-guides/coreos/azure/addons/skydns-rc.yaml b/docs/getting-started-guides/coreos/azure/addons/skydns-rc.yaml deleted file mode 100644 index ee31c5107c..0000000000 --- a/docs/getting-started-guides/coreos/azure/addons/skydns-rc.yaml +++ /dev/null @@ -1,99 +0,0 @@ -apiVersion: v1 -kind: ReplicationController -metadata: - name: kube-dns-v9 - namespace: kube-system - labels: - k8s-app: kube-dns - version: v9 - kubernetes.io/cluster-service: "true" -spec: - replicas: 3 - selector: - k8s-app: kube-dns - version: v9 - template: - metadata: - labels: - k8s-app: kube-dns - version: v9 - kubernetes.io/cluster-service: "true" - spec: - containers: - - name: etcd - image: gcr.io/google_containers/etcd:2.0.9 - resources: - limits: - cpu: 100m - memory: 50Mi - command: - - /usr/local/bin/etcd - - -data-dir - - /var/etcd/data - - -listen-client-urls - - http://127.0.0.1:2379,http://127.0.0.1:4001 - - -advertise-client-urls - - http://127.0.0.1:2379,http://127.0.0.1:4001 - - -initial-cluster-token - - skydns-etcd - volumeMounts: - - name: etcd-storage - mountPath: /var/etcd/data - - name: kube2sky - image: gcr.io/google_containers/kube2sky:1.11 - resources: - limits: - cpu: 100m - memory: 50Mi - args: - # command = "/kube2sky" - - -domain=kube.local - - -kube_master_url=http://kube-00:8080 - - name: skydns - image: gcr.io/google_containers/skydns:2015-03-11-001 - resources: - limits: - cpu: 100m - memory: 50Mi - args: - # command = "/skydns" - - -machines=http://localhost:4001 - - -addr=0.0.0.0:53 - - -domain=kube.local - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: 
dns-tcp - protocol: TCP - livenessProbe: - httpGet: - path: /healthz - port: 8080 - scheme: HTTP - initialDelaySeconds: 30 - timeoutSeconds: 5 - readinessProbe: - httpGet: - path: /healthz - port: 8080 - scheme: HTTP - initialDelaySeconds: 1 - timeoutSeconds: 5 - - name: healthz - image: gcr.io/google_containers/exechealthz:1.0 - resources: - limits: - cpu: 10m - memory: 20Mi - args: - - -cmd=nslookup kubernetes.default.svc.kube.local localhost >/dev/null - - -port=8080 - ports: - - containerPort: 8080 - protocol: TCP - volumes: - - name: etcd-storage - emptyDir: {} - dnsPolicy: Default # Don't use cluster DNS. diff --git a/docs/getting-started-guides/coreos/azure/addons/skydns-svc.yaml b/docs/getting-started-guides/coreos/azure/addons/skydns-svc.yaml deleted file mode 100644 index c15822d6bc..0000000000 --- a/docs/getting-started-guides/coreos/azure/addons/skydns-svc.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "KubeDNS" -spec: - selector: - k8s-app: kube-dns - clusterIP: 10.16.0.3 - ports: - - name: dns - port: 53 - protocol: UDP - - name: dns-tcp - port: 53 - protocol: TCP diff --git a/docs/getting-started-guides/coreos/azure/azure-login.js b/docs/getting-started-guides/coreos/azure/azure-login.js deleted file mode 100755 index 624916b2b5..0000000000 --- a/docs/getting-started-guides/coreos/azure/azure-login.js +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env node - -require('child_process').fork('node_modules/azure-cli/bin/azure', ['login'].concat(process.argv)); diff --git a/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml b/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml deleted file mode 100644 index 4cbb480e53..0000000000 --- a/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml +++ /dev/null @@ -1,19 +0,0 @@ -## This file is used as input to deployment script, which amends it as needed. -## More specifically, we need to add peer hosts for each but the elected peer. 
- -coreos: - units: - - name: etcd2.service - enable: true - command: start - etcd2: - name: '%H' - initial-cluster-token: 'etcd-cluster' - initial-advertise-peer-urls: 'http://%H:2380' - listen-peer-urls: 'http://%H:2380' - listen-client-urls: 'http://0.0.0.0:2379,http://0.0.0.0:4001' - advertise-client-urls: 'http://%H:2379,http://%H:4001' - initial-cluster-state: 'new' - update: - group: stable - reboot-strategy: off diff --git a/docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js b/docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js deleted file mode 100755 index 70248c596c..0000000000 --- a/docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env node - -var azure = require('./lib/azure_wrapper.js'); -var kube = require('./lib/deployment_logic/kubernetes.js'); - -azure.create_config('kube', { 'etcd': 3, 'kube': 3 }); - -azure.run_task_queue([ - azure.queue_default_network(), - azure.queue_storage_if_needed(), - azure.queue_machines('etcd', 'stable', - kube.create_etcd_cloud_config), - azure.queue_machines('kube', 'stable', - kube.create_node_cloud_config), -]); diff --git a/docs/getting-started-guides/coreos/azure/destroy-cluster.js b/docs/getting-started-guides/coreos/azure/destroy-cluster.js deleted file mode 100755 index ce441e538a..0000000000 --- a/docs/getting-started-guides/coreos/azure/destroy-cluster.js +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env node - -var azure = require('./lib/azure_wrapper.js'); - -azure.destroy_cluster(process.argv[2]); - -console.log('The cluster had been destroyed, you can delete the state file now.'); diff --git a/docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh b/docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh deleted file mode 100755 index 65dfaf5d3a..0000000000 --- a/docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -[ ! 
-z $1 ] || (echo Usage: $0 ssh_conf; exit 1) - -fe_port=$(ssh -F $1 kube-00 \ - "/opt/bin/kubectl get -o template --template='{{(index .spec.ports 0).nodePort}}' services frontend -L name=frontend" \ -) - -echo "Guestbook app is on port $fe_port, will map it to port 80 on kube-00" - -./node_modules/.bin/azure vm endpoint create kube-00 80 $fe_port - -./node_modules/.bin/azure vm endpoint show kube-00 tcp-80-${fe_port} diff --git a/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js b/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js deleted file mode 100644 index 93402c10ed..0000000000 --- a/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js +++ /dev/null @@ -1,286 +0,0 @@ -var _ = require('underscore'); - -var fs = require('fs'); -var cp = require('child_process'); - -var yaml = require('js-yaml'); - -var openssl = require('openssl-wrapper'); - -var clr = require('colors'); -var inspect = require('util').inspect; - -var util = require('./util.js'); - -var coreos_image_ids = { - 'stable': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Stable-835.12.0', // untested - 'beta': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Beta-899.6.0', - 'alpha': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Alpha-942.0.0' // untested -}; - -var conf = {}; - -var hosts = { - collection: [], - ssh_port_counter: 2200, -}; - -var task_queue = []; - -exports.run_task_queue = function (dummy) { - var tasks = { - todo: task_queue, - done: [], - }; - - var pop_task = function() { - console.log(clr.yellow('azure_wrapper/task:'), clr.grey(inspect(tasks))); - var ret = {}; - ret.current = tasks.todo.shift(); - ret.remaining = tasks.todo.length; - return ret; - }; - - (function iter (task) { - if (task.current === undefined) { - if (conf.destroying === undefined) { - create_ssh_conf(); - save_state(); - } - return; - } else { - if (task.current.length !== 0) { - console.log(clr.yellow('azure_wrapper/exec:'), clr.blue(inspect(task.current))); - cp.fork('node_modules/azure-cli/bin/azure', task.current) - .on('exit', function (code, signal) { - tasks.done.push({ - code: code, - signal: signal, - what: task.current.join(' '), - remaining: task.remaining, - }); - if (code !== 0 && conf.destroying === undefined) { - console.log(clr.red('azure_wrapper/fail: Exiting due to an error.')); - save_state(); - console.log(clr.cyan('azure_wrapper/info: You probably want to destroy and re-run.')); - process.abort(); - } else { - iter(pop_task()); - } - }); - } else { - iter(pop_task()); - } - } - })(pop_task()); -}; - -var save_state = function () { - var file_name = util.join_output_file_path(conf.name, 'deployment.yml'); - try { - conf.hosts = hosts.collection; - fs.writeFileSync(file_name, yaml.safeDump(conf)); - console.log(clr.yellow('azure_wrapper/info: Saved state into `%s`'), file_name); - } catch (e) { - console.log(clr.red(e)); - } -}; - -var load_state = function (file_name) { - try { - conf = yaml.safeLoad(fs.readFileSync(file_name, 'utf8')); - console.log(clr.yellow('azure_wrapper/info: Loaded state from `%s`'), file_name); - return conf; - } catch (e) { - console.log(clr.red(e)); - } -}; - -var create_ssh_key = function (prefix) { - var opts = { - x509: true, - nodes: true, - newkey: 'rsa:2048', - subj: '/O=Weaveworks, Inc./L=London/C=GB/CN=weave.works', - keyout: util.join_output_file_path(prefix, 'ssh.key'), - out: util.join_output_file_path(prefix, 'ssh.pem'), - }; - openssl.exec('req', opts, function (err, buffer) { - if (err) console.log(clr.red(err)); - openssl.exec('rsa', { in: opts.keyout, 
out: opts.keyout }, function (err, buffer) { - if (err) console.log(clr.red(err)); - fs.chmod(opts.keyout, '0600', function (err) { - if (err) console.log(clr.red(err)); - }); - }); - }); - return { - key: opts.keyout, - pem: opts.out, - } -} - -var create_ssh_conf = function () { - var file_name = util.join_output_file_path(conf.name, 'ssh_conf'); - var ssh_conf_head = [ - "Host *", - "\tHostname " + conf.resources['service'] + ".cloudapp.net", - "\tUser core", - "\tCompression yes", - "\tLogLevel FATAL", - "\tStrictHostKeyChecking no", - "\tUserKnownHostsFile /dev/null", - "\tIdentitiesOnly yes", - "\tIdentityFile " + conf.resources['ssh_key']['key'], - "\n", - ]; - - fs.writeFileSync(file_name, ssh_conf_head.concat(_.map(hosts.collection, function (host) { - return _.template("Host <%= name %>\n\tPort <%= port %>\n")(host); - })).join('\n')); - console.log(clr.yellow('azure_wrapper/info:'), clr.green('Saved SSH config, you can use it like so: `ssh -F ', file_name, '`')); - console.log(clr.yellow('azure_wrapper/info:'), clr.green('The hosts in this deployment are:\n'), _.map(hosts.collection, function (host) { return host.name; })); -}; - -var get_location = function () { - if (process.env['AZ_AFFINITY']) { - return '--affinity-group=' + process.env['AZ_AFFINITY']; - } else if (process.env['AZ_LOCATION']) { - return '--location=' + process.env['AZ_LOCATION']; - } else { - return '--location=West Europe'; - } -} -var get_vm_size = function () { - if (process.env['AZ_VM_SIZE']) { - return '--vm-size=' + process.env['AZ_VM_SIZE']; - } else { - return '--vm-size=Small'; - } -} - -var get_subscription= function () { - if (process.env['AZ_SUBSCRIPTION']) { - return '--subscription=' + process.env['AZ_SUBSCRIPTION']; - } -} - -exports.queue_default_network = function () { - task_queue.push([ - 'network', 'vnet', 'create', - get_location(), - '--address-space=172.16.0.0', - get_subscription(), - conf.resources['vnet'], - ]); -} - -exports.queue_storage_if_needed = function() { - if (!process.env['AZURE_STORAGE_ACCOUNT']) { - conf.resources['storage_account'] = util.rand_suffix; - task_queue.push([ - 'storage', 'account', 'create', - '--type=LRS', - get_location(), - get_subscription(), - conf.resources['storage_account'], - ]); - process.env['AZURE_STORAGE_ACCOUNT'] = conf.resources['storage_account']; - } else { - // Preserve it for resizing, so we don't create a new one by accident, - // when the environment variable is unset - conf.resources['storage_account'] = process.env['AZURE_STORAGE_ACCOUNT']; - } -}; - -exports.queue_machines = function (name_prefix, coreos_update_channel, cloud_config_creator) { - var x = conf.nodes[name_prefix]; - var vm_create_base_args = [ - 'vm', 'create', - get_location(), - get_vm_size(), - '--connect=' + conf.resources['service'], - '--virtual-network-name=' + conf.resources['vnet'], - '--no-ssh-password', - '--ssh-cert=' + conf.resources['ssh_key']['pem'], - get_subscription(), - ]; - - var cloud_config = cloud_config_creator(x, conf); - - var next_host = function (n) { - hosts.ssh_port_counter += 1; - var host = { name: util.hostname(n, name_prefix), port: hosts.ssh_port_counter }; - if (cloud_config instanceof Array) { - host.cloud_config_file = cloud_config[n]; - } else { - host.cloud_config_file = cloud_config; - } - hosts.collection.push(host); - return _.map([ - "--vm-name=<%= name %>", - "--ssh=<%= port %>", - "--custom-data=<%= cloud_config_file %>", - ], function (arg) { return _.template(arg)(host); }); - }; - - task_queue = 
task_queue.concat(_(x).times(function (n) { - if (conf.resizing && n < conf.old_size) { - return []; - } else { - if (process.env['AZ_VM_COREOS_CHANNEL']) { - coreos_update_channel = process.env['AZ_VM_COREOS_CHANNEL'] - } - return vm_create_base_args.concat(next_host(n), [ - coreos_image_ids[coreos_update_channel], 'core', - ]); - } - })); -}; - -exports.create_config = function (name, nodes) { - conf = { - name: name, - nodes: nodes, - weave_salt: util.rand_string(), - resources: { - vnet: [name, 'internal-vnet', util.rand_suffix].join('-'), - service: [name, util.rand_suffix].join('-'), - ssh_key: create_ssh_key(name), - } - }; - -}; - -exports.destroy_cluster = function (state_file) { - load_state(state_file); - if (conf.hosts === undefined) { - console.log(clr.red('azure_wrapper/fail: Nothing to delete.')); - process.abort(); - } - - conf.destroying = true; - task_queue = _.map(conf.hosts, function (host) { - return ['vm', 'delete', '--quiet', '--blob-delete', host.name, get_subscription()]; - }); - - task_queue.push(['network', 'vnet', 'delete', '--quiet', conf.resources['vnet'], get_subscription()]); - task_queue.push(['storage', 'account', 'delete', '--quiet', conf.resources['storage_account'], get_subscription()]); - - exports.run_task_queue(); -}; - -exports.load_state_for_resizing = function (state_file, node_type, new_nodes) { - load_state(state_file); - if (conf.hosts === undefined) { - console.log(clr.red('azure_wrapper/fail: Nothing to look at.')); - process.abort(); - } - conf.resizing = true; - conf.old_size = conf.nodes[node_type]; - conf.old_state_file = state_file; - conf.nodes[node_type] += new_nodes; - hosts.collection = conf.hosts; - hosts.ssh_port_counter += conf.hosts.length; - process.env['AZURE_STORAGE_ACCOUNT'] = conf.resources['storage_account']; -} diff --git a/docs/getting-started-guides/coreos/azure/lib/cloud_config.js b/docs/getting-started-guides/coreos/azure/lib/cloud_config.js deleted file mode 100644 index d08b3f06ae..0000000000 --- a/docs/getting-started-guides/coreos/azure/lib/cloud_config.js +++ /dev/null @@ -1,58 +0,0 @@ -var _ = require('underscore'); -var fs = require('fs'); -var yaml = require('js-yaml'); -var colors = require('colors/safe'); - -var write_cloud_config_from_object = function (data, output_file) { - try { - fs.writeFileSync(output_file, [ - '#cloud-config', - yaml.safeDump(data), - ].join("\n")); - return output_file; - } catch (e) { - console.log(colors.red(e)); - } -}; - -exports.generate_environment_file_entry_from_object = function (hostname, environ) { - var data = { - hostname: hostname, - environ_array: _.map(environ, function (value, key) { - return [key.toUpperCase(), JSON.stringify(value.toString())].join('='); - }), - }; - - return { - permissions: '0600', - owner: 'root', - content: _.template("<%= environ_array.join('\\n') %>\n")(data), - path: _.template("/etc/weave.<%= hostname %>.env")(data), - }; -}; - -exports.process_template = function (input_file, output_file, processor) { - var data = {}; - try { - data = yaml.safeLoad(fs.readFileSync(input_file, 'utf8')); - } catch (e) { - console.log(colors.red(e)); - } - return write_cloud_config_from_object(processor(_.clone(data)), output_file); -}; - -exports.write_files_from = function (local_dir, remote_dir) { - try { - return _.map(fs.readdirSync(local_dir), function (fn) { - return { - path: [remote_dir, fn].join('/'), - owner: 'root', - permissions: '0640', - encoding: 'base64', - content: fs.readFileSync([local_dir, fn].join('/')).toString('base64'), - }; - }); - } 
catch (e) { - console.log(colors.red(e)); - } -}; diff --git a/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js b/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js deleted file mode 100644 index 2002b43a53..0000000000 --- a/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js +++ /dev/null @@ -1,77 +0,0 @@ -var _ = require('underscore'); -_.mixin(require('underscore.string').exports()); - -var util = require('../util.js'); -var cloud_config = require('../cloud_config.js'); - - -etcd_initial_cluster_conf_self = function (conf) { - var port = '2380'; - - var data = { - nodes: _(conf.nodes.etcd).times(function (n) { - var host = util.hostname(n, 'etcd'); - return [host, [host, port].join(':')].join('=http://'); - }), - }; - - return { - 'name': 'etcd2.service', - 'drop-ins': [{ - 'name': '50-etcd-initial-cluster.conf', - 'content': _.template("[Service]\nEnvironment=ETCD_INITIAL_CLUSTER=<%= nodes.join(',') %>\n")(data), - }], - }; -}; - -etcd_initial_cluster_conf_kube = function (conf) { - var port = '4001'; - - var data = { - nodes: _(conf.nodes.etcd).times(function (n) { - var host = util.hostname(n, 'etcd'); - return 'http://' + [host, port].join(':'); - }), - }; - - return { - 'name': 'kube-apiserver.service', - 'drop-ins': [{ - 'name': '50-etcd-initial-cluster.conf', - 'content': _.template("[Service]\nEnvironment=ETCD_SERVERS=--etcd-servers=<%= nodes.join(',') %>\n")(data), - }], - }; -}; - -exports.create_etcd_cloud_config = function (node_count, conf) { - var input_file = './cloud_config_templates/kubernetes-cluster-etcd-node-template.yml'; - var output_file = util.join_output_file_path('kubernetes-cluster-etcd-nodes', 'generated.yml'); - - return cloud_config.process_template(input_file, output_file, function(data) { - data.coreos.units.push(etcd_initial_cluster_conf_self(conf)); - return data; - }); -}; - -exports.create_node_cloud_config = function (node_count, conf) { - var elected_node = 0; - - var input_file = './cloud_config_templates/kubernetes-cluster-main-nodes-template.yml'; - var output_file = util.join_output_file_path('kubernetes-cluster-main-nodes', 'generated.yml'); - - var make_node_config = function (n) { - return cloud_config.generate_environment_file_entry_from_object(util.hostname(n, 'kube'), { - weave_password: conf.weave_salt, - weave_peers: n === elected_node ? 
"" : util.hostname(elected_node, 'kube'), - breakout_route: util.ipv4([10, 2, 0, 0], 16), - bridge_address_cidr: util.ipv4([10, 2, n, 1], 24), - }); - }; - - var write_files_extra = cloud_config.write_files_from('addons', '/etc/kubernetes/addons'); - return cloud_config.process_template(input_file, output_file, function(data) { - data.write_files = data.write_files.concat(_(node_count).times(make_node_config), write_files_extra); - data.coreos.units.push(etcd_initial_cluster_conf_kube(conf)); - return data; - }); -}; diff --git a/docs/getting-started-guides/coreos/azure/lib/util.js b/docs/getting-started-guides/coreos/azure/lib/util.js deleted file mode 100644 index 2c88b8cff3..0000000000 --- a/docs/getting-started-guides/coreos/azure/lib/util.js +++ /dev/null @@ -1,33 +0,0 @@ -var _ = require('underscore'); -_.mixin(require('underscore.string').exports()); - -exports.ipv4 = function (ocets, prefix) { - return { - ocets: ocets, - prefix: prefix, - toString: function () { - return [ocets.join('.'), prefix].join('/'); - } - } -}; - -exports.hostname = function hostname (n, prefix) { - return _.template("<%= pre %>-<%= seq %>")({ - pre: prefix || 'core', - seq: _.pad(n, 2, '0'), - }); -}; - -exports.rand_string = function () { - var crypto = require('crypto'); - var shasum = crypto.createHash('sha256'); - shasum.update(crypto.randomBytes(256)); - return shasum.digest('hex'); -}; - - -exports.rand_suffix = exports.rand_string().substring(50); - -exports.join_output_file_path = function(prefix, suffix) { - return './output/' + [prefix, exports.rand_suffix, suffix].join('_'); -}; diff --git a/docs/getting-started-guides/coreos/azure/scale-kubernetes-cluster.js b/docs/getting-started-guides/coreos/azure/scale-kubernetes-cluster.js deleted file mode 100755 index f606898874..0000000000 --- a/docs/getting-started-guides/coreos/azure/scale-kubernetes-cluster.js +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env node - -var azure = require('./lib/azure_wrapper.js'); -var kube = require('./lib/deployment_logic/kubernetes.js'); - -azure.load_state_for_resizing(process.argv[2], 'kube', parseInt(process.argv[3] || 1)); - -azure.run_task_queue([ - azure.queue_machines('kube', 'stable', kube.create_node_cloud_config), -]); From 2b1f051de5c9d7e393b63281342483f38852cdfe Mon Sep 17 00:00:00 2001 From: Ilya Dmitrichenko Date: Mon, 4 Apr 2016 17:37:34 +0100 Subject: [PATCH 2/2] coreos/azure: Convert to UNIX EOL, `git clone` external repo (which provides v1.2 now) --- .../coreos/azure/index.md | 493 +++++++++--------- 1 file changed, 246 insertions(+), 247 deletions(-) diff --git a/docs/getting-started-guides/coreos/azure/index.md b/docs/getting-started-guides/coreos/azure/index.md index 7101ae7ee7..589cf81fcc 100644 --- a/docs/getting-started-guides/coreos/azure/index.md +++ b/docs/getting-started-guides/coreos/azure/index.md @@ -1,247 +1,246 @@ ---- ---- - -* TOC -{:toc} - - -In this guide I will demonstrate how to deploy a Kubernetes cluster to Azure cloud. You will be using CoreOS with Weave, which implements simple and secure networking, in a transparent, yet robust way. The purpose of this guide is to provide an out-of-the-box implementation that can ultimately be taken into production with little change. It will demonstrate how to provision a dedicated Kubernetes master and etcd nodes, and show how to scale the cluster with ease. - -### Prerequisites - -1. You need an Azure account. - -## Let's go! 
- -To get started, you need to checkout the code: - -```shell -git clone https://github.com/kubernetes/kubernetes -cd kubernetes/docs/getting-started-guides/coreos/azure/ -``` - -You will need to have [Node.js installed](http://nodejs.org/download/) on you machine. If you have previously used Azure CLI, you should have it already. - -First, you need to install some of the dependencies with - -```shell -npm install -``` - -Now, all you need to do is: - -```shell -./azure-login.js -u -./create-kubernetes-cluster.js -``` - -This script will provision a cluster suitable for production use, where there is a ring of 3 dedicated etcd nodes: 1 kubernetes master and 2 kubernetes nodes. The `kube-00` VM will be the master, your work loads are only to be deployed on the nodes, `kube-01` and `kube-02`. Initially, all VMs are single-core, to ensure a user of the free tier can reproduce it without paying extra. I will show how to add more bigger VMs later. -If you need to pass Azure specific options for the creation script you can do this via additional environment variables e.g. - -```shell -AZ_SUBSCRIPTION= AZ_LOCATION="East US" ./create-kubernetes-cluster.js -# or -AZ_VM_COREOS_CHANNEL=beta ./create-kubernetes-cluster.js -``` - -![VMs in Azure](/images/docs/initial_cluster.png) - -Once the creation of Azure VMs has finished, you should see the following: - -```shell -... -azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kube_1c1496016083b4_ssh_conf ` -azure_wrapper/info: The hosts in this deployment are: - [ 'etcd-00', 'etcd-01', 'etcd-02', 'kube-00', 'kube-01', 'kube-02' ] -azure_wrapper/info: Saved state into `./output/kube_1c1496016083b4_deployment.yml` -``` - -Let's login to the master node like so: - -```shell -ssh -F ./output/kube_1c1496016083b4_ssh_conf kube-00 -``` - -> Note: config file name will be different, make sure to use the one you see. - -Check there are 2 nodes in the cluster: - -```shell -core@kube-00 ~ $ kubectl get nodes -NAME LABELS STATUS -kube-01 kubernetes.io/hostname=kube-01 Ready -kube-02 kubernetes.io/hostname=kube-02 Ready -``` - -## Deploying the workload - -Let's follow the Guestbook example now: - -```shell -kubectl create -f ~/guestbook-example -``` - -You need to wait for the pods to get deployed, run the following and wait for `STATUS` to change from `Pending` to `Running`. - -```shell -kubectl get pods --watch -``` - -> Note: the most time it will spend downloading Docker container images on each of the nodes. - -Eventually you should see: - -```shell -NAME READY STATUS RESTARTS AGE -frontend-0a9xi 1/1 Running 0 4m -frontend-4wahe 1/1 Running 0 4m -frontend-6l36j 1/1 Running 0 4m -redis-master-talmr 1/1 Running 0 4m -redis-slave-12zfd 1/1 Running 0 4m -redis-slave-3nbce 1/1 Running 0 4m -``` - -## Scaling - -Two single-core nodes are certainly not enough for a production system of today. Let's scale the cluster by adding a couple of bigger nodes. - -You will need to open another terminal window on your machine and go to the same working directory (e.g. `~/Workspace/kubernetes/docs/getting-started-guides/coreos/azure/`). - -First, lets set the size of new VMs: - -```shell -export AZ_VM_SIZE=Large -``` - -Now, run scale script with state file of the previous deployment and number of nodes to add: - -```shell -core@kube-00 ~ $ ./scale-kubernetes-cluster.js ./output/kube_1c1496016083b4_deployment.yml 2 -... 
-azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kube_8f984af944f572_ssh_conf ` -azure_wrapper/info: The hosts in this deployment are: - [ 'etcd-00', - 'etcd-01', - 'etcd-02', - 'kube-00', - 'kube-01', - 'kube-02', - 'kube-03', - 'kube-04' ] -azure_wrapper/info: Saved state into `./output/kube_8f984af944f572_deployment.yml` -``` - -> Note: this step has created new files in `./output`. - -Back on `kube-00`: - -```shell -core@kube-00 ~ $ kubectl get nodes -NAME LABELS STATUS -kube-01 kubernetes.io/hostname=kube-01 Ready -kube-02 kubernetes.io/hostname=kube-02 Ready -kube-03 kubernetes.io/hostname=kube-03 Ready -kube-04 kubernetes.io/hostname=kube-04 Ready -``` - -You can see that two more nodes joined happily. Let's scale the number of Guestbook instances now. - -First, double-check how many replication controllers there are: - -```shell -core@kube-00 ~ $ kubectl get rc -ONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS -frontend php-redis kubernetes/example-guestbook-php-redis:v2 name=frontend 3 -redis-master master redis name=redis-master 1 -redis-slave worker kubernetes/redis-slave:v2 name=redis-slave 2 -``` - -As there are 4 nodes, let's scale proportionally: - -```shell -core@kube-00 ~ $ kubectl scale --replicas=4 rc redis-slave ->>>>>>> coreos/azure: Updates for 1.0 -scaled -core@kube-00 ~ $ kubectl scale --replicas=4 rc frontend -scaled -``` - -Check what you have now: - -```shell -core@kube-00 ~ $ kubectl get rc -CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS -frontend php-redis kubernetes/example-guestbook-php-redis:v2 name=frontend 4 -redis-master master redis name=redis-master 1 -redis-slave worker kubernetes/redis-slave:v2 name=redis-slave 4 -``` - -You now will have more instances of front-end Guestbook apps and Redis slaves; and, if you look up all pods labeled `name=frontend`, you should see one running on each node. - -```shell -core@kube-00 ~/guestbook-example $ kubectl get pods -l name=frontend -NAME READY STATUS RESTARTS AGE -frontend-0a9xi 1/1 Running 0 22m -frontend-4wahe 1/1 Running 0 22m -frontend-6l36j 1/1 Running 0 22m -frontend-z9oxo 1/1 Running 0 41s -``` - -## Exposing the app to the outside world - -There is no native Azure load-balancer support in Kubernetes 1.0, however here is how you can expose the Guestbook app to the Internet. - -```shell -./expose_guestbook_app_port.sh ./output/kube_1c1496016083b4_ssh_conf -Guestbook app is on port 31605, will map it to port 80 on kube-00 -info: Executing command vm endpoint create -+ Getting virtual machines -+ Reading network configuration -+ Updating network configuration -info: vm endpoint create command OK -info: Executing command vm endpoint show -+ Getting virtual machines -data: Name : tcp-80-31605 -data: Local port : 31605 -data: Protcol : tcp -data: Virtual IP Address : 137.117.156.164 -data: Direct server return : Disabled -info: vm endpoint show command OK -``` - -You then should be able to access it from anywhere via the Azure virtual IP for `kube-00` displayed above, i.e. `http://137.117.156.164/` in my case. - -## Next steps - -You now have a full-blow cluster running in Azure, congrats! - -You should probably try deploy other [example apps](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/) or write your own ;) - -## Tear down... - -If you don't wish care about the Azure bill, you can tear down the cluster. It's easy to redeploy it, as you can see. 
-
-```shell
-./destroy-cluster.js ./output/kube_8f984af944f572_deployment.yml
-```
-
-> Note: make sure to use the _latest state file_, as after scaling there is a new one.
-
-By the way, with the scripts shown, you can deploy multiple clusters, if you like :)
-
-## Support Level
-
-
-IaaS Provider | Config. Mgmt | OS | Networking | Docs | Conforms | Support Level
--------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ----------------------------
-Azure | CoreOS | CoreOS | Weave | [docs](/docs/getting-started-guides/coreos/azure/) | | Community ([@errordeveloper](https://github.com/errordeveloper), [@squillace](https://github.com/squillace), [@chanezon](https://github.com/chanezon), [@crossorigin](https://github.com/crossorigin))
-
-
-For support level information on all solutions, see the [Table of solutions](/docs/getting-started-guides/#table-of-solutions) chart.
-
-
-## Further reading
-
-Please see the [Kubernetes docs](/docs/) for more details on administering
-and using a Kubernetes cluster
-
+---
+---
+
+* TOC
+{:toc}
+
+
+In this guide I will demonstrate how to deploy a Kubernetes cluster to the Azure cloud. You will be using CoreOS with Weave, which implements simple and secure networking in a transparent yet robust way. The purpose of this guide is to provide an out-of-the-box implementation that can ultimately be taken into production with little change. It will demonstrate how to provision a dedicated Kubernetes master and etcd nodes, and show how to scale the cluster with ease.
+
+### Prerequisites
+
+1. You need an Azure account.
+
+## Let's go!
+
+To get started, you need to check out the code:
+
+```shell
+git clone https://github.com/weaveworks-guides/weave-kubernetes-coreos-azure
+cd weave-kubernetes-coreos-azure
+```
+
+You will need to have [Node.js installed](http://nodejs.org/download/) on your machine. If you have previously used the Azure CLI, you should have it already.
+
+First, you need to install some of the dependencies with
+
+```shell
+npm install
+```
+
+Now, all you need to do is:
+
+```shell
+./azure-login.js -u <your_username>
+./create-kubernetes-cluster.js
+```
+
+This script will provision a cluster suitable for production use, where there is a ring of 3 dedicated etcd nodes, 1 Kubernetes master, and 2 Kubernetes nodes. The `kube-00` VM will be the master; your workloads are only to be deployed on the nodes, `kube-01` and `kube-02`. Initially, all VMs are single-core, to ensure a user of the free tier can reproduce it without paying extra. I will show how to add more, bigger VMs later.
+If you need to pass Azure-specific options to the creation script, you can do so via additional environment variables, e.g.:
+
+```shell
+AZ_SUBSCRIPTION=<subscription_id> AZ_LOCATION="East US" ./create-kubernetes-cluster.js
+# or
+AZ_VM_COREOS_CHANNEL=beta ./create-kubernetes-cluster.js
+```
+
+![VMs in Azure](/images/docs/initial_cluster.png)
+
+Once the creation of Azure VMs has finished, you should see the following:
+
+```shell
+...
+azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kube_1c1496016083b4_ssh_conf <hostname>`
+azure_wrapper/info: The hosts in this deployment are:
+ [ 'etcd-00', 'etcd-01', 'etcd-02', 'kube-00', 'kube-01', 'kube-02' ]
+azure_wrapper/info: Saved state into `./output/kube_1c1496016083b4_deployment.yml`
+```
+
+Let's log in to the master node like so:
+
+```shell
+ssh -F ./output/kube_1c1496016083b4_ssh_conf kube-00
+```
+
+> Note: the config file name will be different; make sure to use the one you see.
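+
+The random suffix in the generated file names (`1c1496016083b4` above) differs on every deployment, so you may prefer not to type it by hand. Below is a minimal convenience sketch, assuming the `./output` layout shown above and that it only contains state from the deployment you are working with; the `ssh_conf` variable name is arbitrary:
+
+```shell
+# Pick the most recently generated SSH config instead of copying its name.
+ssh_conf=$(ls -t ./output/*_ssh_conf | head -n 1)
+ssh -F "${ssh_conf}" kube-00
+```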
+
+Check that there are 2 nodes in the cluster:
+
+```shell
+core@kube-00 ~ $ kubectl get nodes
+NAME      LABELS                           STATUS
+kube-01   kubernetes.io/hostname=kube-01   Ready
+kube-02   kubernetes.io/hostname=kube-02   Ready
+```
+
+## Deploying the workload
+
+Let's follow the Guestbook example now:
+
+```shell
+kubectl create -f ~/guestbook-example
+```
+
+You need to wait for the pods to get deployed; run the following and wait for `STATUS` to change from `Pending` to `Running`:
+
+```shell
+kubectl get pods --watch
+```
+
+> Note: most of the time will be spent downloading Docker container images on each of the nodes.
+
+Eventually you should see:
+
+```shell
+NAME                 READY     STATUS    RESTARTS   AGE
+frontend-0a9xi       1/1       Running   0          4m
+frontend-4wahe       1/1       Running   0          4m
+frontend-6l36j       1/1       Running   0          4m
+redis-master-talmr   1/1       Running   0          4m
+redis-slave-12zfd    1/1       Running   0          4m
+redis-slave-3nbce    1/1       Running   0          4m
+```
+
+## Scaling
+
+Two single-core nodes are certainly not enough for a production system of today. Let's scale the cluster by adding a couple of bigger nodes.
+
+You will need to open another terminal window on your machine and go to the same working directory (e.g. `~/Workspace/weave-kubernetes-coreos-azure/`).
+
+First, let's set the size of the new VMs:
+
+```shell
+export AZ_VM_SIZE=Large
+```
+
+Now, run the scale script with the state file of the previous deployment and the number of nodes to add:
+
+```shell
+./scale-kubernetes-cluster.js ./output/kube_1c1496016083b4_deployment.yml 2
+...
+azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kube_8f984af944f572_ssh_conf <hostname>`
+azure_wrapper/info: The hosts in this deployment are:
+ [ 'etcd-00',
+  'etcd-01',
+  'etcd-02',
+  'kube-00',
+  'kube-01',
+  'kube-02',
+  'kube-03',
+  'kube-04' ]
+azure_wrapper/info: Saved state into `./output/kube_8f984af944f572_deployment.yml`
+```
+
+> Note: this step has created new files in `./output`.
+
+Back on `kube-00`:
+
+```shell
+core@kube-00 ~ $ kubectl get nodes
+NAME      LABELS                           STATUS
+kube-01   kubernetes.io/hostname=kube-01   Ready
+kube-02   kubernetes.io/hostname=kube-02   Ready
+kube-03   kubernetes.io/hostname=kube-03   Ready
+kube-04   kubernetes.io/hostname=kube-04   Ready
+```
+
+You can see that two more nodes joined happily. Let's scale the number of Guestbook instances now.
+
+First, double-check how many replication controllers there are:
+
+```shell
+core@kube-00 ~ $ kubectl get rc
+CONTROLLER     CONTAINER(S)   IMAGE(S)                                    SELECTOR            REPLICAS
+frontend       php-redis      kubernetes/example-guestbook-php-redis:v2   name=frontend       3
+redis-master   master         redis                                       name=redis-master   1
+redis-slave    worker         kubernetes/redis-slave:v2                   name=redis-slave    2
+```
+
+As there are 4 nodes, let's scale proportionally:
+
+```shell
+core@kube-00 ~ $ kubectl scale --replicas=4 rc redis-slave
+scaled
+core@kube-00 ~ $ kubectl scale --replicas=4 rc frontend
+scaled
+```
+
+Check what you have now:
+
+```shell
+core@kube-00 ~ $ kubectl get rc
+CONTROLLER     CONTAINER(S)   IMAGE(S)                                    SELECTOR            REPLICAS
+frontend       php-redis      kubernetes/example-guestbook-php-redis:v2   name=frontend       4
+redis-master   master         redis                                       name=redis-master   1
+redis-slave    worker         kubernetes/redis-slave:v2                   name=redis-slave    4
+```
+
+You will now have more instances of front-end Guestbook apps and Redis slaves; and if you look up all pods labeled `name=frontend`, you should see one running on each node.
+
+```shell
+core@kube-00 ~/guestbook-example $ kubectl get pods -l name=frontend
+NAME                 READY     STATUS    RESTARTS   AGE
+frontend-0a9xi       1/1       Running   0          22m
+frontend-4wahe       1/1       Running   0          22m
+frontend-6l36j       1/1       Running   0          22m
+frontend-z9oxo       1/1       Running   0          41s
+```
+
+## Exposing the app to the outside world
+
+There is no native Azure load-balancer support in Kubernetes 1.0; however, here is how you can expose the Guestbook app to the Internet.
+
+```shell
+./expose_guestbook_app_port.sh ./output/kube_1c1496016083b4_ssh_conf
+Guestbook app is on port 31605, will map it to port 80 on kube-00
+info:    Executing command vm endpoint create
++ Getting virtual machines
++ Reading network configuration
++ Updating network configuration
+info:    vm endpoint create command OK
+info:    Executing command vm endpoint show
++ Getting virtual machines
+data:    Name                          : tcp-80-31605
+data:    Local port                    : 31605
+data:    Protcol                       : tcp
+data:    Virtual IP Address            : 137.117.156.164
+data:    Direct server return          : Disabled
+info:    vm endpoint show command OK
+```
+
+You should then be able to access it from anywhere via the Azure virtual IP for `kube-00` displayed above, i.e. `http://137.117.156.164/` in my case.
+
+## Next steps
+
+You now have a full-blown cluster running in Azure, congrats!
+
+You should probably try deploying other [example apps](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/) or write your own ;)
+
+## Tear down...
+
+If you don't want to keep paying the Azure bill, you can tear down the cluster. It's easy to redeploy it, as you can see.
+
+```shell
+./destroy-cluster.js ./output/kube_8f984af944f572_deployment.yml
+```
+
+> Note: make sure to use the _latest state file_, as after scaling there is a new one.
+
+By the way, with the scripts shown, you can deploy multiple clusters, if you like :)
+
+## Support Level
+
+
+IaaS Provider | Config. Mgmt | OS | Networking | Docs | Conforms | Support Level
+-------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ----------------------------
+Azure | CoreOS | CoreOS | Weave | [docs](/docs/getting-started-guides/coreos/azure/) | | Community ([@errordeveloper](https://github.com/errordeveloper), [@squillace](https://github.com/squillace), [@chanezon](https://github.com/chanezon), [@crossorigin](https://github.com/crossorigin))
+
+
+For support level information on all solutions, see the [Table of solutions](/docs/getting-started-guides/#table-of-solutions) chart.
+
+
+## Further reading
+
+Please see the [Kubernetes docs](/docs/) for more details on administering
+and using a Kubernetes cluster.
+