From b103f0f27996975e0f8dd915e7ed71ada2328765 Mon Sep 17 00:00:00 2001 From: Andy Zheng Date: Mon, 1 Feb 2016 11:36:45 -0800 Subject: [PATCH] Support master on Ubuntu Trusty This change support running kubernetes master on Ubuntu Trusty. It uses pure cloud-config and shell scripts, and completely gets rid of saltstack or the release salt tarball. --- build/common.sh | 30 +- cluster/gce/trusty/configure.sh | 466 +++++++++++++++++- cluster/gce/trusty/helper.sh | 38 +- .../kube-manifests/kube-apiserver.manifest | 98 ++++ .../kube-controller-manager.manifest | 83 ++++ cluster/gce/trusty/master.yaml | 235 +++++++++ cluster/gce/trusty/node.yaml | 34 +- .../saltbase/salt/kube-addons/kube-addons.sh | 8 +- 8 files changed, 945 insertions(+), 47 deletions(-) create mode 100644 cluster/gce/trusty/kube-manifests/kube-apiserver.manifest create mode 100644 cluster/gce/trusty/kube-manifests/kube-controller-manager.manifest create mode 100644 cluster/gce/trusty/master.yaml diff --git a/build/common.sh b/build/common.sh index b653ea298c..ead0c982db 100755 --- a/build/common.sh +++ b/build/common.sh @@ -883,27 +883,36 @@ function kube::release::package_salt_tarball() { # such as Ubuntu Trusty. # # There are two sources of manifests files: (1) some manifests in the directory -# cluster/saltbase/salt can be used directly or after minor revision, so we copy -# them from there; (2) otherwise, we will maintain separate copies in -# cluster/gce/kube-manifests. +# cluster/saltbase/salt and cluster/addons can be used directly or after minor +# revision, so we copy them from there; (2) otherwise, we will maintain separate +# copies in cluster/gce//kube-manifests. function kube::release::package_kube_manifests_tarball() { kube::log::status "Building tarball: manifests" local release_stage="${RELEASE_STAGE}/manifests/kubernetes" rm -rf "${release_stage}" - mkdir -p "${release_stage}" + mkdir -p "${release_stage}/trusty" - # Source 1: manifests from cluster/saltbase/salt. 
- # TODO(andyzheng0831): Add more manifests when supporting master on trusty. + # Source 1: manifests from cluster/saltbase/salt and cluster/addons local salt_dir="${KUBE_ROOT}/cluster/saltbase/salt" cp "${salt_dir}/fluentd-es/fluentd-es.yaml" "${release_stage}/" cp "${salt_dir}/fluentd-gcp/fluentd-gcp.yaml" "${release_stage}/" cp "${salt_dir}/kube-registry-proxy/kube-registry-proxy.yaml" "${release_stage}/" cp "${salt_dir}/kube-proxy/kube-proxy.manifest" "${release_stage}/" + cp "${salt_dir}/etcd/etcd.manifest" "${release_stage}/trusty" + cp "${salt_dir}/kube-scheduler/kube-scheduler.manifest" "${release_stage}/trusty" + cp "${salt_dir}/kube-addons/namespace.yaml" "${release_stage}/trusty" + cp "${salt_dir}/kube-addons/kube-addons.sh" "${release_stage}/trusty" + cp "${salt_dir}/kube-addons/kube-addon-update.sh" "${release_stage}/trusty" + cp -r "${salt_dir}/kube-admission-controls/limit-range" "${release_stage}/trusty" + local objects + objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo) + tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${release_stage}/trusty" - # Source 2: manifests from cluster/gce/kube-manifests. - # TODO(andyzheng0831): Enable the following line after finishing issue #16702. - # cp "${KUBE_ROOT}/cluster/gce/kube-manifests/*" "${release_stage}/" + # Source 2: manifests from cluster/gce//kube-manifests. + # TODO(andyzheng0831): Avoid using separate copies for trusty. We should use whatever + # from cluster/saltbase/salt to minimize maintenance cost. + cp "${KUBE_ROOT}/cluster/gce/trusty/kube-manifests/"* "${release_stage}/trusty" cp -r "${KUBE_ROOT}/cluster/gce/coreos/kube-manifests"/* "${release_stage}/" kube::release::clean_cruft @@ -1094,10 +1103,11 @@ function kube::release::gcs::copy_release_artifacts() { # Stage everything in release directory kube::release::gcs::stage_and_hash "${RELEASE_DIR}"/* . 
|| return 1 - # Having the configure-vm.sh and trusty/node.yaml scripts from the GCE cluster + # Having the configure-vm.sh script and and trusty code from the GCE cluster # deploy hosted with the release is useful for GKE. kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/configure-vm.sh" extra/gce || return 1 kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/trusty/node.yaml" extra/gce || return 1 + kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/trusty/master.yaml" extra/gce || return 1 kube::release::gcs::stage_and_hash "${RELEASE_STAGE}/full/kubernetes/cluster/gce/trusty/configure.sh" extra/gce || return 1 # Upload the "naked" binaries to GCS. This is useful for install scripts that diff --git a/cluster/gce/trusty/configure.sh b/cluster/gce/trusty/configure.sh index a2471169b8..d2fdda0cda 100644 --- a/cluster/gce/trusty/configure.sh +++ b/cluster/gce/trusty/configure.sh @@ -53,16 +53,18 @@ create_dirs() { download_kube_env() { # Fetch kube-env from GCE metadata server. + readonly tmp_install_dir="/var/cache/kubernetes-install" + mkdir -p ${tmp_install_dir} curl --fail --silent --show-error \ -H "X-Google-Metadata-Request: True" \ - -o /tmp/kube-env-yaml \ + -o "${tmp_install_dir}/kube_env.yaml" \ http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env # Convert the yaml format file into a shell-style file. 
eval $(python -c ''' import pipes,sys,yaml for k,v in yaml.load(sys.stdin).iteritems(): print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v)))) -''' < /tmp/kube-env-yaml > /etc/kube-env) +''' < "${tmp_install_dir}/kube_env.yaml" > /etc/kube-env) } create_kubelet_kubeconfig() { @@ -181,6 +183,11 @@ install_kube_binary_config() { cp /tmp/kubernetes/server/bin/*.docker_tag /run/kube-docker-files/ if [ "${KUBERNETES_MASTER:-}" = "false" ]; then cp /tmp/kubernetes/server/bin/kube-proxy.tar /run/kube-docker-files/ + else + cp /tmp/kubernetes/server/bin/kube-apiserver.tar /run/kube-docker-files/ + cp /tmp/kubernetes/server/bin/kube-controller-manager.tar /run/kube-docker-files/ + cp /tmp/kubernetes/server/bin/kube-scheduler.tar /run/kube-docker-files/ + cp -r /tmp/kubernetes/addons /run/kube-docker-files/ fi # For a testing cluster, we use kubelet, kube-proxy, and kubectl binaries # from the release tarball and place them in /usr/local/bin. For a non-test @@ -246,3 +253,458 @@ prepare_log_file() { chmod 644 $1 chown root:root $1 } + +# It monitors the health of several master and node components. +health_monitoring() { + sleep_seconds=10 + max_seconds=10 + # We simply kill the process when there is a failure. Another upstart job will automatically + # restart the process. + while [ 1 ]; do + if ! timeout 20 docker ps > /dev/null; then + echo "Docker daemon failed!" + pkill docker + fi + if ! curl --insecure -m ${max_seconds} -f -s https://127.0.0.1:${KUBELET_PORT:-10250}/healthz > /dev/null; then + echo "Kubelet is unhealthy!" + pkill kubelet + fi + # TODO(andyzheng0831): Add master side health monitoring. + sleep ${sleep_seconds} + done +} + + +########## The functions below are for master only ########## + +# Mounts a persistent disk (formatting if needed) to store the persistent data +# on the master -- etcd's data, a few settings, and security certs/keys/tokens. 
+# safe_format_and_mount only formats an unformatted disk, and mkdir -p will
+# leave a directory be if it already exists.
+mount_master_pd() {
+  readonly pd_path="/dev/disk/by-id/google-master-pd"
+  readonly mount_point="/mnt/disks/master-pd"
+
+  # TODO(zmerlynn): GKE is still lagging in master-pd creation
+  if [ ! -e ${pd_path} ]; then
+    return
+  fi
+  # Format and mount the disk, create directories on it for all of the master's
+  # persistent data, and link them to where they're used.
+  mkdir -p ${mount_point}
+  /usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" ${pd_path} ${mount_point} >/var/log/master-pd-mount.log || \
+    { echo "!!! master-pd mount failed, review /var/log/master-pd-mount.log !!!"; return 1; }
+  # Contains all the data stored in etcd
+  mkdir -m 700 -p "${mount_point}/var/etcd"
+  # Contains the dynamically generated apiserver auth certs and keys
+  mkdir -p "${mount_point}/etc/srv/kubernetes"
+  # Directory for kube-apiserver to store SSH key (if necessary)
+  mkdir -p /"${mount_point}/etc/srv/sshproxy"
+  ln -s -f "${mount_point}/var/etcd" /var/etcd
+  mkdir -p /etc/srv
+  ln -s -f /"${mount_point}/etc/srv/kubernetes" /etc/srv/kubernetes
+  ln -s -f /"${mount_point}/etc/srv/sshproxy" /etc/srv/sshproxy
+
+  if ! id etcd &>/dev/null; then
+    useradd -s /sbin/nologin -d /var/etcd etcd
+  fi
+  chown -R etcd /"${mount_point}/var/etcd"
+  chgrp -R etcd "${mount_point}/var/etcd"
+}
+
+# A helper function that adds an entry to a token file.
+# $1: account information
+# $2: token file
+add_token_entry() {
+  current_token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+  echo "${current_token},$1,$1" >> $2
+}
+
+# After the first boot and on upgrade, these files exists on the master-pd
+# and should never be touched again (except perhaps an additional service
+# account, see NB below.)
+create_master_auth() {
+  readonly auth_dir="/etc/srv/kubernetes"
+  if [ ! -e "${auth_dir}/ca.crt" ]; then
+    if [ ! -z "${CA_CERT:-}" ] && [ ! -z "${MASTER_CERT:-}" ] && [ ! -z "${MASTER_KEY:-}" ]; then
+      echo "${CA_CERT}" | base64 -d > "${auth_dir}/ca.crt"
+      echo "${MASTER_CERT}" | base64 -d > "${auth_dir}/server.cert"
+      echo "${MASTER_KEY}" | base64 -d > "${auth_dir}/server.key"
+      # Kubecfg cert/key are optional and included for backwards compatibility.
+      # TODO(roberthbailey): Remove these two lines once GKE no longer requires
+      # fetching clients certs from the master VM.
+      echo "${KUBECFG_CERT:-}" | base64 -d > "${auth_dir}/kubecfg.crt"
+      echo "${KUBECFG_KEY:-}" | base64 -d > "${auth_dir}/kubecfg.key"
+    fi
+  fi
+  readonly basic_auth_csv="${auth_dir}/basic_auth.csv"
+  if [ ! -e "${basic_auth_csv}" ]; then
+    echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${basic_auth_csv}"
+  fi
+  readonly known_tokens_csv="${auth_dir}/known_tokens.csv"
+  if [ ! -e "${known_tokens_csv}" ]; then
+    echo "${KUBE_BEARER_TOKEN},admin,admin" > "${known_tokens_csv}"
+    echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${known_tokens_csv}"
+    echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${known_tokens_csv}"
+
+    # Generate tokens for other "service accounts". Append to known_tokens.
+    #
+    # NB: If this list ever changes, this script actually has to
+    # change to detect the existence of this file, kill any deleted
+    # old tokens and add any new tokens (to handle the upgrade case).
+    add_token_entry "system:scheduler" ${known_tokens_csv}
+    add_token_entry "system:controller_manager" ${known_tokens_csv}
+    add_token_entry "system:logging" ${known_tokens_csv}
+    add_token_entry "system:monitoring" ${known_tokens_csv}
+    add_token_entry "system:dns" ${known_tokens_csv}
+  fi
+
+  if [ -n "${PROJECT_ID:-}" ] && [ -n "${TOKEN_URL:-}" ] && [ -n "${TOKEN_BODY:-}" ] && [ -n "${NODE_NETWORK:-}" ]; then
+    cat <<EOF >/etc/gce.conf
+[global]
+token-url = ${TOKEN_URL}
+token-body = ${TOKEN_BODY}
+project-id = ${PROJECT_ID}
+network-name = ${NODE_NETWORK}
+EOF
+  fi
+}
+
+# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY
+# to generate a kubeconfig file for the kubelet to securely connect to the apiserver.
+create_master_kubelet_auth() {
+  # Only configure the kubelet on the master if the required variables are
+  # set in the environment.
+  if [ -n "${KUBELET_APISERVER:-}" ] && [ -n "${KUBELET_CERT:-}" ] && [ -n "${KUBELET_KEY:-}" ]; then
+    create_kubelet_kubeconfig
+  fi
+}
+
+# Replaces the variables in the etcd manifest file with the real values, and then
+# copy the file to the manifest dir
+# $1: value for variable 'suffix'
+# $2: value for variable 'port'
+# $3: value for variable 'server_port'
+# $4: value for variable 'cpulimit'
+# $5: pod name, which should be either etcd or etcd-events
+prepare_etcd_manifest() {
+  readonly etcd_temp_file="/tmp/$5"
+  cp /run/kube-manifests/kubernetes/trusty/etcd.manifest "${etcd_temp_file}"
+  sed -i -e "s@{{ *suffix *}}@$1@g" "${etcd_temp_file}"
+  sed -i -e "s@{{ *port *}}@$2@g" "${etcd_temp_file}"
+  sed -i -e "s@{{ *server_port *}}@$3@g" "${etcd_temp_file}"
+  sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${etcd_temp_file}"
+  # Replace the volume host path
+  sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${etcd_temp_file}"
+  mv "${etcd_temp_file}" /etc/kubernetes/manifests
+}
+
+# Starts etcd server pod (and etcd-events pod if needed).
+# More specifically, it prepares dirs and files, sets the variable value +# in the manifests, and copies them to /etc/kubernetes/manifests. +start_etcd_servers() { + if [ -d /etc/etcd ]; then + rm -rf /etc/etcd + fi + if [ -e /etc/default/etcd ]; then + rm -f /etc/default/etcd + fi + if [ -e /etc/systemd/system/etcd.service ]; then + rm -f /etc/systemd/system/etcd.service + fi + if [ -e /etc/init.d/etcd ]; then + rm -f /etc/init.d/etcd + fi + prepare_log_file /var/log/etcd.log + prepare_etcd_manifest "" "4001" "2380" "200m" "etcd.manifest" + + # Switch on the second etcd instance if there are more than 50 nodes. + if [ -n "${NUM_NODES:-}" ] && [ "${NUM_NODES}" -gt 50 ]; then + prepare_log_file /var/log/etcd-events.log + prepare_etcd_manifest "-events" "4002" "2381" "100m" "etcd-events.manifest" + fi +} + +# Calculates the following variables based on env variables, which will be used +# by the manifests of several kube-master components. +# CLOUD_CONFIG_VOLUME +# CLOUD_CONFIG_MOUNT +# DOCKER_REGISTRY +compute_master_manifest_variables() { + CLOUD_CONFIG_VOLUME="" + CLOUD_CONFIG_MOUNT="" + if [ -n "${PROJECT_ID:-}" ] && [ -n "${TOKEN_URL:-}" ] && [ -n "${TOKEN_BODY:-}" ] && [ -n "${NODE_NETWORK:-}" ]; then + CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\"}}," + CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true}," + fi + DOCKER_REGISTRY="gcr.io/google_containers" + if [ -n "${KUBE_DOCKER_REGISTRY:-}" ]; then + DOCKER_REGISTRY=${KUBE_DOCKER_REGISTRY} + fi + KUBECTL_BIN="/usr/bin/kubectl" + if [ "${TEST_CLUSTER:-}" = "true" ]; then + KUBECTL_BIN="/usr/local/bin/kubectl" + fi +} + +# Starts k8s apiserver. +# It prepares the log file, loads the docker image, calculates variables, sets them +# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. 
+# +# Assumed vars (which are calculated in function compute_master_manifest_variables) +# CLOUD_CONFIG_VOLUME +# CLOUD_CONFIG_MOUNT +# DOCKER_REGISTRY +start_kube_apiserver() { + prepare_log_file /var/log/kube-apiserver.log + # Load the docker image from file. + echo "Try to load docker image file kube-apiserver.tar" + timeout 30 docker load -i /run/kube-docker-files/kube-apiserver.tar + + # Calculate variables and assemble the command line. + params="--cloud-provider=gce --address=127.0.0.1 --etcd-servers=http://127.0.0.1:4001 --tls-cert-file=/etc/srv/kubernetes/server.cert --tls-private-key-file=/etc/srv/kubernetes/server.key --secure-port=443 --client-ca-file=/etc/srv/kubernetes/ca.crt --token-auth-file=/etc/srv/kubernetes/known_tokens.csv --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv --allow-privileged=true" + if [ -n "${NUM_NODES:-}" ] && [ "${NUM_NODES}" -gt 50 ]; then + params="${params} --etcd-servers-overrides=/events#http://127.0.0.1:4002" + fi + if [ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]; then + params="${params} --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" + fi + if [ -n "${ADMISSION_CONTROL:-}" ]; then + params="${params} --admission-control=${ADMISSION_CONTROL}" + fi + if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]; then + params="${params} --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}" + fi + if [ -n "${RUNTIME_CONFIG:-}" ]; then + params="${params} --runtime-config=${RUNTIME_CONFIG}" + fi + if [ -n "${APISERVER_TEST_ARGS:-}" ]; then + params="${params} ${APISERVER_TEST_ARGS}" + fi + log_level="--v=2" + if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then + log_level="${API_SERVER_TEST_LOG_LEVEL}" + fi + params="${params} ${log_level}" + + if [ -n "${PROJECT_ID:-}" ] && [ -n "${TOKEN_URL:-}" ] && [ -n "${TOKEN_BODY:-}" ] && [ -n "${NODE_NETWORK:-}" ]; then + readonly vm_external_ip=$(curl --fail --silent -H 'Metadata-Flavor: Google' 
"http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip") + params="${params} --cloud-config=/etc/gce.conf --advertise-address=${vm_external_ip} --ssh-user=${PROXY_SSH_USER} --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile" + fi + readonly kube_apiserver_docker_tag=$(cat /run/kube-docker-files/kube-apiserver.docker_tag) + + src_file="/run/kube-manifests/kubernetes/trusty/kube-apiserver.manifest" + sed -i -e "s@{{params}}@${params}@g" ${src_file} + sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" ${src_file} + sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" ${src_file} + sed -i -e "s@{{kube_docker_registry}}@${DOCKER_REGISTRY}@g" ${src_file} + sed -i -e "s@{{kube-apiserver_docker_tag}}@${kube_apiserver_docker_tag}@g" ${src_file} + cp ${src_file} /etc/kubernetes/manifests +} + +# Starts k8s controller manager. +# It prepares the log file, loads the docker image, calculates variables, sets them +# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. +# +# Assumed vars (which are calculated in function compute_master_manifest_variables) +# CLOUD_CONFIG_VOLUME +# CLOUD_CONFIG_MOUNT +# DOCKER_REGISTRY +start_kube_controller_manager() { + prepare_log_file /var/log/kube-controller-manager.log + # Load the docker image from file. + echo "Try to load docker image file kube-controller-manager.tar" + timeout 30 docker load -i /run/kube-docker-files/kube-controller-manager.tar + + # Calculate variables and assemble the command line. 
+ params="--master=127.0.0.1:8080 --cloud-provider=gce --root-ca-file=/etc/srv/kubernetes/ca.crt --service-account-private-key-file=/etc/srv/kubernetes/server.key" + if [ -n "${INSTANCE_PREFIX:-}" ]; then + params="${params} --cluster-name=${INSTANCE_PREFIX}" + fi + if [ -n "${CLUSTER_IP_RANGE:-}" ]; then + params="${params} --cluster-cidr=${CLUSTER_IP_RANGE}" + fi + if [ "${ALLOCATE_NODE_CIDRS:-}" = "true" ]; then + params="${params} --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}" + fi + if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then + params="${params} --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}" + fi + log_level="--v=2" + if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then + log_level="${CONTROLLER_MANAGER_TEST_LOG_LEVEL}" + fi + params="${params} ${log_level}" + if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then + params="${params} ${CONTROLLER_MANAGER_TEST_ARGS}" + fi + readonly kube_rc_docker_tag=$(cat /run/kube-docker-files/kube-controller-manager.docker_tag) + + src_file="/run/kube-manifests/kubernetes/trusty/kube-controller-manager.manifest" + sed -i -e "s@{{params}}@${params}@g" ${src_file} + sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" ${src_file} + sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" ${src_file} + sed -i -e "s@{{kube_docker_registry}}@${DOCKER_REGISTRY}@g" ${src_file} + sed -i -e "s@{{kube-controller-manager_docker_tag}}@${kube_rc_docker_tag}@g" ${src_file} + cp ${src_file} /etc/kubernetes/manifests +} + +# Start k8s scheduler. +# It prepares the log file, loads the docker image, calculates variables, sets them +# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests. +# +# Assumed vars (which are calculated in compute_master_manifest_variables()) +# DOCKER_REGISTRY +start_kube_scheduler() { + prepare_log_file /var/log/kube-scheduler.log + # Load the docker image from file. 
+ echo "Try to load docker image file kube-scheduler.tar" + timeout 30 docker load -i /run/kube-docker-files/kube-scheduler.tar + + # Calculate variables and set them in the manifest. + params="" + log_level="--v=2" + if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then + log_level="${SCHEDULER_TEST_LOG_LEVEL}" + fi + params="${params} ${log_level}" + if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then + params="${params} ${SCHEDULER_TEST_ARGS}" + fi + readonly kube_scheduler_docker_tag=$(cat /run/kube-docker-files/kube-scheduler.docker_tag) + + # Remove salt comments and replace variables with values + src_file="/run/kube-manifests/kubernetes/trusty/kube-scheduler.manifest" + sed -i "/^ *{%/d" ${src_file} + sed -i -e "s@{{params}}@${params}@g" ${src_file} + sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" ${src_file} + sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" ${src_file} + cp ${src_file} /etc/kubernetes/manifests +} + +# Start a fluentd static pod for logging. +start_fluentd() { + if [ "${ENABLE_NODE_LOGGING:-}" = "true" ]; then + if [ "${LOGGING_DESTINATION:-}" = "gcp" ]; then + cp /run/kube-manifests/kubernetes/fluentd-gcp.yaml /etc/kubernetes/manifests/ + elif [ "${LOGGING_DESTINATION:-}" = "elasticsearch" ]; then + cp /run/kube-manifests/kubernetes/fluentd-es.yaml /etc/kubernetes/manifests/ + fi + fi +} + +# A helper function for copying addon manifests and set dir/files +# permissions. +# $1: addon category under /etc/kubernetes +# $2: manifest source dir +setup_addon_manifests() { + src_dir="/run/kube-manifests/kubernetes/trusty/$2" + dst_dir="/etc/kubernetes/$1/$2" + if [ ! 
-d "${dst_dir}" ]; then + mkdir -p "${dst_dir}" + fi + files=$(find "${src_dir}" -name "*.yaml") + if [ -n "${files}" ]; then + cp "${src_dir}/"*.yaml "${dst_dir}" + fi + files=$(find "${src_dir}" -name "*.json") + if [ -n "${files}" ]; then + cp "${src_dir}/"*.json "${dst_dir}" + fi + files=$(find "${src_dir}" -name "*.yaml.in") + if [ -n "${files}" ]; then + cp "${src_dir}/"*.yaml.in "${dst_dir}" + fi + chown -R root:root "${dst_dir}" + chmod 755 "${dst_dir}" + chmod 644 "${dst_dir}"/* +} + +# Start k8s addons static pods. +# +# Assumed vars (which are calculated in function compute_master_manifest_variables) +# KUBECTL_BIN +start_kube_addons() { + # Fluentd + start_fluentd + + addon_src_dir="/run/kube-manifests/kubernetes/trusty" + addon_dst_dir="/etc/kubernetes/addons" + # Set up manifests of other addons. + if [ "${ENABLE_CLUSTER_MONITORING:-}" = "influxdb" ] || \ + [ "${ENABLE_CLUSTER_MONITORING:-}" = "google" ] || \ + [ "${ENABLE_CLUSTER_MONITORING:-}" = "standalone" ] || \ + [ "${ENABLE_CLUSTER_MONITORING:-}" = "googleinfluxdb" ]; then + file_dir="cluster-monitoring/${ENABLE_CLUSTER_MONITORING}" + setup_addon_manifests "addons" "${file_dir}" + # Replace the salt configurations with variable values. 
+ heapster_memory="300Mi" + if [ -n "${NUM_NODES:-}" ] && [ "${NUM_NODES}" -gt 1 ]; then + heapster_memory="$((${NUM_NODES} * 12 + 200))Mi" + fi + controller_yaml="${addon_dst_dir}/${file_dir}" + if [ "${ENABLE_CLUSTER_MONITORING:-}" = "googleinfluxdb" ]; then + controller_yaml="${controller_yaml}/heapster-controller-combined.yaml" + else + controller_yaml="${controller_yaml}/heapster-controller.yaml" + fi + sed -i "/^ *{%/d" "${controller_yaml}" + sed -i -e "s@{{ *heapster_memory *}}@${heapster_memory}@g" "${controller_yaml}" + fi + cp "${addon_src_dir}/namespace.yaml" "${addon_dst_dir}" + if [ "${ENABLE_L7_LOADBALANCING:-}" = "glbc" ]; then + setup_addon_manifests "addons" "cluster-loadbalancing/glbc" + fi + if [ "${ENABLE_CLUSTER_DNS:-}" = "true" ]; then + setup_addon_manifests "addons" "dns" + dns_rc_file="${addon_dst_dir}/dns/skydns-rc.yaml" + dns_svc_file="${addon_dst_dir}/dns/skydns-svc.yaml" + mv "${addon_dst_dir}/dns/skydns-rc.yaml.in" "${dns_rc_file}" + mv "${addon_dst_dir}/dns/skydns-svc.yaml.in" "${dns_svc_file}" + # Replace the salt configurations with variable values. + sed -i -e "s@{{ *pillar\['dns_replicas'\] *}}@${DNS_REPLICAS}@g" "${dns_rc_file}" + sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${dns_rc_file}" + sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${dns_svc_file}" + fi + if [ "${ENABLE_CLUSTER_REGISTRY:-}" = "true" ]; then + setup_addon_manifests "addons" "registry" + registry_pv_file="${addon_dst_dir}/registry/registry-pv.yaml" + registry_pvc_file="${addon_dst_dir}/registry/registry-pvc.yaml" + mv "${addon_dst_dir}/registry/registry-pv.yaml.in" "${registry_pv_file}" + mv "${addon_dst_dir}/registry/registry-pvc.yaml.in" "${registry_pvc_file}" + # Replace the salt configurations with variable values. 
+ sed -i "/^ *{%/d" "${registry_pv_file}" + sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pv_file}" + sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pvc_file}" + sed -i -e "s@{{ *pillar\['cluster_registry_disk_name'\] *}}@${CLUSTER_REGISTRY_DISK}@g" "${registry_pvc_file}" + fi + if [ "${ENABLE_NODE_LOGGING:-}" = "true" ] && \ + [ "${LOGGING_DESTINATION:-}" = "elasticsearch" ] && \ + [ "${ENABLE_CLUSTER_LOGGING:-}" = "true" ]; then + setup_addon_manifests "addons" "fluentd-elasticsearch" + fi + if [ "${ENABLE_CLUSTER_UI:-}" = "true" ]; then + setup_addon_manifests "addons" "kube-ui" + fi + if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then + setup_addon_manifests "admission-controls" "limit-range" + fi + + # Run scripts to start addons placed in /etc/kubernetes/addons + addon_script_dir="/var/lib/cloud/scripts/kubernetes" + mkdir -p "${addon_script_dir}" + cp "${addon_src_dir}/kube-addons.sh" "${addon_script_dir}" + cp "${addon_src_dir}/kube-addon-update.sh" "${addon_script_dir}" + chmod 544 "${addon_script_dir}/"*.sh + # In case that upstart does not set the HOME variable or sometimes + # GCE customized trusty has a read-only /root. + export HOME="/root" + mount -t tmpfs tmpfs "${HOME}" + mount --bind -o remount,rw,noexec "${HOME}" + export KUBECTL_BIN + export TOKEN_DIR="/etc/srv/kubernetes" + export kubelet_kubeconfig_file="/var/lib/kubelet/kubeconfig" + export TRUSTY_MASTER="true" + # Run the script to start and monitoring addon manifest changes. + /bin/bash "${addon_script_dir}/kube-addons.sh" +} diff --git a/cluster/gce/trusty/helper.sh b/cluster/gce/trusty/helper.sh index d5c9892e21..206f3237d4 100755 --- a/cluster/gce/trusty/helper.sh +++ b/cluster/gce/trusty/helper.sh @@ -22,10 +22,6 @@ # replaced upstart with systemd as the init system. Consequently, the # configuration cannot work on these images. 
-# By sourcing debian's helper.sh, we use the same create-master-instance -# functions as debian. But we overwrite the create-node-instance-template -# function to use Ubuntu. -source "${KUBE_ROOT}/cluster/gce/debian/helper.sh" # $1: template name (required) function create-node-instance-template { @@ -35,3 +31,37 @@ function create-node-instance-template { "user-data=${KUBE_ROOT}/cluster/gce/trusty/node.yaml" \ "configure-sh=${KUBE_ROOT}/cluster/gce/trusty/configure.sh" } + +# create-master-instance creates the master instance. If called with +# an argument, the argument is used as the name to a reserved IP +# address for the master. (In the case of upgrade/repair, we re-use +# the same IP.) +# +# It requires a whole slew of assumed variables, partially due to to +# the call to write-master-env. Listing them would be rather +# futile. Instead, we list the required calls to ensure any additional +# variables are set: +# ensure-temp-dir +# detect-project +# get-bearer-token +# +function create-master-instance { + local address_opt="" + [[ -n ${1:-} ]] && address_opt="--address ${1}" + + write-master-env + gcloud compute instances create "${MASTER_NAME}" \ + ${address_opt} \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --machine-type "${MASTER_SIZE}" \ + --image-project="${MASTER_IMAGE_PROJECT}" \ + --image "${MASTER_IMAGE}" \ + --tags "${MASTER_TAG}" \ + --network "${NETWORK}" \ + --scopes "storage-ro,compute-rw,monitoring,logging-write" \ + --can-ip-forward \ + --metadata-from-file \ + "kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/trusty/master.yaml,configure-sh=${KUBE_ROOT}/cluster/gce/trusty/configure.sh" \ + --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" +} diff --git a/cluster/gce/trusty/kube-manifests/kube-apiserver.manifest b/cluster/gce/trusty/kube-manifests/kube-apiserver.manifest new file mode 100644 index 0000000000..fa37f1eacc --- /dev/null +++ 
b/cluster/gce/trusty/kube-manifests/kube-apiserver.manifest @@ -0,0 +1,98 @@ +{ +"apiVersion": "v1", +"kind": "Pod", +"metadata": { + "name":"kube-apiserver", + "namespace": "kube-system" +}, +"spec":{ +"hostNetwork": true, +"containers":[ + { + "name": "kube-apiserver", + "image": "{{kube_docker_registry}}/kube-apiserver:{{kube-apiserver_docker_tag}}", + "resources": { + "limits": { + "cpu": "250m" + } + }, + "command": [ + "/bin/sh", + "-c", + "/usr/local/bin/kube-apiserver {{params}} 1>>/var/log/kube-apiserver.log 2>&1" + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 8080, + "path": "/healthz" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + }, + "ports":[ + { "name": "https", + "containerPort": 443, + "hostPort": 443},{ + "name": "local", + "containerPort": 8080, + "hostPort": 8080} + ], + "volumeMounts": [ + {{cloud_config_mount}} + { "name": "srvkube", + "mountPath": "/etc/srv/kubernetes", + "readOnly": true}, + { "name": "logfile", + "mountPath": "/var/log/kube-apiserver.log", + "readOnly": false}, + { "name": "etcssl", + "mountPath": "/etc/ssl", + "readOnly": true}, + { "name": "varssl", + "mountPath": "/var/ssl", + "readOnly": true}, + { "name": "etcopenssl", + "mountPath": "/etc/openssl", + "readOnly": true}, + { "name": "etcpkitls", + "mountPath": "/etc/pki/tls", + "readOnly": true}, + { "name": "srvsshproxy", + "mountPath": "/etc/srv/sshproxy", + "readOnly": false} + ] + } +], +"volumes":[ + {{cloud_config_volume}} + { "name": "srvkube", + "hostPath": { + "path": "/etc/srv/kubernetes"} + }, + { "name": "logfile", + "hostPath": { + "path": "/var/log/kube-apiserver.log"} + }, + { "name": "etcssl", + "hostPath": { + "path": "/etc/ssl"} + }, + { "name": "varssl", + "hostPath": { + "path": "/var/ssl"} + }, + { "name": "etcopenssl", + "hostPath": { + "path": "/etc/openssl"} + }, + { "name": "etcpkitls", + "hostPath": { + "path": "/etc/pki/tls"} + }, + { "name": "srvsshproxy", + "hostPath": { + "path": "/etc/srv/sshproxy"} + 
} +] +}} diff --git a/cluster/gce/trusty/kube-manifests/kube-controller-manager.manifest b/cluster/gce/trusty/kube-manifests/kube-controller-manager.manifest new file mode 100644 index 0000000000..de91d59ff4 --- /dev/null +++ b/cluster/gce/trusty/kube-manifests/kube-controller-manager.manifest @@ -0,0 +1,83 @@ +{ +"apiVersion": "v1", +"kind": "Pod", +"metadata": { + "name":"kube-controller-manager", + "namespace": "kube-system" +}, +"spec":{ +"hostNetwork": true, +"containers":[ + { + "name": "kube-controller-manager", + "image": "{{kube_docker_registry}}/kube-controller-manager:{{kube-controller-manager_docker_tag}}", + "resources": { + "limits": { + "cpu": "200m" + } + }, + "command": [ + "/bin/sh", + "-c", + "/usr/local/bin/kube-controller-manager {{params}} 1>>/var/log/kube-controller-manager.log 2>&1" + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 10252, + "path": "/healthz" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + }, + "volumeMounts": [ + {{cloud_config_mount}} + { "name": "srvkube", + "mountPath": "/etc/srv/kubernetes", + "readOnly": true}, + { "name": "logfile", + "mountPath": "/var/log/kube-controller-manager.log", + "readOnly": false}, + { "name": "etcssl", + "mountPath": "/etc/ssl", + "readOnly": true}, + { "name": "varssl", + "mountPath": "/var/ssl", + "readOnly": true}, + { "name": "etcopenssl", + "mountPath": "/etc/openssl", + "readOnly": true}, + { "name": "etcpkitls", + "mountPath": "/etc/pki/tls", + "readOnly": true} + ] + } +], +"volumes":[ + {{cloud_config_volume}} + { "name": "srvkube", + "hostPath": { + "path": "/etc/srv/kubernetes"} + }, + { "name": "logfile", + "hostPath": { + "path": "/var/log/kube-controller-manager.log"} + }, + { "name": "etcssl", + "hostPath": { + "path": "/etc/ssl"} + }, + { "name": "varssl", + "hostPath": { + "path": "/var/ssl"} + }, + { "name": "etcopenssl", + "hostPath": { + "path": "/etc/openssl"} + }, + { "name": "etcpkitls", + "hostPath": { + "path": "/etc/pki/tls"} + } 
+] +}} diff --git a/cluster/gce/trusty/master.yaml b/cluster/gce/trusty/master.yaml new file mode 100644 index 0000000000..898f7963b4 --- /dev/null +++ b/cluster/gce/trusty/master.yaml @@ -0,0 +1,235 @@ +From nobody Tue Feb 1 11:33:00 2016 +Content-Type: multipart/mixed; boundary="====================================" +MIME-Version: 1.0 + +--==================================== +MIME-Version: 1.0 +Content-Type: text/upstart-job; charset="us-ascii" +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="kube-env.conf" + +#upstart-job + +description "Prepare kube master environment" + +start on cloud-config + +script + set -o errexit + set -o nounset + + # Fetch the script for configuring the instance. + curl --fail --silent --show-error \ + -H "X-Google-Metadata-Request: True" \ + -o /etc/kube-configure.sh \ + http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh + . /etc/kube-configure.sh + + echo "Configuring hostname" + config_hostname + echo "Configuring IP firewall rules" + config_ip_firewall + echo "Downloading kube-env file" + download_kube_env + . /etc/kube-env + echo "Creating required directories" + create_dirs + echo "Mount master PD" + mount_master_pd + echo "Creating kuberntes master auth file" + create_master_auth + echo "Creating master instance kubelet auth file" + create_master_kubelet_auth +end script + +--==================================== +MIME-Version: 1.0 +Content-Type: text/upstart-job; charset="us-ascii" +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="kube-install-packages.conf" + +#upstart-job + +description "Install packages needed to run kubernetes" + +start on stopped kube-env + +script + set -o errexit + set -o nounset + + . 
/etc/kube-configure.sh + install_critical_packages +end script + +--==================================== +MIME-Version: 1.0 +Content-Type: text/upstart-job; charset="us-ascii" +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="kube-install-additional-packages.conf" + +#upstart-job + +description "Install additional packages used by kubernetes" + +start on stopped kube-install-packages + +script + set -o errexit + set -o nounset + + . /etc/kube-configure.sh + install_additional_packages +end script + +--==================================== +MIME-Version: 1.0 +Content-Type: text/upstart-job; charset="us-ascii" +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="kube-install-master.conf" + +#upstart-job + +description "Download and install k8s binaries and configurations" + +start on stopped kube-env + +script + set -o errexit + set -o nounset + + . /etc/kube-configure.sh + . /etc/kube-env + install_kube_binary_config +end script + +--==================================== +MIME-Version: 1.0 +Content-Type: text/upstart-job; charset="us-ascii" +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="kubelet.conf" + +#upstart-job + +description "Run kubelet service" + +start on stopped kube-install-packages and stopped kube-install-master + +respawn + +script + set -o errexit + set -o nounset + + echo "Start kubelet upstart job" + + . /etc/kube-env + BINARY_PATH="/usr/bin/kubelet" + if [ "${TEST_CLUSTER:-}" = "true" ]; then + BINARY_PATH="/usr/local/bin/kubelet" + fi + # Assemble command line flags based on env variables. + ARGS="--v=2" + if [ -n "${KUBELET_TEST_ARGS:-}" ]; then + ARGS="${KUBELET_TEST_ARGS}" + fi + if [ ! -z "${KUBELET_APISERVER:-}" ] && [ ! -z "${KUBELET_CERT:-}" ] && [ ! 
-z "${KUBELET_KEY:-}" ]; then + ARGS="${ARGS} --api-servers=https://${KUBELET_APISERVER}" + ARGS="${ARGS} --register-schedulable=false --reconcile-cidr=false" + ARGS="${ARGS} --pod-cidr=10.123.45.0/30" + else + ARGS="${ARGS} --pod-cidr=${MASTER_IP_RANGE}" + fi + if [ "${ENABLE_MANIFEST_URL:-}" = "true" ]; then + ARGS="${ARGS} --manifest-url=${MANIFEST_URL} --manifest-url-header=${MANIFEST_URL_HEADER}" + fi + + ${BINARY_PATH} \ + --enable-debugging-handlers=false \ + --cloud-provider=gce \ + --config=/etc/kubernetes/manifests \ + --allow-privileged=true \ + --cluster-dns=${DNS_SERVER_IP} \ + --cluster-domain=${DNS_DOMAIN} \ + --configure-cbr0=${ALLOCATE_NODE_CIDRS} \ + --cgroup-root=/ \ + --system-container=/system \ + --nosystemd=true \ + ${ARGS} +end script + +# Wait for 10s to start kubelet again. +post-stop exec sleep 10 + +--==================================== +MIME-Version: 1.0 +Content-Type: text/upstart-job; charset="us-ascii" +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="kube-docker.conf" + +#upstart-job + +description "Restart docker daemon" + +# The condition "stopped kube-install-additional-packages" is to avoid +# breaking nsenter installation, which is through a docker container. +# It can be removed if we find a better way to install nsenter. +start on started kubelet and stopped kube-install-additional-packages + +script + set -o errexit + set -o nounset + + . /etc/kube-configure.sh + . /etc/kube-env + restart_docker_daemon +end script + +--==================================== +MIME-Version: 1.0 +Content-Type: text/upstart-job; charset="us-ascii" +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="kube-master-components.conf" + +#upstart-job + +description "Start kube-master components and addons pods" + +start on stopped kube-docker + +script + set -o errexit + set -o nounset + + . /etc/kube-configure.sh + . 
/etc/kube-env + start_etcd_servers + compute_master_manifest_variables + start_kube_apiserver + start_kube_controller_manager + start_kube_scheduler + start_kube_addons +end script + +--==================================== +MIME-Version: 1.0 +Content-Type: text/upstart-job; charset="us-ascii" +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="kube-master-health-monitoring.conf" + +#upstart-job + +description "Kubenetes master health monitoring" + +start on stopped kube-docker + +script + set -o errexit + set -o nounset + + . /etc/kube-configure.sh + . /etc/kube-env + health_monitoring +end script + +--====================================-- diff --git a/cluster/gce/trusty/node.yaml b/cluster/gce/trusty/node.yaml index e1d90ea539..cb0078af3b 100644 --- a/cluster/gce/trusty/node.yaml +++ b/cluster/gce/trusty/node.yaml @@ -1,4 +1,4 @@ -From nobody Tue Dec 22 10:13:54 2015 +From nobody Tue Feb 1 11:33:00 2016 Content-Type: multipart/mixed; boundary="====================================" MIME-Version: 1.0 @@ -240,15 +240,10 @@ script set -o nounset # Kube-system pod manifest files are located at /run/kube-manifests/kubernetes. + . /etc/kube-configure.sh . /etc/kube-env # Fluentd - if [ "${ENABLE_NODE_LOGGING:-}" = "true" ]; then - if [ "${LOGGING_DESTINATION:-}" = "gcp" ]; then - cp /run/kube-manifests/kubernetes/fluentd-gcp.yaml /etc/kubernetes/manifests/ - elif [ "${LOGGING_DESTINATION:-}" = "elasticsearch" ]; then - cp /run/kube-manifests/kubernetes/fluentd-es.yaml /etc/kubernetes/manifests/ - fi - fi + start_fluentd # Kube-registry-proxy if [ "${ENABLE_CLUSTER_REGISTRY:-}" = "true" ]; then cp /run/kube-manifests/kubernetes/kube-registry-proxy.yaml /etc/kubernetes/manifests/ @@ -275,28 +270,9 @@ script # TODO(andyzheng0831): replace it with a more reliable method if possible. sleep 60 + . /etc/kube-configure.sh . /etc/kube-env - sleep_seconds=10 - max_seconds=10 - # We simply kill the process when there is a failure. 
Another upstart job will automatically - # restart the process. - while [ 1 ]; do - if ! timeout 20 docker ps > /dev/null; then - echo "Docker daemon failed!" - pkill docker - fi - if ! curl --insecure -m ${max_seconds} -f -s https://127.0.0.1:${KUBELET_PORT:-10250}/healthz > /dev/null; then - echo "Kubelet is unhealthy!" - pkill kubelet - fi - if ! curl -m ${max_seconds} -f -s http://127.0.0.1:10249/healthz > /dev/null; then - echo "Kube-proxy is unhealthy!" - # Get the ID of kube-proxy container and then kill it. - container=$(docker ps -q --filter name='k8s_kube-proxy') - docker kill ${container} - fi - sleep ${sleep_seconds} - done + health_monitoring end script --====================================-- diff --git a/cluster/saltbase/salt/kube-addons/kube-addons.sh b/cluster/saltbase/salt/kube-addons/kube-addons.sh index d158d3340d..19fafb6d2c 100644 --- a/cluster/saltbase/salt/kube-addons/kube-addons.sh +++ b/cluster/saltbase/salt/kube-addons/kube-addons.sh @@ -23,6 +23,7 @@ ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600} SYSTEM_NAMESPACE=kube-system token_dir=${TOKEN_DIR:-/srv/kubernetes} +trusty_master=${TRUSTY_MASTER:-false} function ensure_python() { if ! python --version > /dev/null 2>&1; then @@ -162,8 +163,11 @@ function load-docker-images() { # managed result is of that. Start everything below that directory. echo "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC} ==" -# Load any images that we may need -load-docker-images /srv/salt/kube-addons-images +# Load any images that we may need. This is not needed for trusty master and +# the way it restarts docker daemon does not work for trusty. +if [[ "${trusty_master}" == "false" ]]; then + load-docker-images /srv/salt/kube-addons-images +fi ensure_python