Skip to content

Instantly share code, notes, and snippets.

@ric03uec
Last active August 29, 2015 14:26
Show Gist options
  • Save ric03uec/265e8d76698cfa39e4f1 to your computer and use it in GitHub Desktop.
Save ric03uec/265e8d76698cfa39e4f1 to your computer and use it in GitHub Desktop.
Kubernetes multi-node install: Upstart job configs, /etc/default files, and installer scripts for a master/slave cluster (etcd, flannel, docker, kube components).
# /etc/default/docker — sourced by the docker Upstart job to set daemon options.
# Docker Upstart and SysVinit configuration file
# Customize location of Docker binary (especially for development testing).
#DOCKER="/usr/local/bin/docker"
# Use DOCKER_OPTS to modify the daemon startup options.
# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"
# This is also a handy place to tweak where Docker's temporary files go.
#export TMPDIR="/mnt/bigdrive/docker-tmp"
# Use Google public DNS so containers resolve names regardless of host DNS setup.
DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4"
# Listen on the default unix socket, keep image/layer data under /data (-g),
# and use the aufs storage driver (Ubuntu 14.04-era default).
DOCKER_OPTS="$DOCKER_OPTS -H unix:///var/run/docker.sock -g=/data --storage-driver aufs"
# Upstart job for the Docker daemon (/etc/init/docker.conf).
description "Docker daemon"
# Chain docker's lifecycle to flanneld so the overlay network (and its
# subnet.env lease) exists before the daemon configures its bridge.
start on started flanneld
stop on stopping flanneld
# Raise fd / process limits for container-heavy hosts.
limit nofile 524288 1048576
limit nproc 524288 1048576
respawn
pre-start script
# Mount cgroup hierarchies if the OS hasn't already done so.
# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
if grep -v '^#' /etc/fstab | grep -q cgroup \
|| [ ! -e /proc/cgroups ] \
|| [ ! -d /sys/fs/cgroup ]; then
exit 0
fi
if ! mountpoint -q /sys/fs/cgroup; then
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
fi
(
cd /sys/fs/cgroup
# Mount each cgroup subsystem the kernel reports enabled
# (column 4 of /proc/cgroups).
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
mkdir -p $sys
if ! mountpoint -q $sys; then
if ! mount -n -t cgroup -o $sys cgroup $sys; then
# Subsystem could not be mounted; remove the empty dir again.
rmdir $sys || true
fi
fi
done
)
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
DOCKER=/usr/bin/$UPSTART_JOB
DOCKER_OPTS=
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f /var/run/flannel/subnet.env ]; then
## if flannel subnet env is present, then use it to define
## the subnet and MTU values
. /var/run/flannel/subnet.env
DOCKER_OPTS="$DOCKER_OPTS --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}"
else
# Refuse to start docker without flannel's subnet lease; respawn retries.
echo "Flannel subnet not found, exiting..."
exit 1
fi
# NOTE(review): "-d" is the pre-1.8 daemon-mode flag — confirm the installed
# docker version still accepts it (newer versions use dockerd / "docker daemon").
exec "$DOCKER" -d $DOCKER_OPTS
end script
# Don't emit "started" event until docker.sock is ready.
# See https://github.com/docker/docker/issues/6647
post-start script
DOCKER_OPTS=
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
# Only wait for the unix socket when no explicit -H/--host was configured.
if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then
while ! [ -e /var/run/docker.sock ]; do
# Bail out if the job is already stopping.
initctl status $UPSTART_JOB | grep -q "stop/" && exit 1
echo "Waiting for /var/run/docker.sock"
sleep 0.1
done
echo "/var/run/docker.sock is up"
fi
end script
#etcd config
# /etc/default/etcd — sourced by the etcd Upstart job below.
export ETCD_EXECUTABLE_LOCATION=/usr/bin
# Overrides the job's /opt/bin fallback path for the binary.
export ETCD=$ETCD_EXECUTABLE_LOCATION/etcd
# Single-node etcd: listen/advertise client API on all interfaces, port 4001,
# with its data directory at /etcd.
ETCD_OPTS='--listen-client-urls=http://0.0.0.0:4001 --data-dir=/etcd --advertise-client-urls=http://0.0.0.0:4001'
# Upstart job for etcd (/etc/init/etcd.conf).
description "Etcd service"
author "@jainvipin"
start on filesystem or runlevel [2345]
stop on runlevel [!2345]
respawn
pre-start script
# Sanity check: refuse to start unless the etcd binary exists. The default
# path is /opt/bin; /etc/default/etcd overrides $ETCD to /usr/bin/etcd.
# see also https://github.com/jainvipin/kubernetes-ubuntu-start
ETCD=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $ETCD ]; then
exit 0
fi
echo "$ETCD binary not found, exiting"
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
ETCD=/opt/bin/$UPSTART_JOB
ETCD_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
# $ETCD_OPTS intentionally unquoted so it word-splits into separate flags.
exec "$ETCD" $ETCD_OPTS
end script
# Upstart job for flanneld (/etc/init/flanneld.conf). The docker job
# chains off this one ("start on started flanneld").
description "Flanneld service"
author "@ric03uec"
start on filesystem or runlevel [2345]
stop on runlevel [!2345]
respawn
pre-start script
# Refuse to start unless the flanneld binary exists; /etc/default/flanneld
# may override $FLANNELD.
FLANNELD=/usr/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $FLANNELD ]; then
exit 0
fi
echo "$FLANNELD binary not found, exiting"
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/flanneld)
FLANNELD=/usr/bin/$UPSTART_JOB
FLANNELD_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
# $FLANNELD_OPTS intentionally unquoted so it word-splits into flags.
exec "$FLANNELD" $FLANNELD_OPTS
end script
# Kube-Apiserver Upstart and SysVinit configuration file
# /etc/default/kube-apiserver — sourced by the kube-apiserver Upstart job.
KUBE_APISERVER=/usr/bin/kube-apiserver
# Bind on all interfaces, use the local etcd, allocate service IPs from
# 11.1.1.0/24, and allow privileged containers.
KUBE_APISERVER_OPTS="--address=0.0.0.0 --port=8080 --etcd_servers=http://localhost:4001 --portal_net=11.1.1.0/24 --allow_privileged=true --kubelet_port=10250 --v=0 "
# Upstart job for kube-apiserver (/etc/init/kube-apiserver.conf).
description "Kube-Apiserver service"
author "@jainvipin"
# respawn
# start in conjunction with etcd
start on started etcd
stop on stopping etcd
pre-start script
# Refuse to start unless the binary exists; /etc/default/kube-apiserver
# overrides $KUBE_APISERVER (installer places it in /usr/bin).
# see also https://github.com/jainvipin/kubernetes-start
KUBE_APISERVER=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $KUBE_APISERVER ]; then
exit 0
fi
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
KUBE_APISERVER=/opt/bin/$UPSTART_JOB
KUBE_APISERVER_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
# $KUBE_APISERVER_OPTS intentionally unquoted so it word-splits into flags.
exec "$KUBE_APISERVER" $KUBE_APISERVER_OPTS
end script
[terminal-1 (kube-master)] $ kubectl get minions
NAME LABELS STATUS
democluster-slave <none> Ready
[terminal-1 (kube-master)] $ kubectl run-container my-nginx --image=dockerfile/nginx --replicas=3 --port=80
[terminal-1 (kube-master)] $ kubectl get pods
POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED
my-nginx-fplln 172.17.0.12 my-nginx dockerfile/nginx democluster-slave/192.168.33.11 run-container=my-nginx Running 4 minutes
my-nginx-gziey 172.17.0.13 my-nginx dockerfile/nginx democluster-slave/192.168.33.11 run-container=my-nginx Running 4 minutes
my-nginx-oh43e 172.17.0.14 my-nginx dockerfile/nginx democluster-slave/192.168.33.11 run-container=my-nginx Running 4 minutes
[terminal-1 (kube-master)] $ echo "holy cow!!! this works"
## default config file for kube-controller-manager
## location /etc/default/kube-controller-manager
export KUBERNETES_EXECUTABLE_LOCATION=/usr/bin
KUBE_CONTROLLER_MANAGER=$KUBERNETES_EXECUTABLE_LOCATION/kube-controller-manager
# Bind on all interfaces and talk to the apiserver on localhost:8080.
KUBE_CONTROLLER_MANAGER_OPTS="--address=0.0.0.0 --master=127.0.0.1:8080 --v=0 "
# Upstart job for kube-controller-manager (/etc/init/kube-controller-manager.conf).
description "Kube-Controller-Manager service"
author "@jainvipin"
# respawn
# start in conjunction with etcd
start on started etcd
stop on stopping etcd
pre-start script
# Refuse to start unless the binary exists; /etc/default/... overrides the path.
# see also https://github.com/jainvipin/kubernetes-ubuntu-start
KUBE_CONTROLLER_MANAGER=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $KUBE_CONTROLLER_MANAGER ]; then
exit 0
fi
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
KUBE_CONTROLLER_MANAGER=/opt/bin/$UPSTART_JOB
KUBE_CONTROLLER_MANAGER_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
# $KUBE_CONTROLLER_MANAGER_OPTS intentionally unquoted so it word-splits.
exec "$KUBE_CONTROLLER_MANAGER" $KUBE_CONTROLLER_MANAGER_OPTS
end script
#!/bin/bash -e
#
# Copyright 2015 Shippable Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
print_help() {
  # Print CLI usage for this installer to stdout.
  cat <<'USAGE'
Usage:
./kube-installer.sh
Options:
--master <master ip address> Install kube master with provided IP
--slave <slave ip address> <master ip address> Install kube slave with provided IP

USAGE
}
# Parse command-line arguments, exporting INSTALLER_TYPE plus the relevant
# master/slave IPs. Exits with usage on any invalid invocation.
# FIX: the original used [[ $# > 0 ]], which is a lexicographic string
# comparison inside [[ ]]; (( ... )) is the arithmetic test.
if (( $# > 0 )); then
  if [[ "$1" == "--slave" ]]; then
    export INSTALLER_TYPE=slave
    # slave mode needs its own IP ($2) and the master's IP ($3)
    if [[ -n "$2" ]] && [[ -n "$3" ]]; then
      export SLAVE_IP=$2
      export MASTER_IP=$3
    else
      echo "Error!! missing Slave IP or Master IP"
      print_help
      exit 1
    fi
  elif [[ "$1" == "--master" ]]; then
    export INSTALLER_TYPE=master
    if [[ -n "$2" ]]; then
      export MASTER_IP=$2
    else
      echo "Error!! please provide Master IP"
      print_help
      exit 1
    fi
  else
    print_help
    exit 1
  fi
else
  print_help
  exit 1
fi
echo "####################################################################"
echo "#################### Installing kubernetes $INSTALLER_TYPE #########"
echo "####################################################################"
# Pinned release versions for a reproducible install.
export KUBERNETES_RELEASE_VERSION=v1.0.1
export ETCD_VERSION=v2.0.5
export DEFAULT_CONFIG_PATH=/etc/default
export ETCD_EXECUTABLE_LOCATION=/usr/bin
export ETCD_PORT=4001
export KUBERNETES_CLUSTER_ID=k8sCluster
# Downloads land in /tmp; the release tarball nests a second "kubernetes"
# directory, hence EXTRACT_DIR vs DIR below.
export KUBERNETES_DOWNLOAD_PATH=/tmp
export KUBERNETES_EXTRACT_DIR=$KUBERNETES_DOWNLOAD_PATH/kubernetes
export KUBERNETES_DIR=$KUBERNETES_EXTRACT_DIR/kubernetes
export KUBERNETES_SERVER_BIN_DIR=$KUBERNETES_DIR/server/kubernetes/server/bin
export KUBERNETES_EXECUTABLE_LOCATION=/usr/bin
# Hostnames written into /etc/hosts for master/slave discovery.
export KUBERNETES_MASTER_HOSTNAME=$KUBERNETES_CLUSTER_ID-master
export KUBERNETES_SLAVE_HOSTNAME=$KUBERNETES_CLUSTER_ID-slave
# NOTE(review): FLANNEL_VERSION and FLANNEL_EXECUTABLE_LOCATION are used by
# download_flannel_release below but never exported here — confirm the
# caller's environment provides them.
export SCRIPT_DIR=$PWD
# Indicates whether the install has succeeded
export is_success=false
install_etcd() {
# Download the pinned etcd release and install etcd/etcdctl into
# $ETCD_EXECUTABLE_LOCATION. Master-only; slaves skip this step.
if [[ $INSTALLER_TYPE == 'master' ]]; then
## download, extract and update etcd binaries ##
echo 'Installing etcd on master...'
cd $KUBERNETES_DOWNLOAD_PATH;
# Best-effort cleanup of a previous extraction.
sudo rm -r etcd-$ETCD_VERSION-linux-amd64 || true;
etcd_download_url="https://github.com/coreos/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-amd64.tar.gz";
sudo curl -L $etcd_download_url -o etcd.tar.gz;
sudo tar xzvf etcd.tar.gz && cd etcd-$ETCD_VERSION-linux-amd64;
sudo mv -v etcd $ETCD_EXECUTABLE_LOCATION/etcd;
sudo mv -v etcdctl $ETCD_EXECUTABLE_LOCATION/etcdctl;
# Verify the binary is now on PATH; fail the install otherwise.
etcd_path=$(which etcd);
if [[ -z "$etcd_path" ]]; then
echo 'etcd not installed ...'
return 1
else
echo 'etcd successfully installed ...'
echo $etcd_path;
etcd --version;
fi
else
echo "Installing for slave, skipping etcd..."
fi
}
install_docker() {
# Install docker from the (historical) get.docker.io apt repo, unless a
# docker binary is already on PATH.
echo "installing docker .........."
docker_path=$(which docker);
if [[ -z "$docker_path" ]]; then
# aufs support for the running kernel, required by the storage driver.
sudo apt-get install -y linux-image-extra-`uname -r`
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install -y lxc-docker
else
echo "Docker already installed,skipping..."
fi
}
install_prereqs() {
# bridge-utils provides brctl, used by clear_network_entities below.
echo "Installing network prereqs on slave..."
sudo apt-get install -yy bridge-utils
}
clear_network_entities() {
## remove the docker0 bridge created by docker daemon
# so that docker re-creates it with flannel's --bip subnet on next start.
echo 'stopping docker'
sudo service docker stop || true
sudo ip link set dev docker0 down || true
sudo brctl delbr docker0 || true
}
download_flannel_release() {
  # Download the flannel release tarball and install the flanneld binary
  # into $FLANNEL_EXECUTABLE_LOCATION.
  # NOTE(review): FLANNEL_VERSION / FLANNEL_EXECUTABLE_LOCATION are not set in
  # this script's env block — confirm the caller exports them.
  # FIX: the message was single-quoted, so $FLANNEL_VERSION never expanded.
  echo "Downloading flannel release version: $FLANNEL_VERSION"
  cd $KUBERNETES_DOWNLOAD_PATH
  flannel_download_url="https://github.com/coreos/flannel/releases/download/v$FLANNEL_VERSION/flannel-$FLANNEL_VERSION-linux-amd64.tar.gz";
  sudo curl --max-time 180 -L $flannel_download_url -o flannel.tar.gz;
  sudo tar xzvf flannel.tar.gz && cd flannel-$FLANNEL_VERSION;
  sudo mv -v flanneld $FLANNEL_EXECUTABLE_LOCATION/flanneld;
}
update_flanneld_config() {
# Point flanneld at the master's etcd and bind it to the slave's interface.
# NOTE: tee -a appends, so re-running the installer duplicates this line.
echo 'updating flanneld config'
echo "FLANNELD_OPTS='-etcd-endpoints=http://$MASTER_IP:$ETCD_PORT -iface=$SLAVE_IP -ip-masq=true'" | sudo tee -a /etc/default/flanneld
}
update_hosts() {
# Add this node's cluster hostname to /etc/hosts (appends; re-runs duplicate).
echo "Updating /etc/hosts..."
if [[ $INSTALLER_TYPE == 'master' ]]; then
echo "updating /etc/hosts to add master IP entry"
echo "$MASTER_IP $KUBERNETES_MASTER_HOSTNAME" | sudo tee -a /etc/hosts
else
echo "$SLAVE_IP $KUBERNETES_SLAVE_HOSTNAME" | sudo tee -a /etc/hosts
fi
cat /etc/hosts
}
download_kubernetes_release() {
  ## download and extract kubernetes archive ##
  # FIX: the message was single-quoted, so $KUBERNETES_RELEASE_VERSION
  # never expanded in the log output.
  echo "Downloading kubernetes release version: $KUBERNETES_RELEASE_VERSION"
  cd $KUBERNETES_DOWNLOAD_PATH
  mkdir -p $KUBERNETES_EXTRACT_DIR
  kubernetes_download_url="https://github.com/GoogleCloudPlatform/kubernetes/releases/download/$KUBERNETES_RELEASE_VERSION/kubernetes.tar.gz";
  sudo curl -L $kubernetes_download_url -o kubernetes.tar.gz;
  sudo tar xzvf kubernetes.tar.gz -C $KUBERNETES_EXTRACT_DIR;
}
extract_server_binaries() {
  ## extract the kubernetes server binaries ##
  # FIX: both messages were single-quoted, so $KUBERNETES_DIR never expanded.
  echo "Extracting kubernetes server binaries from $KUBERNETES_DIR"
  #cd $KUBERNETES_DIR/server
  # Run in a root shell so the cd and the tar both happen with privileges.
  sudo su -c "cd $KUBERNETES_DIR/server && tar xzvf $KUBERNETES_DIR/server/kubernetes-server-linux-amd64.tar.gz"
  echo "Successfully extracted kubernetes server binaries"
}
update_master_binaries() {
  # place binaries in correct folders
  # FIX: both messages were single-quoted, so the variables never expanded.
  echo "Updating kubernetes master binaries"
  cd $KUBERNETES_SERVER_BIN_DIR
  # Copies every extracted server binary (apiserver, kubelet, kubectl, ...).
  sudo cp -vr * $KUBERNETES_EXECUTABLE_LOCATION/
  echo "Successfully updated kubernetes server binaries to $KUBERNETES_EXECUTABLE_LOCATION"
}
copy_master_binaries() {
  # Install each master component binary into the executable directory,
  # in the same order as before: apiserver, controller-manager, scheduler, kubectl.
  echo "Copying binary files for master components"
  local component
  for component in kube-apiserver kube-controller-manager kube-scheduler kubectl; do
    sudo cp -vr $KUBERNETES_SERVER_BIN_DIR/$component $KUBERNETES_EXECUTABLE_LOCATION/
  done
}
copy_master_configs() {
  # For every master service, install its Upstart job into /etc/init and its
  # defaults file into /etc/default (same pairs and order as before).
  echo "Copying 'default' files for master components"
  local svc
  for svc in etcd kube-apiserver kube-scheduler kube-controller-manager; do
    sudo cp -vr $SCRIPT_DIR/$svc.conf /etc/init/$svc.conf
    sudo cp -vr $SCRIPT_DIR/$svc /etc/default/$svc
  done
}
copy_slave_binaries() {
# Intentional no-op placeholder: slave binaries (kubelet, kube-proxy) are
# installed by the extracted-server-binaries path — TODO confirm.
true
}
copy_slave_configs() {
# update kubelet config
# NOTE: tee -a appends; re-running the installer duplicates these entries.
echo "KUBELET=$KUBERNETES_EXECUTABLE_LOCATION/kubelet" | sudo tee -a /etc/default/kubelet
echo "KUBELET_OPTS=\"--address=0.0.0.0 --port=10250 --hostname_override=$KUBERNETES_SLAVE_HOSTNAME --api_servers=http://$KUBERNETES_MASTER_HOSTNAME:8080 --etcd_servers=http://$KUBERNETES_MASTER_HOSTNAME:4001 --enable_server=true --logtostderr=true --v=0\"" | sudo tee -a /etc/default/kubelet
echo "kubelet config updated successfully"
# update kube-proxy config
echo "KUBE_PROXY=$KUBERNETES_EXECUTABLE_LOCATION/kube-proxy" | sudo tee -a /etc/default/kube-proxy
echo -e "KUBE_PROXY_OPTS=\"--etcd_servers=http://$KUBERNETES_MASTER_HOSTNAME:4001 --master=$KUBERNETES_MASTER_HOSTNAME:8080 --logtostderr=true \"" | sudo tee -a /etc/default/kube-proxy
echo "kube-proxy config updated successfully"
}
remove_redundant_config() {
  # Remove the config files for services that do not belong to this node's
  # role, so they dont boot up if the server restarts.
  if [[ $INSTALLER_TYPE == 'master' ]]; then
    echo 'removing redundant service configs for master ...'
    # removing from /etc/init
    sudo rm -rf /etc/init/kubelet.conf || true
    sudo rm -rf /etc/init/kube-proxy.conf || true
    # removing from /etc/init.d
    sudo rm -rf /etc/init.d/kubelet || true
    sudo rm -rf /etc/init.d/kube-proxy || true
    # removing config from /etc/default
    sudo rm -rf /etc/default/kubelet || true
    sudo rm -rf /etc/default/kube-proxy || true
  else
    # FIX: this branch runs on slaves; the original message said "master".
    echo 'removing redundant service configs for slave...'
    # removing from /etc/init
    sudo rm -rf /etc/init/kube-apiserver.conf || true
    sudo rm -rf /etc/init/kube-controller-manager.conf || true
    sudo rm -rf /etc/init/kube-scheduler.conf || true
    # removing from /etc/init.d
    sudo rm -rf /etc/init.d/kube-apiserver || true
    sudo rm -rf /etc/init.d/kube-controller-manager || true
    sudo rm -rf /etc/init.d/kube-scheduler || true
    # removing from /etc/default
    sudo rm -rf /etc/default/kube-apiserver || true
    sudo rm -rf /etc/default/kube-controller-manager || true
    sudo rm -rf /etc/default/kube-scheduler || true
  fi
}
stop_services() {
  # Stop any previously-running services for this node's role. Every stop is
  # best-effort (|| true) because the service may not be installed yet.
  local svc
  if [[ $INSTALLER_TYPE == 'master' ]]; then
    echo 'Stopping master services...'
    for svc in etcd kube-apiserver kube-controller-manager kube-scheduler; do
      sudo service $svc stop || true
    done
  else
    echo 'Stopping slave services...'
    for svc in flanneld kubelet kube-proxy; do
      sudo service $svc stop || true
    done
  fi
}
start_services() {
if [[ $INSTALLER_TYPE == 'master' ]]; then
echo 'Starting master services...'
sudo service etcd start
## No need to start kube-apiserver, kube-controller-manager and kube-scheduler
## because the upstart scripts boot them up when etcd starts
else
echo 'Starting slave services...'
# NOTE(review): flanneld is not started here although stop_services stopped
# it; the docker Upstart job "start on started flanneld" depends on it —
# confirm flanneld is (re)started elsewhere.
sudo service kubelet start
sudo service kube-proxy start
fi
}
check_service_status() {
# Query every role-relevant service; any failed status aborts the script
# (bash -e) so is_success stays false and before_exit reports failure.
if [[ $INSTALLER_TYPE == 'master' ]]; then
sudo service etcd status
sudo service kube-apiserver status
sudo service kube-controller-manager status
sudo service kube-scheduler status
echo 'install of kube-master successful'
is_success=true
else
echo 'Checking slave services status...'
sudo service kubelet status
sudo service kube-proxy status
echo 'install of kube-slave successful'
is_success=true
fi
}
before_exit() {
  # EXIT-trap handler: report the overall outcome based on the $is_success
  # flag, which check_service_status sets only after everything passed.
  case "$is_success" in
    true) echo "Script Completed Successfully" ;;
    *) echo "Script executing failed" ;;
  esac
}
# Main install flow. NOTE: registering the same EXIT trap before every step
# is redundant — a single "trap before_exit EXIT" would behave identically.
trap before_exit EXIT
update_hosts
trap before_exit EXIT
stop_services
trap before_exit EXIT
remove_redundant_config
trap before_exit EXIT
download_kubernetes_release
trap before_exit EXIT
extract_server_binaries
if [[ $INSTALLER_TYPE == 'slave' ]]; then
trap before_exit EXIT
install_docker
trap before_exit EXIT
copy_slave_binaries
trap before_exit EXIT
copy_slave_configs
trap before_exit EXIT
install_prereqs
trap before_exit EXIT
clear_network_entities
trap before_exit EXIT
download_flannel_release
trap before_exit EXIT
update_flanneld_config
trap before_exit EXIT
# NOTE(review): update_slave_config is not defined anywhere in this file;
# under "bash -e" an undefined command aborts the slave install here.
# Possibly meant to be copy_slave_configs (already called above) — confirm.
update_slave_config
else
trap before_exit EXIT
install_etcd
trap before_exit EXIT
copy_master_binaries
trap before_exit EXIT
copy_master_configs
fi
trap before_exit EXIT
start_services
trap before_exit EXIT
check_service_status
echo "Kubernetes $INSTALLER_TYPE install completed"
#kube-scheduler config
# /etc/default/kube-scheduler — sourced by the kube-scheduler Upstart job.
export KUBERNETES_EXECUTABLE_LOCATION=/usr/bin
KUBE_SCHEDULER=$KUBERNETES_EXECUTABLE_LOCATION/kube-scheduler
# Bind on all interfaces and talk to the apiserver on localhost:8080.
KUBE_SCHEDULER_OPTS="--address=0.0.0.0 --master=127.0.0.1:8080 --v=0 "
# Upstart job for kube-scheduler (/etc/init/kube-scheduler.conf).
description "Kube-Scheduler service"
author "@jainvipin"
# respawn
# start in conjunction with etcd
start on started etcd
stop on stopping etcd
pre-start script
# Refuse to start unless the binary exists; /etc/default/kube-scheduler
# overrides $KUBE_SCHEDULER (installer places it in /usr/bin).
# see also https://github.com/jainvipin/kubernetes-start
KUBE_SCHEDULER=/opt/bin/$UPSTART_JOB
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if [ -f $KUBE_SCHEDULER ]; then
exit 0
fi
exit 22
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
KUBE_SCHEDULER=/opt/bin/$UPSTART_JOB
KUBE_SCHEDULER_OPTS=""
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
# $KUBE_SCHEDULER_OPTS intentionally unquoted so it word-splits into flags.
exec "$KUBE_SCHEDULER" $KUBE_SCHEDULER_OPTS
end script
#!/bin/bash -e
<%
/*
#
# Shippable kubernetes slave installer
#
# Required env vars:
#
# NODE_TYPE
# type of node e.g. ubuntu1204/fedora etc
#
# MASTER_IP
# ip address of master node. This is the IP
# slaves will use to connect to master
#
# SLAVE_IP
# ip address of the slave.
#
# PORTS_LIST
# list of ports that should be available to the slave
#
# KUBERNETES_RELEASE_VERSION
# release version of kubernetes to install
#
# KUBERNETES_CLUSTER_ID
# uuid of the kubernetes cluster
#
########################################
*/
%>
# Export environment variables
# Rendered at template time: emits one "export KEY=VALUE;" per entry supplied
# by the Shippable job (NODE_TYPE, MASTER_IP, SLAVE_IP, etc — see header).
<% _.each(scriptData.environmentVariables, function(e) { %>
export <%= e %>;
<% }); %>
# End of environment exports
export DEFAULT_CONFIG_PATH=/etc/default
export KUBERNETES_DOWNLOAD_PATH=/tmp
export ETCD_PORT=4001
# The release tarball nests a second "kubernetes" directory, hence the
# EXTRACT_DIR vs DIR distinction.
export KUBERNETES_EXTRACT_DIR=$KUBERNETES_DOWNLOAD_PATH/kubernetes
export KUBERNETES_DIR=$KUBERNETES_EXTRACT_DIR/kubernetes
export KUBERNETES_SERVER_BIN_DIR=$KUBERNETES_DIR/server/kubernetes/server/bin
export KUBERNETES_EXECUTABLE_LOCATION=/usr/bin
export KUBERNETES_MASTER_HOSTNAME=$KUBERNETES_CLUSTER_ID-master
export KUBERNETES_SLAVE_HOSTNAME=$KUBERNETES_CLUSTER_ID-slave
export FLANNEL_EXECUTABLE_LOCATION=/usr/bin
export shippable_group_name="kube-install-slave"
# Kernel/limits tuning targets applied by the update_* functions below.
export MAX_FILE_DESCRIPTORS=900000
export MAX_WATCHERS=524288
export MAX_CONNECTIONS=196608
export CONNECTION_TIMEOUT=500
export ESTABLISHED_CONNECTION_TIMEOUT=86400
# Indicates whether the install has succeeded
export is_success=false
######################### REMOTE COPY SECTION ##########################
########################################################################
## SSH variables ###
# Connection details for the slave node this script is copied to.
export NODE_SSH_IP=$SLAVE_IP
export NODE_SSH_PORT=22
export NODE_SSH_USER=shippable
export NODE_SSH_PRIVATE_KEY=$NODE_SSH_PRIVATE_KEY
## Read command line args ###
# When both uuids are given, the script copies itself to the remote node and
# runs there; otherwise it performs the local install (see bottom of file).
block_uuid=$1
script_uuid=$2
copy_kube_slave_install_script() {
  # Copy the generated slave install script to the remote node over SSH and
  # execute it there with sudo.
  # FIX: the log message previously said "kernel install script".
  echo "copying kube slave install script to remote host: $NODE_SSH_IP"
  script_folder="/tmp/$block_uuid"
  script_name="$block_uuid-$script_uuid.sh"
  script_path="/tmp/$block_uuid/$script_name"
  node_key_path=$script_folder/node_key
  # Write the SSH private key to disk and lock its permissions so ssh accepts it.
  copy_key=$(echo -e "$NODE_SSH_PRIVATE_KEY" > $node_key_path)
  chmod_cmd="chmod -cR 600 $node_key_path"
  chmod_out=$($chmod_cmd)
  echo "Removing any host key if present"
  remove_key_cmd="ssh-keygen -f '$HOME/.ssh/known_hosts' -R $NODE_SSH_IP"
  {
    eval $remove_key_cmd
  } || {
    echo "Key not present for the host: $NODE_SSH_IP"
  }
  # FIX: create the remote target directory *before* rsyncing into it; the
  # original ran mkdir after the copy, contradicting its own log output.
  mkdir_cmd="ssh -o StrictHostKeyChecking=no -o NumberOfPasswordPrompts=0 -p $NODE_SSH_PORT -i $node_key_path $NODE_SSH_USER@$NODE_SSH_IP mkdir -p $script_folder"
  echo "creating script directory: $mkdir_cmd"
  create_dir_out=$(eval $mkdir_cmd)
  echo $create_dir_out
  # NOTE(review): "-c blowfish" is rejected by modern OpenSSH — confirm the
  # target hosts still support it.
  copy_cmd="rsync -avz -e 'ssh -o StrictHostKeyChecking=no -o NumberOfPasswordPrompts=0 -p $NODE_SSH_PORT -i $node_key_path -C -c blowfish' $script_folder $NODE_SSH_USER@$NODE_SSH_IP:/tmp"
  echo "executing $copy_cmd"
  copy_cmd_out=$(eval $copy_cmd)
  echo $copy_cmd_out
  # Run the copied script on the remote node with sudo.
  execute_cmd="ssh -o StrictHostKeyChecking=no -o NumberOfPasswordPrompts=0 -p $NODE_SSH_PORT -i $node_key_path $NODE_SSH_USER@$NODE_SSH_IP sudo $script_path"
  echo "executing command: $execute_cmd"
  eval $execute_cmd
}
######################### REMOTE COPY SECTION ENDS ##########################
copy_configs() {
# Copy each template-supplied setup file from the script's own directory to
# its install path; the loop body is expanded at template-render time.
echo "copying config files "
script_folder=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
<% _.each(setupFiles, function(e) { %>
copy_cmd="sudo cp -vr $script_folder/<%= e.name %> <%= e.installPath %>"
exec_cmd "$copy_cmd"
<% }); %>
}
#
# Prints the command start and end markers with timestamps
# and executes the supplied command
#
exec_cmd() {
  # Execute a command, wrapping it in __SH__CMD__START/END__ marker lines
  # (JSON metadata + epoch timestamps) that the Shippable log parser consumes.
  # Returns the wrapped command's exit status.
  cmd=$@
  # FIX: print(...) works under python2 and python3; the original
  # "print str(...)" form is a syntax error on python3.
  cmd_uuid=$(python -c 'import uuid; print(str(uuid.uuid4()))')
  cmd_start_timestamp=`date +"%s"`
  echo "__SH__CMD__START__|{\"type\":\"cmd\",\"sequenceNumber\":\"$cmd_start_timestamp\",\"id\":\"$cmd_uuid\"}|$cmd"
  eval "$cmd"
  cmd_status=$?
  # NOTE(review): "$2" is the second argument passed to exec_cmd itself and is
  # echoed when present — confirm this is intended and not leftover debugging.
  if [ "$2" ]; then
    echo $2;
  fi
  cmd_end_timestamp=`date +"%s"`
  echo "__SH__CMD__END__|{\"type\":\"cmd\",\"sequenceNumber\":\"$cmd_start_timestamp\",\"id\":\"$cmd_uuid\",\"completed\":\"$cmd_status\"}|$cmd"
  return $cmd_status
}
exec_grp() {
  # Run a named function as a log "group", emitting __SH__GROUP__START/END__
  # marker lines around its output for the Shippable log parser.
  group_name=$1
  # FIX: print(...) works under python2 and python3; "print str(...)" is a
  # syntax error on python3.
  group_uuid=$(python -c 'import uuid; print(str(uuid.uuid4()))')
  group_start_timestamp=`date +"%s"`
  echo "__SH__GROUP__START__|{\"type\":\"grp\",\"sequenceNumber\":\"$group_start_timestamp\",\"id\":\"$group_uuid\"}|$group_name"
  eval "$group_name"
  group_status=$?
  group_end_timestamp=`date +"%s"`
  echo "__SH__GROUP__END__|{\"type\":\"grp\",\"sequenceNumber\":\"$group_end_timestamp\",\"id\":\"$group_uuid\",\"completed\":\"$group_status\"}|$group_name"
}
start_exec_grp() {
  # Emit only the opening group marker; the caller later runs the group body
  # and closes it with end_exec_grp (which reads the group_* globals set here).
  group_name=$1
  # FIX: print(...) works under python2 and python3; "print str(...)" is a
  # syntax error on python3.
  group_uuid=$(python -c 'import uuid; print(str(uuid.uuid4()))')
  group_start_timestamp=`date +"%s"`
  echo "__SH__GROUP__START__|{\"type\":\"grp\",\"sequenceNumber\":\"$group_start_timestamp\",\"id\":\"$group_uuid\"}|$group_name"
}
end_exec_grp() {
  # Emit the closing group marker using the group_uuid / group_status /
  # group_name globals populated by start_exec_grp and the group body.
  group_end_timestamp=$(date +"%s")
  printf '%s\n' "__SH__GROUP__END__|{\"type\":\"grp\",\"sequenceNumber\":\"$group_end_timestamp\",\"id\":\"$group_uuid\",\"completed\":\"$group_status\"}|$group_name"
}
create_manifests_dir() {
# Create the directory kubelet watches for static pod manifests.
manifests_dir='/etc/kubernetes/manifests'
exec_cmd "echo 'Creating kubelet manifests dir: $manifests_dir'"
exec_cmd "mkdir -p $manifests_dir"
}
enable_forwarding() {
# Enable IPv4 forwarding, required for routing pod traffic.
# NOTE: tee -a appends; re-running duplicates the sysctl.conf entry.
exec_cmd "echo 'net.ipv4.ip_forward=1' | sudo tee -a /etc/sysctl.conf"
exec_cmd "sudo sysctl -p"
}
update_file_limits() {
  ## increase the max number of file descriptors; applied at kernel limit
  # NOTE: tee -a appends; re-running the installer duplicates these entries.
  exec_cmd "echo 'fs.file-max=$MAX_FILE_DESCRIPTORS' | sudo tee -a /etc/sysctl.conf"
  exec_cmd "sudo sysctl -p"
  ## increase the max files for root user
  exec_cmd "echo '* hard nofile $MAX_FILE_DESCRIPTORS' | sudo tee -a /etc/security/limits.conf"
  exec_cmd "echo '* soft nofile $MAX_FILE_DESCRIPTORS' | sudo tee -a /etc/security/limits.conf"
  exec_cmd "echo '* hard nproc $MAX_FILE_DESCRIPTORS' | sudo tee -a /etc/security/limits.conf"
  # FIX: the original wrote the 'hard nproc' line twice; the second entry was
  # clearly meant to set the soft limit.
  exec_cmd "echo '* soft nproc $MAX_FILE_DESCRIPTORS' | sudo tee -a /etc/security/limits.conf"
  # NOTE(review): nproc limits reuse MAX_FILE_DESCRIPTORS rather than a
  # dedicated variable — confirm that is intended.
  exec_cmd "sudo sysctl -p"
}
update_watchers() {
## increase the number of file watcher limits
# NOTE: tee -a appends; re-running duplicates the sysctl.conf entry.
exec_cmd "echo $MAX_WATCHERS | sudo tee -a /proc/sys/fs/inotify/max_user_watches"
exec_cmd "echo 'fs.inotify.max_user_watches=$MAX_WATCHERS' | sudo tee -a /etc/sysctl.conf"
exec_cmd "sudo sysctl -p"
}
update_connection_limits() {
## maximum connection supported by server
exec_cmd "echo 'net.netfilter.nf_conntrack_max=$MAX_CONNECTIONS' | sudo tee -a /etc/sysctl.conf"
## timout for each connection(seconds)
exec_cmd "echo 'net.netfilter.nf_conntrack_generic_timeout=$CONNECTION_TIMEOUT' | sudo tee -a /etc/sysctl.conf"
## timeout of established connection(seconds)
exec_cmd "echo 'net.netfilter.nf_conntrack_tcp_timeout_established=$ESTABLISHED_CONNECTION_TIMEOUT' | sudo tee -a /etc/sysctl.conf"
}
install_prereqs() {
# bridge-utils provides brctl, used by clear_network_entities below.
exec_cmd "sudo apt-get install -yy bridge-utils"
}
update_hosts() {
# Add master and slave /etc/hosts entries; hostnames are rendered from the
# cluster/node models at template time.
exec_cmd "echo 'updating /etc/hosts to add slave IP entry'"
exec_cmd "echo '$MASTER_IP master-<%=scriptData.clusterModel.id %>-<%= scriptData.masterNodeModel.id %>' | sudo tee -a /etc/hosts"
slave_entry="echo '$SLAVE_IP slave-<%= scriptData.clusterModel.id %>-<%= scriptData.nodeModel.id %>' | sudo tee -a /etc/hosts"
exec_cmd "$slave_entry"
}
clear_network_entities() {
## remove the docker0 bridge created by docker daemon
# so docker re-creates it with flannel's --bip subnet on next start.
exec_cmd "echo 'stopping docker'"
exec_cmd "sudo service docker stop || true"
exec_cmd "sudo ip link set dev docker0 down || true"
exec_cmd "sudo brctl delbr docker0 || true"
}
download_kubernetes_release() {
## download and extract kubernetes archive ##
exec_cmd "echo 'Downloading kubernetes release version: $KUBERNETES_RELEASE_VERSION'"
cd $KUBERNETES_DOWNLOAD_PATH
mkdir -p $KUBERNETES_EXTRACT_DIR
kubernetes_download_url="https://github.com/GoogleCloudPlatform/kubernetes/releases/download/$KUBERNETES_RELEASE_VERSION/kubernetes.tar.gz";
exec_cmd "sudo curl --max-time 180 -L $kubernetes_download_url -o kubernetes.tar.gz";
exec_cmd "sudo tar xzvf kubernetes.tar.gz -C $KUBERNETES_EXTRACT_DIR";
}
extract_server_binaries() {
## extract the kubernetes server binaries ##
exec_cmd "echo 'Extracting kubernetes server binaries from $KUBERNETES_DIR'"
cd $KUBERNETES_DIR/server
exec_cmd "sudo tar xzvf kubernetes-server-linux-amd64.tar.gz"
exec_cmd "echo 'Successfully extracted kubernetes server binaries'"
}
update_master_binaries() {
# place binaries in correct folders
# NOTE: despite the name, this runs on the slave too and copies *all* server
# binaries (including kubelet and kube-proxy) into place.
exec_cmd "echo 'Updating kubernetes master binaries'"
cd $KUBERNETES_SERVER_BIN_DIR
exec_cmd "sudo cp -vr * $KUBERNETES_EXECUTABLE_LOCATION/"
exec_cmd "echo 'Successfully updated kubernetes server binaries to $KUBERNETES_EXECUTABLE_LOCATION'"
}
download_flannel_release() {
# Download flannel and install the flanneld binary. The cd executed via
# exec_cmd's eval persists in this shell, so the mv below runs in the
# extracted directory.
exec_cmd "echo 'Downloading flannel release version: $FLANNEL_VERSION'"
cd $KUBERNETES_DOWNLOAD_PATH
flannel_download_url="https://github.com/coreos/flannel/releases/download/v$FLANNEL_VERSION/flannel-$FLANNEL_VERSION-linux-amd64.tar.gz";
exec_cmd "sudo curl --max-time 180 -L $flannel_download_url -o flannel.tar.gz";
exec_cmd "sudo tar xzvf flannel.tar.gz && cd flannel-$FLANNEL_VERSION";
exec_cmd "sudo mv -v flanneld $FLANNEL_EXECUTABLE_LOCATION/flanneld";
}
update_flanneld_config() {
# Point flanneld at the master's etcd and bind it to this slave's interface.
# NOTE: tee -a appends; re-running duplicates the entry.
exec_cmd "echo 'updating flanneld config'"
echo "FLANNELD_OPTS='-etcd-endpoints=http://$MASTER_IP:$ETCD_PORT -iface=$SLAVE_IP -ip-masq=true'" | sudo tee -a /etc/default/flanneld
}
remove_redundant_config() {
  # Remove the config files for master-only services so that they don't boot
  # up if this slave node restarts. Each removal is best-effort (|| true).
  exec_cmd "echo 'removing redundant service configs...'"
  local svc
  # removing from /etc/init
  for svc in kube-apiserver kube-controller-manager kube-scheduler; do
    exec_cmd "sudo rm -rf /etc/init/$svc.conf || true"
  done
  # removing from /etc/init.d
  for svc in kube-apiserver kube-controller-manager kube-scheduler; do
    exec_cmd "sudo rm -rf /etc/init.d/$svc || true"
  done
  # removing from /etc/default
  for svc in kube-apiserver kube-controller-manager kube-scheduler; do
    exec_cmd "sudo rm -rf /etc/default/$svc || true"
  done
}
stop_services() {
# stop any existing services
# Best-effort: the services may not be installed on a first run.
exec_cmd "echo 'Stopping slave services...'"
exec_cmd "sudo service kubelet stop || true"
exec_cmd "sudo service kube-proxy stop || true"
}
start_services() {
# Restart the slave stack; flanneld first, since docker's Upstart job
# chains off it ("start on started flanneld").
exec_cmd "echo 'Starting slave services...'"
exec_cmd "sudo service flanneld restart || true"
exec_cmd "sudo service kubelet restart || true "
exec_cmd "sudo service kube-proxy restart || true"
}
check_service_status() {
# Status checks are informational only (|| true); is_success is set
# unconditionally afterwards — NOTE(review): confirm this is intended.
exec_cmd "echo 'Checking slave services status...'"
sleep 3
exec_cmd "sudo service flanneld status || true"
exec_cmd "sudo service kubelet status || true"
exec_cmd "sudo service kube-proxy status || true"
is_success=true
}
log_service_versions() {
# Log installed component versions for debugging.
exec_cmd "flanneld --version"
exec_cmd "sudo kubelet --version"
exec_cmd "sudo kube-proxy --version"
exec_cmd "sudo docker version"
exec_cmd "sudo docker info"
}
before_exit() {
  ## flush out any remaining console
  # EXIT handler: echo any trailing output passed as args, then print the
  # sentinel that tells the Shippable agent whether the install succeeded.
  echo $1
  echo $2
  case "$is_success" in
    true) echo "__SH__SCRIPT_END_SUCCESS__" ;;
    *) echo "__SH__SCRIPT_END_FAILURE__" ;;
  esac
}
# Main flow: with both uuids supplied, only copy/exec the script on the remote
# node; otherwise run the full local slave install.
# NOTE(review): $block_uuid / $script_uuid are unquoted in the tests — empty
# values work, but values with spaces would break; confirm inputs.
if [ ! -z $block_uuid ] && [ ! -z $script_uuid ]; then
copy_kube_slave_install_script
else
# Registering the same EXIT trap before every step is redundant — a single
# "trap before_exit EXIT" would behave identically.
trap before_exit EXIT
exec_grp "create_manifests_dir"
trap before_exit EXIT
exec_grp "copy_configs"
trap before_exit EXIT
exec_grp "enable_forwarding"
trap before_exit EXIT
exec_grp "update_file_limits"
trap before_exit EXIT
exec_grp "update_watchers"
trap before_exit EXIT
exec_grp "update_connection_limits"
trap before_exit EXIT
exec_grp "install_prereqs"
trap before_exit EXIT
exec_grp "update_hosts"
trap before_exit EXIT
exec_grp "clear_network_entities"
trap before_exit EXIT
exec_grp "stop_services"
trap before_exit EXIT
exec_grp "download_kubernetes_release"
trap before_exit EXIT
exec_grp "download_flannel_release"
trap before_exit EXIT
exec_grp "extract_server_binaries"
trap before_exit EXIT
# Installs all server binaries (incl. kubelet/kube-proxy) despite the name.
exec_grp "update_master_binaries"
trap before_exit EXIT
exec_grp "update_flanneld_config"
trap before_exit EXIT
exec_grp "remove_redundant_config"
trap before_exit EXIT
exec_grp "start_services"
trap before_exit EXIT
exec_grp "check_service_status"
trap before_exit EXIT
exec_grp "log_service_versions"
echo "Kubernetes slave install completed"
fi
[terminal 1] $ cd /home/kube
[terminal-1] $ vagrant box add https://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box --name trusty64
[terminal-1] $ vagrant up kube-master
[terminal-1] $ vagrant ssh kube-master
[terminal-1] $ ./kube-install.sh master
[terminal-2] $ vagrant up kube-slave
[terminal-2] $ vagrant ssh kube-slave
[terminal-2] $ ./kube-install.sh slave
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile for the demo cluster: one master VM on a host-only network;
# slave definitions are present but commented out.
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Create a private network, which allows host-only access to the machine
# using a specific IP.
config.vm.define "kube-master" do |master|
# "trusty64" is the Ubuntu 14.04 box added via `vagrant box add` (see the
# terminal transcript accompanying these scripts).
master.vm.box = "trusty64"
master.vm.network "private_network", ip: "192.168.33.10"
master.vm.hostname = "kube-master"
end
# Uncomment to add slave nodes at 192.168.33.11 / .12 on the same network.
# config.vm.define "kube-slave1" do |slave|
# slave.vm.box = "trusty64"
# slave.vm.network "private_network", ip: "192.168.33.11"
# slave.vm.hostname = "kube-slave1"
# end
# config.vm.define "kube-slave2" do |slave|
# slave.vm.box = "trusty64"
# slave.vm.network "private_network", ip: "192.168.33.12"
# slave.vm.hostname = "kube-slave2"
# end
end
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment