mirror of https://github.com/easzlab/kubeasz.git
#!/bin/bash
# Create & manage k8s clusters by 'kubeasz'

set -o nounset
set -o errexit
#set -o xtrace

function usage() {
    echo -e "\033[33mUsage:\033[0m ezctl COMMAND [args]"
    cat <<EOF
-------------------------------------------------------------------------------------
Cluster setups:
    list                              to list all of the managed clusters
    new         <cluster>             to start a new k8s deployment with name 'cluster'
    setup       <cluster> [step]      to set up a cluster, also supporting step-by-step setup
    start-aio                         to quickly set up an all-in-one cluster with 'default' settings

Cluster ops:
    add-etcd    <cluster> <args>      to add an etcd node to the etcd cluster
    add-master  <cluster> <args>      to add a master node to the k8s cluster
    add-node    <cluster> <args>      to add a worker node to the k8s cluster
    del-etcd    <cluster> <ip>        to delete an etcd node from the etcd cluster
    del-master  <cluster> <ip>        to delete a master node from the k8s cluster
    del-node    <cluster> <ip>        to delete a worker node from the k8s cluster
    upgrade     <cluster>             to upgrade the k8s cluster
    destroy     <cluster>             to destroy the current cluster, '--purge' to also delete the context

Use "ezctl help <command>" for more information about a given command.
EOF
}
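
# Example workflow (illustrative; the cluster name 'k8s-01' is just a placeholder):
#   ezctl new k8s-01                 # generate clusters/k8s-01/hosts and clusters/k8s-01/config.yml
#   vi clusters/k8s-01/hosts         # edit node IPs and inventory groups
#   vi clusters/k8s-01/config.yml    # edit cluster-wide variables
#   ezctl setup k8s-01               # run the full deployment playbook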

function logger() {
    TIMESTAMP=$(date +'%Y-%m-%d %H:%M:%S')
    case "$1" in
        debug)
            echo -e "$TIMESTAMP \033[36mDEBUG\033[0m $2"
            ;;
        info)
            echo -e "$TIMESTAMP \033[32mINFO\033[0m $2"
            ;;
        warn)
            echo -e "$TIMESTAMP \033[33mWARN\033[0m $2"
            ;;
        error)
            echo -e "$TIMESTAMP \033[31mERROR\033[0m $2"
            ;;
        *)
            ;;
    esac
}

function help-info() {
    case "$1" in
        (add-etcd)
            echo -e "Usage: easzctl add-etcd <new_etcd_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-etcd.md'"
            ;;
        (add-master)
            echo -e "Usage: easzctl add-master <new_master_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-master.md'"
            ;;
        (add-node)
            echo -e "Usage: easzctl add-node <new_node_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-node.md'"
            ;;
        (del-etcd)
            echo -e "Usage: easzctl del-etcd <etcd_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-etcd.md'"
            ;;
        (del-master)
            echo -e "Usage: easzctl del-master <master_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-master.md'"
            ;;
        (del-node)
            echo -e "Usage: easzctl del-node <node_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-node.md'"
            ;;
        (*)
            usage
            return 0
            ;;
    esac
}

### Cluster setups functions ##############################

function new() {
    # check if the cluster already exists
    [[ -d "clusters/$1" ]] && { logger error "cluster: $1 already exists"; exit 1; }
    [[ "$1" == default ]] && { logger error "name 'default' is reserved for 'ezctl start-aio'"; exit 1; }

    logger debug "generate custom cluster files in clusters/$1"
    mkdir -p "clusters/$1"
    cp example/hosts.multi-node "clusters/$1/hosts"
    sed -i "s/_cluster_name_/$1/g" "clusters/$1/hosts"
    cp example/config.yml "clusters/$1/config.yml"

    logger debug "cluster $1: files successfully created."
    logger info "next step 1: config 'clusters/$1/hosts'"
    logger info "next step 2: config 'clusters/$1/config.yml'"
}
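
# For example, 'ezctl new k8s-01' (name chosen for illustration) would leave:
#   clusters/k8s-01/hosts        copied from example/hosts.multi-node, with the cluster name substituted
#   clusters/k8s-01/config.yml   copied from example/config.yml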

function setup() {
    [[ -d "clusters/$1" ]] || { logger error "invalid config, run 'ezctl new $1' first"; return 1; }
    [[ -f "bin/kube-apiserver" ]] || { logger error "no binaries found, run 'ezdown -D' first"; return 1; }

    logger info "\n cluster:$1 setup begins in 5s, press any key to abort\n:"
    ! (read -t5 -n1 ANS) || { logger warn "setup aborted"; return 1; }

    ansible-playbook -i "clusters/$1/hosts" -e "@clusters/$1/config.yml" playbooks/90.setup.yml || return 1
}

function list() {
    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
    echo -e "\nlist of managed contexts (current: \033[33m$CLUSTER\033[0m)"
    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
    do
        echo -e "==> context $i:\t$c"
        let "i++"
    done
    echo -e "\nlist of installed clusters (current: \033[33m$CLUSTER\033[0m)"
    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
    do
        KUBECONF=$BASEPATH/.cluster/$c/config
        if [ -f "$KUBECONF" ]; then
            echo -e "==> cluster $i:\t$c"
            $BASEPATH/bin/kubectl --kubeconfig=$KUBECONF get node
        fi
        let "i++"
    done
}


### in-cluster operation functions ##############################

function add-node() {
    # check new node's address regexp
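    # (the pattern below accepts dotted-quad IPv4 addresses with each octet in 0-255;
    #  the same check is reused by the other add-*/del-* functions)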
    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 1; }

    # check if the new node already exists
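    # (sed prints the inventory section between the [kube-master] and [harbor] group headers,
    #  then grep checks whether the IP is already listed there)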
    sed -n '/^\[kube-master/,/^\[harbor/p' $BASEPATH/hosts|grep "^$1[^0-9]*$" && { echo "[ERROR] node $1 already exists!"; return 2; }

    # add a node into the 'kube-node' group
    sed -i "/\[kube-node/a $1 NEW_NODE=yes ${@:2}" $BASEPATH/hosts

    # check if the playbook runs successfully; roll back the hosts entry on failure
    ansible-playbook $BASEPATH/tools/02.addnode.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_NODE=yes/d" $BASEPATH/hosts; return 2; }

    # save current cluster context if needed
    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
    return 0
}

function add-master() {
    # check new master's address regexp
    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }

    # check if the new master already exists
    sed -n '/^\[kube-master/,/^\[kube-node/p' $BASEPATH/hosts|grep "^$1[^0-9]*$" && { echo "[ERROR] master $1 already exists!"; return 2; }

    # add a node into the 'kube-master' group
    sed -i "/\[kube-master/a $1 NEW_MASTER=yes ${@:2}" $BASEPATH/hosts

    # check if the playbook runs successfully; roll back the hosts entry on failure
    ansible-playbook $BASEPATH/tools/03.addmaster.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_MASTER=yes/d" $BASEPATH/hosts; return 2; }

    # reconfigure and restart the haproxy service on 'kube-node' nodes
    ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb -e MASTER_CHG=yes || { echo "[ERROR] Failed to restart the haproxy service on 'kube-node' nodes!"; return 2; }

    # save current cluster context if needed
    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
    return 0
}

function add-etcd() {
    # check new node's address regexp
    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }

    # check if the new node already exists
    sed -n '/^\[etcd/,/^\[kube-master/p' $BASEPATH/hosts|grep "^$1[^0-9]*$" && { echo "[ERROR] etcd $1 already exists!"; return 2; }

    # input a unique NODE_NAME for the node in the etcd cluster
    echo "Please input a UNIQUE name (string) for the new node: "
    read -t15 NAME
    sed -n '/^\[etcd/,/^\[kube-master/p' $BASEPATH/hosts|grep "$NAME" && { echo "[ERROR] name [$NAME] already exists!"; return 2; }

    # add a node into the 'etcd' group
    sed -i "/\[etcd/a $1 NODE_NAME=$NAME" $BASEPATH/hosts

    # check if the playbook runs successfully; roll back the hosts entry on failure
    ansible-playbook $BASEPATH/tools/01.addetcd.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NODE_NAME=$NAME/d" $BASEPATH/hosts; return 2; }

    # restart apiservers to use the new etcd cluster
    ansible-playbook $BASEPATH/04.kube-master.yml -t restart_master || { echo "[ERROR] Unexpected failures in master nodes!"; return 2; }

    # save current cluster context if needed
    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
    return 0
}

function del-etcd() {
    # check node's address regexp
    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }

    # remove the etcd node via playbook
    ansible-playbook $BASEPATH/tools/11.deletcd.yml -e ETCD_TO_DEL=$1 || { echo "[ERROR] Failed to delete etcd node: $1!"; return 2; }

    # restart apiservers to use the new etcd cluster
    ansible-playbook $BASEPATH/04.kube-master.yml -t restart_master || { echo "[ERROR] Unexpected failures in master nodes!"; return 2; }

    # save current cluster context if needed
    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
    return 0
}

function del-node() {
    # check node's address regexp
    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }

    # remove the node via playbook
    ansible-playbook $BASEPATH/tools/12.delnode.yml -e NODE_TO_DEL=$1 || { echo "[ERROR] Failed to delete 'kube-node': $1!"; return 2; }

    # save current cluster context if needed
    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
    return 0
}

function del-master() {
    # check node's address regexp
    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }

    # remove the master node via playbook
    ansible-playbook $BASEPATH/tools/13.delmaster.yml -e NODE_TO_DEL=$1 || { echo "[ERROR] Failed to delete 'kube-master': $1!"; return 2; }

    # regenerate kubeconfig on the ansible managing node
    ansible-playbook $BASEPATH/roles/deploy/deploy.yml -t create_kctl_cfg

    # reconfigure and restart the haproxy service on 'kube-node' nodes
    ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb -e MASTER_CHG=yes || { echo "[ERROR] Failed to restart the haproxy service on 'kube-node' nodes!"; return 2; }

    # save current cluster context if needed
    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
    return 0
}

function upgrade() {
    echo -e "[INFO] prepare the new binaries in advance"
    echo -e "[INFO] upgrade begins in 5s, press any key to abort\n:"
    ! (read -t5 -n1 ANS) || { echo "[WARN] upgrade aborted"; return 1; }
    ansible-playbook -t upgrade_k8s $BASEPATH/22.upgrade.yml || return 1
    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
    return 0
}

### cluster-wide operation functions ############################
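
# Context layout, as used by save_context/install_context below: each managed cluster keeps a
# snapshot under $BASEPATH/.cluster/<cluster>/ (roles' defaults, ansible hosts, kubeconfig,
# kube-proxy.kubeconfig and ssl certs), while $BASEPATH/.cluster/current_cluster records which
# cluster the working copies under $BASEPATH currently belong to.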

function save_context() {
    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[WARN] Invalid Context"; return 0; }
    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
    echo "[INFO] save context: $CLUSTER"
    echo "[INFO] save $CLUSTER roles' configuration"
    for ROLE in $(ls $BASEPATH/roles);
    do
        if [ -d "$BASEPATH/roles/$ROLE/defaults" ]; then
            mkdir -p $BASEPATH/.cluster/$CLUSTER/roles/$ROLE/defaults/
            cp -fpr $BASEPATH/roles/$ROLE/defaults/* $BASEPATH/.cluster/$CLUSTER/roles/$ROLE/defaults/
        fi
    done

    if [ -f "$BASEPATH/hosts" ];then
        echo "[INFO] save $CLUSTER ansible hosts"
        cp -fp $BASEPATH/hosts $BASEPATH/.cluster/$CLUSTER/
    fi

    if [ -f /root/.kube/config ];then
        echo "[INFO] save $CLUSTER kubeconfig"
        cp -fp /root/.kube/config $BASEPATH/.cluster/$CLUSTER/
    fi

    if [ -f "$BASEPATH/.cluster/kube-proxy.kubeconfig" ];then
        echo "[INFO] save $CLUSTER kube-proxy.kubeconfig"
        cp -fp $BASEPATH/.cluster/kube-proxy.kubeconfig $BASEPATH/.cluster/$CLUSTER/kube-proxy.kubeconfig
    fi

    if [ -d "$BASEPATH/.cluster/ssl" ];then
        echo "[INFO] save $CLUSTER certs"
        cp -rfp $BASEPATH/.cluster/ssl $BASEPATH/.cluster/$CLUSTER/ssl
    fi
}

function install_context() {
    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] Invalid Context"; return 1; }
    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
    echo "[INFO] install context: $CLUSTER"
    echo "[INFO] install $CLUSTER roles' configuration"
    for ROLE in $(ls $BASEPATH/.cluster/$CLUSTER/roles);
    do
        cp -fp $BASEPATH/.cluster/$CLUSTER/roles/$ROLE/defaults/* $BASEPATH/roles/$ROLE/defaults/
    done

    if [ -f "$BASEPATH/.cluster/$CLUSTER/hosts" ];then
        echo "[INFO] install $CLUSTER ansible hosts"
        cp -fp $BASEPATH/.cluster/$CLUSTER/hosts $BASEPATH/
    fi

    if [ -f "$BASEPATH/.cluster/$CLUSTER/config" ];then
        echo "[INFO] install $CLUSTER kubeconfig"
        cp -fp $BASEPATH/.cluster/$CLUSTER/config /root/.kube/
    fi

    if [ -f "$BASEPATH/.cluster/$CLUSTER/kube-proxy.kubeconfig" ];then
        echo "[INFO] install $CLUSTER kube-proxy.kubeconfig"
        cp -fp $BASEPATH/.cluster/$CLUSTER/kube-proxy.kubeconfig $BASEPATH/.cluster/kube-proxy.kubeconfig
    fi

    if [ -d "$BASEPATH/.cluster/$CLUSTER/ssl" ];then
        echo "[INFO] install $CLUSTER certs"
        cp -rfp $BASEPATH/.cluster/$CLUSTER/ssl $BASEPATH/.cluster/ssl
    fi
}

function setup() {
    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
    [ -f "$BASEPATH/bin/kube-apiserver" ] || { echo "[ERROR] no binaries found, download them first"; return 1; }
    [ -f "$BASEPATH/hosts" ] || { echo "[ERROR] no ansible hosts found, read 'docs/setup/00-planning_and_overall_intro.md'"; return 1; }
    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
    echo -e "\n[INFO] setup cluster with context: $CLUSTER"
    echo -e "[INFO] setup begins in 5s, press any key to abort\n:"
    ! (read -t5 -n1 ANS) || { echo "[WARN] setup aborted"; return 1; }
    ansible-playbook $BASEPATH/90.setup.yml || return 1
    save_context
}

function list() {
    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
    echo -e "\nlist of managed contexts (current: \033[33m$CLUSTER\033[0m)"
    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
    do
        echo -e "==> context $i:\t$c"
        let "i++"
    done
    echo -e "\nlist of installed clusters (current: \033[33m$CLUSTER\033[0m)"
    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
    do
        KUBECONF=$BASEPATH/.cluster/$c/config
        if [ -f "$KUBECONF" ]; then
            echo -e "==> cluster $i:\t$c"
            $BASEPATH/bin/kubectl --kubeconfig=$KUBECONF get node
        fi
        let "i++"
    done
}

function destroy() {
    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
    echo -n "[WARN] DELETE cluster: $CLUSTER, Continue? (y/n): "
    read -t10 -n1 ANS || { echo -e "\n[WARN] timeout, destroy aborted"; return 1; }
    if [[ -n $ANS && $ANS == y ]];then
        echo -e "\n[INFO] clean all nodes of the cluster in 5s"
        sleep 5
        ansible-playbook $BASEPATH/99.clean.yml
        rm -f $BASEPATH/.cluster/$CLUSTER/config
        [ "$#" -gt 0 ] || { return 0; }
        if [[ -n $1 && $1 == --purge ]];then
            echo "[INFO] delete current context"
            rm -rf $BASEPATH/.cluster/$CLUSTER
            rm -rf $BASEPATH/hosts /root/.kube/*
            echo "[INFO] change current context to default"
            echo default > $BASEPATH/.cluster/current_cluster
            install_context
        fi
    else
        echo -e "\n[WARN] destroy aborted"; return 1;
    fi
}

function start-aio(){
    checkout aio
    set +u
    # Check env 'HOST_IP': if it is set, we are running inside a docker container; otherwise on a host machine
    if [[ -z $HOST_IP ]];then
        # easzctl runs on a host machine, get the host's ip
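        # (the first command below takes the interface name from the default route, e.g. field 5 of
        #  'default via 10.0.0.1 dev eth0 ...' where '10.0.0.1'/'eth0' are illustrative; the second
        #  greps the 'ip a' line ending with that interface name and keeps the address before '/')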
        HOST_IF=$(ip route|grep default|cut -d' ' -f5)
        HOST_IP=$(ip a|grep "$HOST_IF$"|awk '{print $2}'|cut -d'/' -f1)
    fi
    set -u
    cp -f $BASEPATH/example/hosts.allinone $BASEPATH/hosts
    sed -i "s/192.168.1.1/$HOST_IP/g" $BASEPATH/hosts
    setup
}

### extra operation functions ###################################

function basic-auth(){
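    # Options (illustrative usage; the credentials below are placeholders):
    #   -s enable basic-auth, -S disable it, -u <user> set the user, -p <pass> set the password
    #   e.g. 'easzctl basic-auth -s -u admin -p Test1234' or 'easzctl basic-auth -S'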
    OPTIND=2
    CONFIG=$BASEPATH/roles/kube-master/defaults/main.yml
    EX_VARS=""
    while getopts "sSu:p:" OPTION; do
        case $OPTION in
            s)
                EX_VARS="BASIC_AUTH_ENABLE=yes $EX_VARS"
                ENABLED=yes
                ;;
            S)
                grep BASIC_AUTH_ENABLE $CONFIG|grep no > /dev/null && \
                { echo -e "\n[WARN]basic-auth already disabled!\n"; return 1; }
                EX_VARS="BASIC_AUTH_ENABLE=no $EX_VARS"
                ENABLED=no
                ;;
            u)
                EX_VARS="BASIC_AUTH_USER=$OPTARG $EX_VARS"
                sed -i "s/BASIC_AUTH_USER.*$/BASIC_AUTH_USER: '$OPTARG'/g" $CONFIG
                ;;
            p)
                EX_VARS="BASIC_AUTH_PASS=$OPTARG $EX_VARS"
                sed -i "s/BASIC_AUTH_PASS.*$/BASIC_AUTH_PASS: '$OPTARG'/g" $CONFIG
                ;;
            ?)
                help-info basic-auth
                return 1
                ;;
        esac
    done

    ansible-playbook $BASEPATH/04.kube-master.yml -t restart_master -e "$EX_VARS" || { return 1; }
    sed -i "s/BASIC_AUTH_ENABLE.*$/BASIC_AUTH_ENABLE: '$ENABLED'/g" $CONFIG
    if [[ $ENABLED == yes ]];then
        echo -e "\n[INFO]basic-auth for apiserver is enabled!"
        sed -n '/BASIC_AUTH_USER/p' $CONFIG
        sed -n '/BASIC_AUTH_PASS/p' $CONFIG
    elif [[ $ENABLED == no ]];then
        echo -e "\n[INFO]basic-auth for apiserver is disabled!\n"
    fi
    # save current cluster context if needed
    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
    return 0
}

### Main Lines ##################################################

BASEPATH=/etc/kubeasz

[ "$#" -gt 0 ] || { usage >&2; exit 2; }

case "$1" in
    ### in-cluster operations #####################
    (add-etcd)
        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
        ACTION="Action: add an etcd node"
        CMD="add-etcd $2"
        ;;
    (add-master)
        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
        ACTION="Action: add a k8s master node"
        CMD="add-master $2 ${@:3}"
        ;;
    (add-node)
        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
        ACTION="Action: add a k8s worker node"
        CMD="add-node $2 ${@:3}"
        ;;
    (del-etcd)
        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
        ACTION="Action: delete an etcd node"
        CMD="del-etcd $2"
        ;;
    (del-master)
        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
        ACTION="Action: delete a kube-master"
        CMD="del-master $2"
        ;;
    (del-node)
        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
        ACTION="Action: delete a kube-node"
        CMD="del-node $2"
        ;;
    (upgrade)
        ACTION="Action: upgrade the cluster"
        CMD="upgrade"
        ;;
    ### cluster-wide operations #######################
    (checkout)
        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
        ACTION="Action: checkout cluster context"
        CMD="checkout $2"
        ;;
    (destroy)
        ACTION="Action: destroy current cluster"
        if [ "$#" -gt 1 ];then
            CMD="destroy $2"
        else
            CMD="destroy"
        fi
        ;;
    (list)
        ACTION="Action: list all of the managed clusters"
        CMD="list"
        ;;
    (setup)
        ACTION="Action: setup cluster with current context"
        CMD="setup"
        ;;
    (start-aio)
        ACTION="Action: start an AllInOne cluster"
        CMD="start-aio"
        ;;
    (help)
        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
        help-info $2
        exit 0
        ;;
    ### extra operations ##############################
    (basic-auth)
        [ "$#" -gt 1 ] || { help-info $1; exit 2; }
        ACTION="Action: enable/disable apiserver's basic-auth"
        CMD="basic-auth $*"
        ;;
    (*)
        usage
        exit 0
        ;;
esac
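
# NOTE: process_cmd is not defined in this file; a minimal sketch under the assumption that it
# simply reports $ACTION and runs $CMD (the exact upstream implementation may differ):
function process_cmd() {
    echo -e "\033[33m$ACTION\033[0m"
    $CMD || { echo -e "[ERROR] command failed: $CMD"; exit 1; }
    echo -e "[INFO] $CMD finished"
}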

process_cmd