mirror of https://github.com/easzlab/kubeasz.git
Rewrite the installation process

parent 524e2390fa
commit a85c649805
@ -1,6 +1,5 @@
 # download directory
 down/*
-!down/download.sh
 !down/offline_images

 # binaries directory
 bin/*
@ -1,41 +0,0 @@
-#!/bin/bash
-# This script describes where to download the official released binaries needed
-# It's suggested to download using 'ezdown -D', everything needed will be ready in '/etc/kubeasz'
-
-# example releases
-K8S_VER=v1.13.7
-ETCD_VER=v3.3.10
-DOCKER_VER=18.09.6
-CNI_VER=v0.7.5
-DOCKER_COMPOSE_VER=1.23.2
-HARBOR_VER=v1.9.4
-CONTAINERD_VER=1.2.6
-
-echo -e "\nNote: It's strongly recommended that downloading with 'ezdown -D', everything needed will be ready in '/etc/kubeasz'."
-
-echo -e "\n----download k8s binary at:"
-echo -e https://dl.k8s.io/${K8S_VER}/kubernetes-server-linux-amd64.tar.gz
-
-echo -e "\n----download etcd binary at:"
-echo -e https://github.com/coreos/etcd/releases/download/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz
-echo -e https://storage.googleapis.com/etcd/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz
-
-echo -e "\n----download docker binary at:"
-echo -e https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VER}.tgz
-
-echo -e "\n----download ca tools at:"
-echo -e https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
-echo -e https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
-echo -e https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
-
-echo -e "\n----download docker-compose at:"
-echo -e https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VER}/docker-compose-Linux-x86_64
-
-echo -e "\n----download harbor-offline-installer at:"
-echo -e https://storage.googleapis.com/harbor-releases/harbor-offline-installer-${HARBOR_VER}.tgz
-
-echo -e "\n----download cni plugins at:"
-echo -e https://github.com/containernetworking/plugins/releases
-
-echo -e "\n----download containerd at:"
-echo -e https://storage.googleapis.com/cri-containerd-release/cri-containerd-${CONTAINERD_VER}.linux-amd64.tar.gz
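The deleted helper above only printed download URLs; the commit replaces it with the 'ezdown -D' flow. A minimal sketch of the intended usage, assuming the default workdir /etc/kubeasz and the subcommands shown later in this diff ('mycluster' is a placeholder name):

    ./ezdown -D                  # fetch binaries and images into /etc/kubeasz
    cd /etc/kubeasz
    ./ezctl new mycluster        # generate clusters/mycluster/ config files
    ./ezctl setup mycluster all  # run the full setup, or one step at a time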
@ -1,7 +1,6 @@
 # 'etcd' cluster should have odd member(s) (1,3,5,...)
-# variable 'NODE_NAME' is the distinct name of a member in 'etcd' cluster
 [etcd]
-192.168.1.1 NODE_NAME=etcd1
+192.168.1.1

 # master node(s)
 [kube-master]
@ -1,9 +1,8 @@
 # 'etcd' cluster should have odd member(s) (1,3,5,...)
-# variable 'NODE_NAME' is the distinct name of a member in 'etcd' cluster
 [etcd]
-192.168.1.1 NODE_NAME=etcd1
-192.168.1.2 NODE_NAME=etcd2
-192.168.1.3 NODE_NAME=etcd3
+192.168.1.1
+192.168.1.2
+192.168.1.3

 # master node(s)
 [kube-master]
ezctl (395 changed lines)
@ -1,5 +1,5 @@
 #!/bin/bash
-# Create & manage k8s clusters by 'kubeasz'
+# Create & manage k8s clusters

 set -o nounset
 set -o errexit
@ -11,12 +11,15 @@ function usage() {
 -------------------------------------------------------------------------------------
 Cluster setups:
     list                             to list all of the managed clusters
+    checkout    <cluster>            to switch default kubeconfig of the cluster
     new         <cluster>            to start a new k8s deploy with name 'cluster'
     setup       <cluster>  <step>    to setup a cluster, also supporting a step-by-step way
     start       <cluster>            to start all of the k8s services stopped by 'ezctl stop'
     stop        <cluster>            to stop all of the k8s services temporarily
     upgrade     <cluster>            to upgrade the k8s cluster
     destroy     <cluster>            to destroy the k8s cluster
+    backup      <cluster>            to backup the cluster state (etcd snapshot)
+    restore     <cluster>            to restore the cluster state from backups
     start-aio                        to quickly setup an all-in-one cluster with 'default' settings

 Cluster ops:
@ -76,26 +79,25 @@ function help-info() {
         usage-setup
         ;;
     (add-etcd)
-        echo -e "Usage: easzctl add-etcd <new_etcd_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-etcd.md'"
+        echo -e "read more > 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-etcd.md'"
        ;;
     (add-master)
-        echo -e "Usage: easzctl add-master <new_master_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-master.md'"
+        echo -e "read more > 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-master.md'"
        ;;
     (add-node)
-        echo -e "Usage: easzctl add-node <new_node_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-node.md'"
+        echo -e "read more > 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-node.md'"
        ;;
     (del-etcd)
-        echo -e "Usage: easzctl del-etcd <etcd_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-etcd.md'"
+        echo -e "read more > 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-etcd.md'"
        ;;
     (del-master)
-        echo -e "Usage: easzctl del-master <master_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-master.md'"
+        echo -e "read more > 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-master.md'"
        ;;
     (del-node)
-        echo -e "Usage: easzctl del-node <node_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-node.md'"
+        echo -e "read more > 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-node.md'"
        ;;
     (*)
-        usage
-        return 0
+        echo -e "todo: help info $1"
        ;;
     esac
 }

@ -105,7 +107,6 @@ function help-info() {
 function new() {
     # check if already existed
     [[ -d "clusters/$1" ]] && { logger error "cluster: $1 already existed"; exit 1; }
     [[ "$1" == default ]] && { logger error "name 'default' is reserved"; exit 1; }

     logger debug "generate custom cluster files in clusters/$1"
     mkdir -p "clusters/$1"
@ -179,8 +180,8 @@ function setup() {
         ;;
     esac

-    logger info "cluster:$1 setup begins in 5s, press any key to abort:\n"
-    ! (read -t5 -n1 ANS) || { logger warn "setup aborted"; return 1; }
+    logger info "cluster:$1 setup step:$2 begins in 5s, press any key to abort:\n"
+    ! (read -r -t5 -n1) || { logger warn "setup abort"; return 1; }

     ansible-playbook -i "clusters/$1/hosts" -e "@clusters/$1/config.yml" "playbooks/$PLAY_BOOK" || return 1
 }
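The rewritten prompt uses a small bash idiom: 'read -r -t5 -n1' waits at most 5 seconds for a single keypress and succeeds only if one arrived, so negating it means "continue unless a key was pressed". A standalone sketch:

    #!/bin/bash
    # countdown-abort idiom, as in setup()/cmd() above
    echo "starting in 5s, press any key to abort..."
    if read -r -t5 -n1; then
        echo "aborted by user"; exit 1
    fi
    echo "continuing"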
@ -215,314 +216,160 @@ function cmd() {
     esac

     logger info "cluster:$1 $2 begins in 5s, press any key to abort:\n"
-    ! (read -t5 -n1 ANS) || { logger warn "$2 aborted"; return 1; }
+    ! (read -r -t5 -n1) || { logger warn "$2 abort"; return 1; }

     ansible-playbook -i "clusters/$1/hosts" -e "@clusters/$1/config.yml" "playbooks/$PLAY_BOOK" || return 1
 }

 function list() {
-    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
-    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
-    echo -e "\nlist of managed contexts (current: \033[33m$CLUSTER\033[0m)"
-    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
+    [[ -d ./clusters ]] || { logger error "cluster not found, run 'ezctl new' first"; return 1; }
+    [[ -f ~/.kube/config ]] || { logger error "kubeconfig not found, run 'ezctl setup' first"; return 1; }
+    which md5sum > /dev/null 2>&1 || { logger error "md5sum not found"; return 1; }
+
+    CLUSTERS=$(cd clusters && echo -- *)
+    CFG_MD5=$(md5sum -t ~/.kube/config |cut -d' ' -f1)
+    cd "$BASE"
+
+    logger info "list of managed clusters:"
+    i=1; for c in $CLUSTERS;
     do
-        echo -e "==> context $i:\t$c"
-        let "i++"
-    done
-    echo -e "\nlist of installed clusters (current: \033[33m$CLUSTER\033[0m)"
-    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
-    do
-        KUBECONF=$BASEPATH/.cluster/$c/config
-        if [ -f "$KUBECONF" ]; then
-            echo -e "==> cluster $i:\t$c"
-            $BASEPATH/bin/kubectl --kubeconfig=$KUBECONF get node
+        if [[ -f "clusters/$c/kubectl.kubeconfig" ]];then
+            c_md5=$(md5sum -t "clusters/$c/kubectl.kubeconfig" |cut -d' ' -f1)
+            if [[ "$c_md5" = "$CFG_MD5" ]];then
+                echo -e "==> cluster $i:\t$c (\033[32mcurrent\033[0m)"
+            else
+                echo -e "==> cluster $i:\t$c"
+            fi
+            let "i++"
         fi
-        let "i++"
     done
 }
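The new list() decides which cluster is "current" by comparing md5 digests of ~/.kube/config and each cluster's kubectl.kubeconfig. The same check in isolation (paths are placeholders):

    #!/bin/bash
    # two files are the same kubeconfig when their digests match
    current=$(md5sum ~/.kube/config | cut -d' ' -f1)
    candidate=$(md5sum clusters/mycluster/kubectl.kubeconfig | cut -d' ' -f1)
    [[ "$current" == "$candidate" ]] && echo "mycluster is the current cluster"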

 function checkout() {
     [[ -d "clusters/$1" ]] || { logger error "invalid config, run 'ezctl new $1' first"; return 1; }
     [[ -f "clusters/$1/kubectl.kubeconfig" ]] || { logger error "invalid kubeconfig, run 'ezctl setup $1' first"; return 1; }
     logger info "set default kubeconfig: cluster $1 (\033[32mcurrent\033[0m)"
     /bin/cp -f "clusters/$1/kubectl.kubeconfig" ~/.kube/config
 }

 ### in-cluster operation functions ##############################

 function add-node() {
     # check new node's address regexp
-    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 1; }
+    [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error "Invalid ip add:$2"; return 1; }

     # check if the new node already exsited
-    sed -n '/^\[kube-master/,/^\[harbor/p' $BASEPATH/hosts|grep "^$1[^0-9]*$" && { echo "[ERROR] node $1 already existed!"; return 2; }
+    sed -n '/^\[kube-master/,/^\[harbor/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "node $2 already existed in $BASE/clusters/$1/hosts"; return 2; }

     # add a node into 'kube-node' group
-    sed -i "/\[kube-node/a $1 NEW_NODE=yes ${@:2}" $BASEPATH/hosts
+    logger info "add $2 into 'kube-node' group"
+    sed -i "/\[kube-node/a $2 NEW_NODE=yes ${@:3}" "$BASE/clusters/$1/hosts"

-    # check if playbook runs successfully
-    ansible-playbook $BASEPATH/tools/02.addnode.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_NODE=yes/d" $BASEPATH/hosts; return 2; }
-
-    # save current cluster context if needed
-    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
-    return 0
+    logger info "start to add a work node:$2 into cluster:$1"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/22.addnode.yml" -e "NODE_TO_ADD=$2" -e "@clusters/$1/config.yml"
 }
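All add-*/del-* functions now validate their second argument with the same IPv4 regex (each octet limited to 0-255). The pattern, testable on its own:

    #!/bin/bash
    # octet = 25[0-5] | 2[0-4][0-9] | [0-1]?[0-9]{1,2}, four octets dot-joined
    is_ipv4() {
        [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]]
    }
    is_ipv4 192.168.1.10 && echo "valid"
    is_ipv4 256.1.1.1    || echo "invalid: octet out of range"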

 function add-master() {
     # check new master's address regexp
-    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }
+    [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error "Invalid ip add:$2"; return 1; }

     # check if the new master already exsited
-    sed -n '/^\[kube-master/,/^\[kube-node/p' $BASEPATH/hosts|grep "^$1[^0-9]*$" && { echo "[ERROR] master $1 already existed!"; return 2; }
+    sed -n '/^\[kube-master/,/^\[kube-node/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "master $2 already existed!"; return 2; }

     # add a node into 'kube-master' group
-    sed -i "/\[kube-master/a $1 NEW_MASTER=yes ${@:2}" $BASEPATH/hosts
+    logger info "add $2 into 'kube-master' group"
+    sed -i "/\[kube-master/a $2 NEW_MASTER=yes ${@:3}" "$BASE/clusters/$1/hosts"

-    # check if playbook runs successfully
-    ansible-playbook $BASEPATH/tools/03.addmaster.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_MASTER=yes/d" $BASEPATH/hosts; return 2; }
+    logger info "start to add a master node:$2 into cluster:$1"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/23.addmaster.yml" -e "NODE_TO_ADD=$2" -e "@clusters/$1/config.yml"

     # reconfigure and restart the haproxy service on 'kube-node' nodes
-    ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb -e MASTER_CHG=yes || { echo "[ERROR] Failed to restart the haproxy service on 'kube-node' nodes!"; return 2; }
-
-    # save current cluster context if needed
-    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
-    return 0
+    logger info "reconfigure and restart the haproxy service on 'kube-node' nodes"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/05.kube-node.yml" -t restart_lb -e MASTER_CHG=yes -e "@clusters/$1/config.yml"
 }

 function add-etcd() {
     # check new node's address regexp
-    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }
+    [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error "Invalid ip add:$2"; return 1; }

     # check if the new node already exsited
-    sed -n '/^\[etcd/,/^\[kube-master/p' $BASEPATH/hosts|grep "^$1[^0-9]*$" && { echo "[ERROR] etcd $1 already existed!"; return 2; }
+    sed -n '/^\[etcd/,/^\[kube-master/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "etcd $2 already existed!"; return 2; }

-    # input an unique NODE_NAME of the node in etcd cluster
-    echo "Please input an UNIQUE name(string) for the new node: "
-    read -t15 NAME
-    sed -n '/^\[etcd/,/^\[kube-master/p' $BASEPATH/hosts|grep "$NAME" && { echo "[ERROR] name [$NAME] already existed!"; return 2; }
+    logger info "add $2 into 'etcd' group"
+    sed -i "/\[etcd/a $2 NEW_ETCD=yes ${@:3}" "$BASE/clusters/$1/hosts"

-    # add a node into 'etcd' group
-    sed -i "/\[etcd/a $1 NODE_NAME=$NAME" $BASEPATH/hosts
+    logger info "start to add a etcd node:$2 into cluster:$1"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/21.addetcd.yml" -e "NODE_TO_ADD=$2" -e "@clusters/$1/config.yml"

-    # check if playbook runs successfully
-    ansible-playbook $BASEPATH/tools/01.addetcd.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NODE_NAME=$NAME/d" $BASEPATH/hosts; return 2; }
-
-    # restart apiservers to use the new etcd cluster
-    ansible-playbook $BASEPATH/04.kube-master.yml -t restart_master || { echo "[ERROR] Unexpected failures in master nodes!"; return 2; }
-
-    # save current cluster context if needed
-    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
-    return 0
+    logger info "reconfig &restart the etcd cluster"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/02.etcd.yml" -t restart_etcd -e "@clusters/$1/config.yml"
+
+    logger info "restart apiservers to use the new etcd cluster"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/04.kube-master.yml" -t restart_master -e "@clusters/$1/config.yml"
 }
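The inventory edits above rely on sed's 'a' (append) command: the new host line is inserted directly after the matching group header. A self-contained demo with a throwaway hosts file (contents are placeholders; GNU sed syntax assumed):

    #!/bin/bash
    cat > /tmp/hosts <<'EOF'
    [etcd]
    192.168.1.1

    [kube-master]
    192.168.1.1
    EOF
    # insert a new member right after the '[etcd]' header
    sed -i "/\[etcd/a 192.168.1.4 NEW_ETCD=yes" /tmp/hosts
    grep -A2 '^\[etcd' /tmp/hosts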

 function del-etcd() {
     # check node's address regexp
-    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }
+    [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error "Invalid ip add:$2"; return 1; }

-    #
-    ansible-playbook $BASEPATH/tools/11.deletcd.yml -e ETCD_TO_DEL=$1 || { echo "[ERROR] Failed to delete etcd node: $1!"; return 2; }
+    logger warn "start to delete the etcd node:$2 from cluster:$1"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/31.deletcd.yml" -e "ETCD_TO_DEL=$2" -e "CLUSTER=$1" -e "@clusters/$1/config.yml"

-    # restart apiservers to use the new etcd cluster
-    ansible-playbook $BASEPATH/04.kube-master.yml -t restart_master || { echo "[ERROR] Unexpected failures in master nodes!"; return 2; }
-
-    # save current cluster context if needed
-    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
-    return 0
+    logger info "reconfig &restart the etcd cluster"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/02.etcd.yml" -t restart_etcd -e "@clusters/$1/config.yml"
+
+    logger info "restart apiservers to use the new etcd cluster"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/04.kube-master.yml" -t restart_master -e "@clusters/$1/config.yml"
 }

 function del-node() {
     # check node's address regexp
-    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }
+    [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger "Invalid ip add:$2"; return 2; }

-    #
-    ansible-playbook $BASEPATH/tools/12.delnode.yml -e NODE_TO_DEL=$1 || { echo "[ERROR] Failed to delete 'kube-node': $1!"; return 2; }
-
-    # save current cluster context if needed
-    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
-    return 0
+    logger warn "start to delete the node:$2 from cluster:$1"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/32.delnode.yml" -e "NODE_TO_DEL=$2" -e "CLUSTER=$1" -e "@clusters/$1/config.yml"
 }

 function del-master() {
     # check node's address regexp
-    [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }
+    [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error "Invalid ip add:$2"; return 2; }

-    #
-    ansible-playbook $BASEPATH/tools/13.delmaster.yml -e NODE_TO_DEL=$1 || { echo "[ERROR] Failed to delete 'kube-master': $1!"; return 2; }
+    logger warn "start to delete the master:$2 from cluster:$1"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/33.delmaster.yml" -e "NODE_TO_DEL=$2" -e "CLUSTER=$1" -e "@clusters/$1/config.yml"

     # reconfig kubeconfig in ansible manage node
-    ansible-playbook $BASEPATH/roles/deploy/deploy.yml -t create_kctl_cfg
+    logger info "reconfig kubeconfig in ansible manage node"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/roles/deploy/deploy.yml" -t create_kctl_cfg -e "@clusters/$1/config.yml"

     # reconfigure and restart the haproxy service on 'kube-node' nodes
-    ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb -e MASTER_CHG=yes || { echo "[ERROR] Failed to restart the haproxy service on 'kube-node' nodes!"; return 2; }
-
-    # save current cluster context if needed
-    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
-    return 0
+    logger info "reconfigure and restart the haproxy service on 'kube-node' nodes"
+    ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/05.kube-node.yml" -t restart_lb -e MASTER_CHG=yes -e "@clusters/$1/config.yml"
 }

-function upgrade() {
-    echo -e "[INFO] prepare the new binaries in advance"
-    echo -e "[INFO] upgrade begin in 5s, press any key to abort\n:"
-    ! (read -t5 -n1 ANS) || { echo "[WARN] upgrade aborted"; return 1; }
-    ansible-playbook -t upgrade_k8s $BASEPATH/22.upgrade.yml || return 1
-    [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
-    return 0
-}
 ### cluster-wide operation functions ############################

-function save_context() {
-    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[WARN] Invalid Context"; return 0; }
-    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
-    echo "[INFO] save context: $CLUSTER"
-    echo "[INFO] save $CLUSTER roles' configration"
-    for ROLE in $(ls $BASEPATH/roles);
-    do
-        if [ -d "$BASEPATH/roles/$ROLE/defaults" ]; then
-            mkdir -p $BASEPATH/.cluster/$CLUSTER/roles/$ROLE/defaults/
-            cp -fpr $BASEPATH/roles/$ROLE/defaults/* $BASEPATH/.cluster/$CLUSTER/roles/$ROLE/defaults/
-        fi
-    done
-
-    if [ -f "$BASEPATH/hosts" ];then
-        echo "[INFO] save $CLUSTER ansible hosts"
-        cp -fp $BASEPATH/hosts $BASEPATH/.cluster/$CLUSTER/
-    fi
-
-    if [ -f /root/.kube/config ];then
-        echo "[INFO] save $CLUSTER kubeconfig"
-        cp -fp /root/.kube/config $BASEPATH/.cluster/$CLUSTER/
-    fi
-
-    if [ -f "$BASEPATH/.cluster/kube-proxy.kubeconfig" ];then
-        echo "[INFO] save $CLUSTER kube-proxy.kubeconfig"
-        cp -fp $BASEPATH/.cluster/kube-proxy.kubeconfig $BASEPATH/.cluster/$CLUSTER/kube-proxy.kubeconfig
-    fi
-
-    if [ -d "$BASEPATH/.cluster/ssl" ];then
-        echo "[INFO] save $CLUSTER certs"
-        cp -rfp $BASEPATH/.cluster/ssl $BASEPATH/.cluster/$CLUSTER/ssl
-    fi
-}
-
-function install_context() {
-    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] Invalid Context"; return 1; }
-    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
-    echo "[INFO] install context: $CLUSTER"
-    echo "[INFO] install $CLUSTER roles' configration"
-    for ROLE in $(ls $BASEPATH/.cluster/$CLUSTER/roles);
-    do
-        cp -fp $BASEPATH/.cluster/$CLUSTER/roles/$ROLE/defaults/* $BASEPATH/roles/$ROLE/defaults/
-    done
-
-    if [ -f "$BASEPATH/.cluster/$CLUSTER/hosts" ];then
-        echo "[INFO] install $CLUSTER ansible hosts"
-        cp -fp $BASEPATH/.cluster/$CLUSTER/hosts $BASEPATH/
-    fi
-
-    if [ -f "$BASEPATH/.cluster/$CLUSTER/config" ];then
-        echo "[INFO] install $CLUSTER kubeconfig"
-        cp -fp $BASEPATH/.cluster/$CLUSTER/config /root/.kube/
-    fi
-
-    if [ -f "$BASEPATH/.cluster/$CLUSTER/kube-proxy.kubeconfig" ];then
-        echo "[INFO] install $CLUSTER kube-proxy.kubeconfig"
-        cp -fp $BASEPATH/.cluster/$CLUSTER/kube-proxy.kubeconfig $BASEPATH/.cluster/kube-proxy.kubeconfig
-    fi
-
-    if [ -d "$BASEPATH/.cluster/$CLUSTER/ssl" ];then
-        echo "[INFO] install $CLUSTER certs"
-        cp -rfp $BASEPATH/.cluster/$CLUSTER/ssl $BASEPATH/.cluster/ssl
-    fi
-}
-
-function list() {
-    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
-    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
-    echo -e "\nlist of managed contexts (current: \033[33m$CLUSTER\033[0m)"
-    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
-    do
-        echo -e "==> context $i:\t$c"
-        let "i++"
-    done
-    echo -e "\nlist of installed clusters (current: \033[33m$CLUSTER\033[0m)"
-    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
-    do
-        KUBECONF=$BASEPATH/.cluster/$c/config
-        if [ -f "$KUBECONF" ]; then
-            echo -e "==> cluster $i:\t$c"
-            $BASEPATH/bin/kubectl --kubeconfig=$KUBECONF get node
-        fi
-        let "i++"
-    done
-}

 function start-aio(){
-    checkout aio
     set +u
-    # Check ENV 'HOST_IP', if exist indecates running in a docker container, otherwise running in a host machine
+    # Check ENV 'HOST_IP', exists if the CMD 'ezctl' running in a docker container
     if [[ -z $HOST_IP ]];then
-        # easzctl runs in a host machine, get host's ip
+        # ezctl runs in a host machine, get host's ip
         HOST_IF=$(ip route|grep default|cut -d' ' -f5)
-        HOST_IP=$(ip a|grep "$HOST_IF$"|awk '{print $2}'|cut -d'/' -f1)
+        HOST_IP=$(ip a|grep "$HOST_IF$"|head -n1|awk '{print $2}'|cut -d'/' -f1)
     fi
     set -u
-    cp -f $BASEPATH/example/hosts.allinone $BASEPATH/hosts
-    sed -i "s/192.168.1.1/$HOST_IP/g" $BASEPATH/hosts
-    setup
+
+    logger info "get local host ipadd: $HOST_IP"
+
+    new default
+    /bin/cp -f example/hosts.allinone "clusters/default/hosts"
+    sed -i "s/_cluster_name_/default/g" "clusters/default/hosts"
+    sed -i "s/192.168.1.1/$HOST_IP/g" "clusters/default/hosts"
+
+    setup default all
 }
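start-aio derives the host address from the routing table: take the default route's interface, then the first IPv4 address bound to it. The same two lines in isolation (assumes the iproute2 'ip' tool; output formats can vary across distros):

    #!/bin/bash
    HOST_IF=$(ip route|grep default|cut -d' ' -f5)
    HOST_IP=$(ip a|grep "$HOST_IF$"|head -n1|awk '{print $2}'|cut -d'/' -f1)
    echo "interface=$HOST_IF ip=$HOST_IP"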

 ### extra operation functions ###################################

 function basic-auth(){
     OPTIND=2
     CONFIG=$BASEPATH/roles/kube-master/defaults/main.yml
     EX_VARS=""
     while getopts "sSu:p:" OPTION; do
         case $OPTION in
             s)
                 EX_VARS="BASIC_AUTH_ENABLE=yes $EX_VARS"
                 ENABLED=yes
                 ;;
             S)
                 grep BASIC_AUTH_ENABLE $CONFIG|grep no > /dev/null && \
                 { echo -e "\n[WARN]basic-auth already disabled!\n"; return 1; }
                 EX_VARS="BASIC_AUTH_ENABLE=no $EX_VARS"
                 ENABLED=no
                 ;;
             u)
                 EX_VARS="BASIC_AUTH_USER=$OPTARG $EX_VARS"
                 sed -i "s/BASIC_AUTH_USER.*$/BASIC_AUTH_USER: '$OPTARG'/g" $CONFIG
                 ;;
             p)
                 EX_VARS="BASIC_AUTH_PASS=$OPTARG $EX_VARS"
                 sed -i "s/BASIC_AUTH_PASS.*$/BASIC_AUTH_PASS: '$OPTARG'/g" $CONFIG
                 ;;
             ?)
                 help-info basic-auth
                 return 1
                 ;;
         esac
     done

     ansible-playbook $BASEPATH/04.kube-master.yml -t restart_master -e "$EX_VARS" || { return 1; }
     sed -i "s/BASIC_AUTH_ENABLE.*$/BASIC_AUTH_ENABLE: '$ENABLED'/g" $CONFIG
     if [[ $ENABLED == yes ]];then
         echo -e "\n[INFO]basic-auth for apiserver is enabled!"
         sed -n '/BASIC_AUTH_USER/p' $CONFIG
         sed -n '/BASIC_AUTH_PASS/p' $CONFIG
     elif [[ $ENABLED == no ]];then
         echo -e "\n[INFO]basic-auth for apiserver is disabled!\n"
     fi
     # save current cluster context if needed
     [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
     return 0
 }

 ### Main Lines ##################################################
 function main() {
-    BASEPATH=$(cd `dirname $0`; pwd)
-    cd "$BASEPATH"
-
-    # check workdir
-    [[ "$BASEPATH" == "/etc/kubeasz" ]] || { logger error "workdir should be '/etc/kubeasz'"; exit 1; }
+    BASE="/etc/kubeasz"
+    [[ -d "$BASE" ]] || { logger error "invalid dir:$BASE, try: 'ezdown -D'"; exit 1; }
+    cd "$BASE"

     # check bash shell
     readlink /proc/$$/exe|grep -q "dash" && { logger error "you should use bash shell only"; exit 1; }
@ -535,39 +382,37 @@ function main() {
     case "$1" in
     ### in-cluster operations #####################
     (add-etcd)
-        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
-        ACTION="Action: add a etcd node"
-        CMD="add-etcd $2"
+        [ "$#" -gt 2 ] || { usage >&2; exit 2; }
+        add-etcd "${@:2}"
         ;;
     (add-master)
-        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
-        ACTION="Action: add a k8s master node"
-        CMD="add-master $2 ${@:3}"
+        [ "$#" -gt 2 ] || { usage >&2; exit 2; }
+        add-master "${@:2}"
        ;;
     (add-node)
-        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
-        ACTION="Action: add a k8s work node"
-        CMD="add-node $2 ${@:3}"
+        [ "$#" -gt 2 ] || { usage >&2; exit 2; }
+        add-node "${@:2}"
        ;;
     (del-etcd)
-        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
-        ACTION="Action: delete a etcd node"
-        CMD="del-etcd $2"
+        [ "$#" -eq 3 ] || { usage >&2; exit 2; }
+        del-etcd "$2" "$3"
        ;;
     (del-master)
-        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
-        ACTION="Action: delete a kube-master"
-        CMD="del-master $2"
+        [ "$#" -eq 3 ] || { usage >&2; exit 2; }
+        del-master "$2" "$3"
        ;;
     (del-node)
-        [ "$#" -gt 1 ] || { usage >&2; exit 2; }
-        ACTION="Action: delete a kube-node"
-        CMD="del-node $2"
+        [ "$#" -eq 3 ] || { usage >&2; exit 2; }
+        del-node "$2" "$3"
        ;;
     ### cluster-wide operations #######################
+    (checkout)
+        [ "$#" -eq 2 ] || { usage >&2; exit 2; }
+        checkout "$2"
+        ;;
     (list)
-        ACTION="Action: list all of clusters managed"
-        CMD="list"
+        [ "$#" -eq 1 ] || { usage >&2; exit 2; }
+        list
        ;;
     (new)
         [ "$#" -eq 2 ] || { usage >&2; exit 2; }
@ -602,20 +447,14 @@ function main() {
         cmd "$2" destroy
        ;;
     (start-aio)
-        ACTION="Action: start an AllInOne cluster"
-        CMD="start-aio"
+        [ "$#" -eq 1 ] || { usage >&2; exit 2; }
+        start-aio
        ;;
     (help)
         [ "$#" -gt 1 ] || { usage >&2; exit 2; }
-        help-info $2
+        help-info "$2"
+        exit 0
        ;;
     ### extra operations ##############################
     (basic-auth)
         [ "$#" -gt 1 ] || { help-info $1; exit 2; }
         ACTION="Action: enable/disable apiserver's basic-auth"
         CMD="basic-auth $*"
        ;;
     (*)
         usage
         exit 0
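Taken together, the new dispatch gives a cluster-scoped CLI. Example invocations (a sketch; cluster name and IP are placeholders):

    ./ezctl list                             # show managed clusters, mark the current one
    ./ezctl add-node test-k8s 192.168.1.100  # scale out a worker
    ./ezctl del-node test-k8s 192.168.1.100
    ./ezctl checkout test-k8s                # point ~/.kube/config at this cluster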
ezdown (105 changed lines)
@ -1,6 +1,6 @@
 #!/bin/bash
 #--------------------------------------------------
-# This script is for:
+# This script is used for:
 # 1. to download the scripts/binaries/images needed for installing a k8s cluster with kubeasz
 # 2. to run kubeasz in a container (optional)
 # @author: gjmzj
@ -21,10 +21,10 @@ SYS_PKG_VER=0.3.3

 # images needed by k8s cluster
 calicoVer=v3.15.3
-ciliumVer=v1.4.1
 flannelVer=v0.13.0-amd64
-kubeRouterVer=v0.3.1
-kubeOvnVer=v1.5.3
+export ciliumVer=v1.4.1
+export kubeRouterVer=v0.3.1
+export kubeOvnVer=v1.5.3
 corednsVer=1.7.1
 dashboardVer=v2.1.0
 dashboardMetricsScraperVer=v1.0.6
@ -45,8 +45,6 @@ function usage() {
   -m <str>    set docker registry mirrors, default "CN"(used in Mainland,China)
   -p <ver>    set kubeasz-sys-pkg version, default "$SYS_PKG_VER"
   -z <ver>    set kubeasz version, default "$KUBEASZ_VER"
-
-see more at https://github.com/kubeasz/dockerfiles
 EOF
 }
@ -86,12 +84,12 @@ function download_docker() {
     else
         wget -c "$DOCKER_URL" || { logger error "downloading docker failed"; exit 1; }
     fi
-        /bin/mv -f ./docker-${DOCKER_VER}.tgz $BASE/down
+        /bin/mv -f "./docker-$DOCKER_VER.tgz" "$BASE/down"
     fi

-    tar zxf $BASE/down/docker-${DOCKER_VER}.tgz -C $BASE/down && \
-    /bin/cp -f $BASE/down/docker/* $BASE/bin && \
-    /bin/mv -f $BASE/down/docker/* /opt/kube/bin && \
+    tar zxf "$BASE/down/docker-$DOCKER_VER.tgz" -C "$BASE/down" && \
+    /bin/cp -f "$BASE"/down/docker/* "$BASE/bin" && \
+    /bin/mv -f "$BASE"/down/docker/* /opt/kube/bin && \
     ln -sf /opt/kube/bin/docker /bin/docker
 }
@ -177,17 +175,17 @@ function get_kubeasz() {
     docker run -d --name temp_easz easzlab/kubeasz:${KUBEASZ_VER} || { logger error "download failed."; exit 1; }

     [[ -f "$BASE/down/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "$BASE/down/docker-${DOCKER_VER}.tgz" /tmp
-    [[ -d "$BASE/bin" ]] && /bin/mv -f $BASE/bin /tmp
+    [[ -d "$BASE/bin" ]] && /bin/mv -f "$BASE/bin" /tmp

-    rm -rf $BASE && \
+    rm -rf "$BASE" && \
     logger debug "cp kubeasz code from the temporary container" && \
-    docker cp temp_easz:$BASE $BASE && \
+    docker cp "temp_easz:$BASE" "$BASE" && \
     logger debug "stop&remove temporary container" && \
     docker rm -f temp_easz

-    mkdir -p $BASE/bin
-    [[ -f "/tmp/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "/tmp/docker-${DOCKER_VER}.tgz" $BASE/down
-    [[ -d "/tmp/bin" ]] && /bin/mv -f /tmp/bin/* $BASE/bin
+    mkdir -p "$BASE/bin" "$BASE/down"
+    [[ -f "/tmp/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "/tmp/docker-${DOCKER_VER}.tgz" "$BASE/down"
+    [[ -d "/tmp/bin" ]] && /bin/mv -f /tmp/bin/* "$BASE/bin"
     return 0
 }
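get_kubeasz ships code inside the easzlab/kubeasz image and copies it out of a temporary container. The core pattern, reduced to three commands ('docker create' is a variant that works even when the image has no long-running entrypoint, whereas the script itself uses 'docker run -d'; the tag is a placeholder):

    #!/bin/bash
    docker create --name temp_easz easzlab/kubeasz:3.0.0
    docker cp temp_easz:/etc/kubeasz /tmp/kubeasz   # works on a stopped container too
    docker rm -f temp_easz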
@ -195,87 +193,86 @@ function get_k8s_bin() {
     [[ -f "$BASE/bin/kubelet" ]] && { logger warn "kubernetes binaries existed"; return 0; }

     logger info "downloading kubernetes: $K8S_BIN_VER binaries"
-    docker pull easzlab/kubeasz-k8s-bin:${K8S_BIN_VER} && \
+    docker pull easzlab/kubeasz-k8s-bin:"$K8S_BIN_VER" && \
     logger debug "run a temporary container" && \
     docker run -d --name temp_k8s_bin easzlab/kubeasz-k8s-bin:${K8S_BIN_VER} && \
     logger debug "cp k8s binaries" && \
-    docker cp temp_k8s_bin:/k8s $BASE/k8s_bin_tmp && \
-    /bin/mv -f $BASE/k8s_bin_tmp/* $BASE/bin && \
+    docker cp temp_k8s_bin:/k8s "$BASE/k8s_bin_tmp" && \
+    /bin/mv -f "$BASE"/k8s_bin_tmp/* "$BASE/bin" && \
     logger debug "stop&remove temporary container" && \
     docker rm -f temp_k8s_bin && \
-    rm -rf $BASE/k8s_bin_tmp
+    rm -rf "$BASE/k8s_bin_tmp"
 }

 function get_ext_bin() {
     [[ -f "$BASE/bin/etcdctl" ]] && { logger warn "extral binaries existed"; return 0; }

     logger info "downloading extral binaries kubeasz-ext-bin:$EXT_BIN_VER"
-    docker pull easzlab/kubeasz-ext-bin:${EXT_BIN_VER} && \
+    docker pull "easzlab/kubeasz-ext-bin:$EXT_BIN_VER" && \
     logger debug "run a temporary container" && \
-    docker run -d --name temp_ext_bin easzlab/kubeasz-ext-bin:${EXT_BIN_VER} && \
+    docker run -d --name temp_ext_bin "easzlab/kubeasz-ext-bin:$EXT_BIN_VER" && \
     logger debug "cp extral binaries" && \
-    docker cp temp_ext_bin:/extra $BASE/extra_bin_tmp && \
-    /bin/mv -f $BASE/extra_bin_tmp/* $BASE/bin && \
+    docker cp temp_ext_bin:/extra "$BASE/extra_bin_tmp" && \
+    /bin/mv -f "$BASE"/extra_bin_tmp/* "$BASE/bin" && \
     logger debug "stop&remove temporary container" && \
     docker rm -f temp_ext_bin && \
-    rm -rf $BASE/extra_bin_tmp
+    rm -rf "$BASE/extra_bin_tmp"
 }

 function get_sys_pkg() {
     [[ -f "$BASE/down/packages/chrony_xenial.tar.gz" ]] && { logger warn "system packages existed"; return 0; }

     logger info "downloading system packages kubeasz-sys-pkg:$SYS_PKG_VER"
-    docker pull easzlab/kubeasz-sys-pkg:${SYS_PKG_VER} && \
+    docker pull "easzlab/kubeasz-sys-pkg:$SYS_PKG_VER" && \
     logger debug "run a temporary container" && \
-    docker run -d --name temp_sys_pkg easzlab/kubeasz-sys-pkg:${SYS_PKG_VER} && \
+    docker run -d --name temp_sys_pkg "easzlab/kubeasz-sys-pkg:$SYS_PKG_VER" && \
     logger debug "cp system packages" && \
-    docker cp temp_sys_pkg:/packages $BASE/down && \
+    docker cp temp_sys_pkg:/packages "$BASE/down" && \
     logger debug "stop&remove temporary container" && \
     docker rm -f temp_sys_pkg
 }

 function get_offline_image() {

-    imageDir=$BASE/down
-    [[ -d "$imageDir" ]] || { logger error "$imageDir not existed!"; exit 1; }
+    imageDir="$BASE/down"

     logger info "downloading offline images"

     if [[ ! -f "$imageDir/calico_$calicoVer.tar" ]];then
-        docker pull "calico/cni:${calicoVer}" && \
-        docker pull "calico/pod2daemon-flexvol:${calicoVer}" && \
-        docker pull "calico/kube-controllers:${calicoVer}" && \
-        docker pull "calico/node:${calicoVer}" && \
-        docker save -o ${imageDir}/calico_${calicoVer}.tar calico/cni:${calicoVer} calico/kube-controllers:${calicoVer} calico/node:${calicoVer} calico/pod2daemon-flexvol:${calicoVer}
+        docker pull "calico/cni:$calicoVer" && \
+        docker pull "calico/pod2daemon-flexvol:$calicoVer" && \
+        docker pull "calico/kube-controllers:$calicoVer" && \
+        docker pull "calico/node:$calicoVer" && \
+        docker save -o "$imageDir/calico_$calicoVer.tar" "calico/cni:$calicoVer" "calico/kube-controllers:$calicoVer" "calico/node:$calicoVer" "calico/pod2daemon-flexvol:$calicoVer"
     fi
     if [[ ! -f "$imageDir/coredns_$corednsVer.tar" ]];then
-        docker pull coredns/coredns:${corednsVer} && \
-        docker save -o ${imageDir}/coredns_${corednsVer}.tar coredns/coredns:${corednsVer}
+        docker pull "coredns/coredns:$corednsVer" && \
+        docker save -o "$imageDir/coredns_$corednsVer.tar" "coredns/coredns:$corednsVer"
     fi
     if [[ ! -f "$imageDir/dashboard_$dashboardVer.tar" ]];then
-        docker pull kubernetesui/dashboard:${dashboardVer} && \
-        docker save -o ${imageDir}/dashboard_${dashboardVer}.tar kubernetesui/dashboard:${dashboardVer}
+        docker pull "kubernetesui/dashboard:$dashboardVer" && \
+        docker save -o "$imageDir/dashboard_$dashboardVer.tar" "kubernetesui/dashboard:$dashboardVer"
     fi
     if [[ ! -f "$imageDir/flannel_$flannelVer.tar" ]];then
-        docker pull easzlab/flannel:${flannelVer} && \
-        docker save -o ${imageDir}/flannel_${flannelVer}.tar easzlab/flannel:${flannelVer}
+        docker pull "easzlab/flannel:$flannelVer" && \
+        docker save -o "$imageDir/flannel_$flannelVer.tar" "easzlab/flannel:$flannelVer"
     fi
     if [[ ! -f "$imageDir/metrics-scraper_$dashboardMetricsScraperVer.tar" ]];then
-        docker pull kubernetesui/metrics-scraper:${dashboardMetricsScraperVer} && \
-        docker save -o ${imageDir}/metrics-scraper_${dashboardMetricsScraperVer}.tar kubernetesui/metrics-scraper:${dashboardMetricsScraperVer}
+        docker pull "kubernetesui/metrics-scraper:$dashboardMetricsScraperVer" && \
+        docker save -o "$imageDir/metrics-scraper_$dashboardMetricsScraperVer.tar" "kubernetesui/metrics-scraper:$dashboardMetricsScraperVer"
     fi
     if [[ ! -f "$imageDir/metrics-server_$metricsVer.tar" ]];then
-        docker pull mirrorgooglecontainers/metrics-server-amd64:${metricsVer} && \
-        docker save -o ${imageDir}/metrics-server_${metricsVer}.tar mirrorgooglecontainers/metrics-server-amd64:${metricsVer}
+        docker pull "mirrorgooglecontainers/metrics-server-amd64:$metricsVer" && \
+        docker save -o "$imageDir/metrics-server_$metricsVer.tar" "mirrorgooglecontainers/metrics-server-amd64:$metricsVer"
     fi
     if [[ ! -f "$imageDir/pause_$pauseVer.tar" ]];then
-        docker pull easzlab/pause-amd64:${pauseVer} && \
-        docker save -o ${imageDir}/pause_${pauseVer}.tar easzlab/pause-amd64:${pauseVer}
-        /bin/cp -u "${imageDir}/pause_${pauseVer}.tar" "${imageDir}/pause.tar"
+        docker pull "easzlab/pause-amd64:$pauseVer" && \
+        docker save -o "$imageDir/pause_$pauseVer.tar" "easzlab/pause-amd64:$pauseVer"
+        /bin/cp -u "$imageDir/pause_$pauseVer.tar" "$imageDir/pause.tar"
     fi
     if [[ ! -f "$imageDir/kubeasz_$KUBEASZ_VER.tar" ]];then
-        docker pull easzlab/kubeasz:${KUBEASZ_VER} && \
-        docker save -o ${imageDir}/kubeasz_${KUBEASZ_VER}.tar easzlab/kubeasz:${KUBEASZ_VER}
+        docker pull "easzlab/kubeasz:$KUBEASZ_VER" && \
+        docker save -o "$imageDir/kubeasz_$KUBEASZ_VER.tar" "easzlab/kubeasz:$KUBEASZ_VER"
     fi
 }
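get_offline_image caches every image as a tarball via 'docker save' so that air-gapped hosts can restore it with 'docker load'. The round trip for one image (version follows corednsVer above; the path is illustrative):

    #!/bin/bash
    docker pull coredns/coredns:1.7.1
    docker save -o /tmp/coredns_1.7.1.tar coredns/coredns:1.7.1
    # ...transfer the tar to the offline host, then:
    docker load -i /tmp/coredns_1.7.1.tar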
@ -313,18 +310,16 @@ function start_kubeasz_docker() {
     fi

     #
-    docker load -i $BASE/down/kubeasz_${KUBEASZ_VER}.tar
+    docker load -i "$BASE/down/kubeasz_$KUBEASZ_VER.tar"

     # run kubeasz docker container
     docker run --detach \
            --name kubeasz \
            --restart always \
            --env HOST_IP="$host_ip" \
-           --volume $BASE:$BASE \
+           --volume "$BASE":"$BASE" \
            --volume /root/.kube:/root/.kube \
-           --volume /root/.ssh/id_rsa:/root/.ssh/id_rsa:ro \
-           --volume /root/.ssh/id_rsa.pub:/root/.ssh/id_rsa.pub:ro \
-           --volume /root/.ssh/known_hosts:/root/.ssh/known_hosts:ro \
+           --volume /root/.ssh:/root/.ssh \
            easzlab/kubeasz:${KUBEASZ_VER} sleep 36000
 }

@ -0,0 +1,36 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: whoami
+  labels:
+    app: whoami
+spec:
+  ports:
+  - name: web
+    port: 80
+    targetPort: 80
+  selector:
+    app: whoami
+  sessionAffinity: None
+  #type: NodePort
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: whoami
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: whoami
+  template:
+    metadata:
+      labels:
+        app: whoami
+    spec:
+      containers:
+      - name: whoami
+        image: emilevauge/whoami
+        ports:
+        - containerPort: 80
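The new whoami manifest doubles as a quick smoke test for a fresh cluster. One possible check sequence (standard kubectl; the service name matches the manifest):

    kubectl apply -f whoami.yaml
    kubectl get pods -l app=whoami -o wide
    kubectl run tmp --rm -it --restart=Never --image=busybox -- wget -qO- http://whoami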
@ -1,9 +1,7 @@
-# Note: this playbook cann't run independently
-# Usage: easzctl add-etcd 1.1.1.1
-
+# add new-etcd node, one at a time
 - hosts: "{{ NODE_TO_ADD }}"
   tasks:
   # step1: find a healthy member in the etcd cluster
   - block:
       - name: set NODE_IPS of the etcd cluster
         set_fact: NODE_IPS="{% for host in groups['etcd'] %}{{ host }} {% endfor %}"
@ -30,12 +28,13 @@
   - debug: var="RUNNING_NODE.stdout"
     connection: local

 # step2: add a new member in the etcd cluster
   - name: add a new etcd member
-    shell: "ETCDCTL_API=3 {{ bin_dir }}/etcdctl member add {{ NODE_NAME }} \
+    shell: "ETCDCTL_API=3 {{ bin_dir }}/etcdctl member add etcd-{{ NODE_TO_ADD }} \
           --peer-urls=https://{{ NODE_TO_ADD }}:2380"
     delegate_to: "{{ RUNNING_NODE.stdout }}"

-# start the new-etcd node
+# step3: start the new-etcd node
 - hosts: "{{ NODE_TO_ADD }}"
   vars:
     CLUSTER_STATE: existing
@ -43,10 +42,3 @@
   - { role: chrony, when: "groups['chrony']|length > 0" }
   - prepare
   - etcd
-
-# restart the original etcd cluster with the new configuration
-- hosts: etcd
-  vars:
-    CLUSTER_STATE: existing
-  roles:
-    - etcd
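Adding an etcd member is a two-step dance: register it on a healthy member, then start the new node with CLUSTER_STATE=existing. The manual equivalent of step2 (TLS flags such as --cacert/--cert/--key are omitted for brevity; the IP is a placeholder and the name follows the new etcd-<ip> convention):

    #!/bin/bash
    export ETCDCTL_API=3
    etcdctl member add etcd-192.168.1.4 --peer-urls=https://192.168.1.4:2380
    # then start etcd on 192.168.1.4 with --initial-cluster-state=existing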
@ -1,5 +1,4 @@
 # Note: this playbook can not run independently
-# Usage: easzctl add-node 1.1.1.1

 - hosts: "{{ NODE_TO_ADD }}"
   roles:
@ -23,7 +22,7 @@
       - "{{ base_dir }}/down/coredns*.tar"
       - "{{ base_dir }}/down/dashboard*.tar"
       - "{{ base_dir }}/down/metrics-scraper*.tar"
-      - "{{ base_dir }}/down/metrics*.tar"
+      - "{{ base_dir }}/down/metrics-server*.tar"
       - "{{ base_dir }}/down/traefik*.tar"
     ignore_errors: true

@ -1,5 +1,4 @@
 # Note: this playbook cann't run independently
-# Usage: easzctl add-master 1.1.1.1

 - hosts: "{{ NODE_TO_ADD }}"
   roles:
@ -17,13 +16,10 @@
 #
   tasks:
   - name: Making master nodes SchedulingDisabled
-    shell: "{{ bin_dir }}/kubectl cordon {{ NODE_TO_ADD }} "
+    shell: "{{ base_dir }}/bin/kubectl cordon {{ NODE_TO_ADD }} "
     when: "inventory_hostname not in groups['kube-node']"
     ignore_errors: true

   - name: Setting master role name
-    shell: "{{ bin_dir }}/kubectl label node {{ NODE_TO_ADD }} kubernetes.io/role=master --overwrite"
+    shell: "{{ base_dir }}/bin/kubectl label node {{ NODE_TO_ADD }} kubernetes.io/role=master --overwrite"
     ignore_errors: true

-# reconfigure and restart the haproxy service on 'kube-node' nodes
-# refer to the function 'add-node()' in 'tools/easzctl'
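A freshly joined master is cordoned so regular workloads stay off it, then labeled with the master role. The same two tasks as plain CLI (node name is a placeholder):

    #!/bin/bash
    kubectl cordon 192.168.1.5                 # mark the node unschedulable
    kubectl label node 192.168.1.5 kubernetes.io/role=master --overwrite
    kubectl get node 192.168.1.5 --show-labels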
@ -1,5 +1,4 @@
 # WARNNING: this playbook will clean the etcd {{ ETCD_TO_DEL }}
-# USAGE: easzctl del-etcd 1.1.1.1

 - hosts: localhost
   vars_prompt:
@ -42,7 +41,7 @@

   - debug: var="RUNNING_NODE.stdout"

-  # step2: remove jobs run on the healthy member if needed
+  # step2: remove jobs run on the healthy member
   - name: get ID of etcd node to delete
     shell: "ETCDCTL_API=3 {{ bin_dir }}/etcdctl member list \
           |grep {{ ETCD_TO_DEL }}:2380|cut -d',' -f1"
@ -55,26 +54,26 @@
     register: ETCD_NAME
     delegate_to: "{{ RUNNING_NODE.stdout }}"

   - debug: var="ETCD_NAME.stdout"

   - name: delete a etcd member
     shell: "ETCDCTL_API=3 {{ bin_dir }}/etcdctl member remove {{ ETCD_ID.stdout }}"
     delegate_to: "{{ RUNNING_NODE.stdout }}"
     when: "ETCD_ID.stdout != ''"

-  - name: clean etcd {{ ETCD_TO_DEL }} if possible
-    shell: "ansible-playbook {{ base_dir }}/roles/clean/clean_node.yml \
+  - name: clean etcd {{ ETCD_TO_DEL }}
+    shell: "ansible-playbook -i {{ base_dir }}/clusters/{{ CLUSTER }}/hosts \
+           {{ base_dir }}/roles/clean/clean_node.yml \
           -e NODE_TO_CLEAN={{ ETCD_TO_DEL }} \
           -e DEL_ETCD=yes >> /tmp/ansible-`date +'%Y%m%d%H%M%S'`.log 2>&1 \
           || echo 'data not cleaned on {{ ETCD_TO_DEL }}'"
     register: CLEAN_STATUS

-  - debug: var="CLEAN_STATUS.stdout"
+  - debug: var="CLEAN_STATUS"

   # lineinfile is inadequate to delete lines between some specific line range
   - name: remove the etcd's node entry in hosts
-    shell: 'sed -i "/^\[etcd/,/^\[kube-master/ {/^{{ ETCD_TO_DEL }}[^0-9]/d}" {{ base_dir }}/hosts'
+    shell: 'sed -i "/^\[etcd/,/^\[kube-master/ {/^{{ ETCD_TO_DEL }}[^0-9]/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts'
     args:
       warn: false

-  - name: reconfig and restart the etcd cluster
-    shell: "ansible-playbook {{ base_dir }}/02.etcd.yml >> /tmp/ansible-`date +'%Y%m%d%H%M%S'`.log 2>&1"
-    when: "groups['etcd']|length > 1 and ETCD_TO_DEL in groups['etcd']"
@ -1,5 +1,4 @@
 # WARNNING: this playbook will clean the node {{ NODE_TO_DEL }}
-# USAGE: easzctl del-node 1.1.1.1

 - hosts: localhost
   tasks:
@ -7,27 +6,26 @@
     when: "groups['kube-master']|length < 2 and NODE_TO_DEL in groups['kube-master']"

   - name: run kubectl drain @{{ NODE_TO_DEL }}
-    shell: "{{ bin_dir }}/kubectl drain {{ NODE_TO_DEL }} --ignore-daemonsets --delete-local-data"
-    ignore_errors: true
+    shell: "{{ base_dir }}/bin/kubectl drain {{ NODE_TO_DEL }} --ignore-daemonsets --delete-emptydir-data --force"
+    #ignore_errors: true

-  - name: clean node {{ NODE_TO_DEL }} if possible
-    shell: "ansible-playbook {{ base_dir }}/roles/clean/clean_node.yml \
+  - name: clean node {{ NODE_TO_DEL }}
+    shell: "ansible-playbook -i {{ base_dir }}/clusters/{{ CLUSTER }}/hosts \
+           {{ base_dir }}/roles/clean/clean_node.yml \
           -e NODE_TO_CLEAN={{ NODE_TO_DEL }} \
           -e DEL_NODE=yes \
           -e DEL_LB=yes >> /tmp/ansible-`date +'%Y%m%d%H%M%S'`.log 2>&1 \
           || echo 'data not cleaned on {{ NODE_TO_DEL }}'"
     register: CLEAN_STATUS

-  - debug: var="CLEAN_STATUS.stdout"
-
-  - debug: var="CLEAN_STATUS.stderr"
+  - debug: var="CLEAN_STATUS"

   - name: run kubectl delete node {{ NODE_TO_DEL }}
-    shell: "{{ bin_dir }}/kubectl delete node {{ NODE_TO_DEL }}"
+    shell: "{{ base_dir }}/bin/kubectl delete node {{ NODE_TO_DEL }}"
     ignore_errors: true

   # lineinfile is inadequate to delete lines between some specific line range
   - name: remove the node's entry in hosts
-    shell: 'sed -i "/^\[kube-node/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/hosts'
+    shell: 'sed -i "/^\[kube-node/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts'
     args:
       warn: false
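Node removal drains first, then deletes the API object. The manual equivalent of the two kubectl tasks (node name is a placeholder; --delete-emptydir-data is the flag spelling used by the new task and by recent kubectl releases):

    #!/bin/bash
    kubectl drain 192.168.1.100 --ignore-daemonsets --delete-emptydir-data --force
    kubectl delete node 192.168.1.100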
@ -1,5 +1,4 @@
 # WARNNING: this playbook will clean the kube-master node {{ NODE_TO_DEL }}
-# USAGE: easzctl del-master 1.1.1.1

 - hosts: localhost
   tasks:
@ -7,11 +6,12 @@
     when: "groups['kube-master']|length < 2 and NODE_TO_DEL in groups['kube-master']"

   - name: run kubectl drain @{{ NODE_TO_DEL }}
-    shell: "{{ bin_dir }}/kubectl drain {{ NODE_TO_DEL }} --ignore-daemonsets --delete-local-data"
-    ignore_errors: true
+    shell: "{{ base_dir }}/bin/kubectl drain {{ NODE_TO_DEL }} --ignore-daemonsets --delete-emptydir-data --force"
+    #ignore_errors: true

-  - name: clean node {{ NODE_TO_DEL }} if possible
-    shell: "ansible-playbook {{ base_dir }}/roles/clean/clean_node.yml \
+  - name: clean node {{ NODE_TO_DEL }}
+    shell: "ansible-playbook -i {{ base_dir }}/clusters/{{ CLUSTER }}/hosts \
+           {{ base_dir }}/roles/clean/clean_node.yml \
           -e NODE_TO_CLEAN={{ NODE_TO_DEL }} \
           -e DEL_MASTER=yes \
           -e DEL_NODE=yes \
@ -19,16 +19,14 @@
           || echo 'data not cleaned on {{ NODE_TO_DEL }}'"
     register: CLEAN_STATUS

-  - debug: var="CLEAN_STATUS.stdout"
-
-  - debug: var="CLEAN_STATUS.stderr"
+  - debug: var="CLEAN_STATUS"

   - name: run kubectl delete node {{ NODE_TO_DEL }}
-    shell: "{{ bin_dir }}/kubectl delete node {{ NODE_TO_DEL }}"
+    shell: "{{ base_dir }}/bin/kubectl delete node {{ NODE_TO_DEL }}"
     ignore_errors: true

   # lineinfile is inadequate to delete lines between some specific line range
   - name: remove the master's entry in hosts
-    shell: 'sed -i "/^\[kube-master/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/hosts'
+    shell: 'sed -i "/^\[kube-master/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts'
     args:
       warn: false
@ -1,6 +1,32 @@
 # WARNING: Upgrade the k8s cluster can be risky. Make sure you know what you are doing.
 # Read the guide: 'op/upgrade.md' .
-# Usage: ezctl <cluster_name> upgrade
+# Usage: ezctl upgrade <cluster_name>

+# check k8s version
+- hosts: kube-master
+  tasks:
+  - name: get running k8s version
+    shell: "{{ bin_dir }}/kube-apiserver --version"
+    register: RUNNING_VER
+    run_once: true
+
+  - name: print running version
+    debug: var="RUNNING_VER.stdout"
+    run_once: true
+
+  - name: get update version
+    shell: "{{ base_dir }}/bin/kube-apiserver --version"
+    register: UPDATE_VER
+    run_once: true
+    connection: local
+
+  - name: print update version
+    debug: var="UPDATE_VER.stdout"
+    run_once: true
+
+  - name: check version
+    fail: msg="running version is the same as the update version, UPDATE ABORT."
+    when: "RUNNING_VER.stdout == UPDATE_VER.stdout"

 # update masters
 - hosts:
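The new pre-flight play aborts an upgrade when the staged binary reports the same build as the running apiserver. The same guard as a shell sketch (paths as in the play, with base_dir assumed to be /etc/kubeasz):

    #!/bin/bash
    RUNNING=$(kube-apiserver --version)
    STAGED=$(/etc/kubeasz/bin/kube-apiserver --version)
    if [[ "$RUNNING" == "$STAGED" ]]; then
        echo "running version equals update version, abort" >&2; exit 1
    fi
    echo "upgrading: $RUNNING -> $STAGED"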
@ -23,7 +23,9 @@
   - debug: var="ETCD_CLUSTER_STATUS"

   - name: get a running ectd node
-    shell: 'echo -e "{{ ETCD_CLUSTER_STATUS.stdout }} {{ ETCD_CLUSTER_STATUS.stderr }}"|grep "is healthy"|sed -n "1p"|cut -d: -f2|cut -d/ -f3'
+    shell: 'echo -e "{{ ETCD_CLUSTER_STATUS.stdout }}" \
+           "{{ ETCD_CLUSTER_STATUS.stderr }}" \
+           |grep "is healthy"|sed -n "1p"|cut -d: -f2|cut -d/ -f3'
     register: RUNNING_NODE

   - debug: var="RUNNING_NODE.stdout"
@ -32,7 +34,7 @@
     set_fact: temp="{{lookup('pipe','date \"+%Y%m%d_%H%M\"')}}"

   # step2: backup data on the healthy member
-  - name: make a backup on etcd node
+  - name: make a backup on the etcd node
     shell: "mkdir -p /etcd_backup && cd /etcd_backup && \
           ETCDCTL_API=3 {{ bin_dir }}/etcdctl snapshot save snapshot_{{ temp }}.db"
     args:
@ -46,20 +48,5 @@
       flat: yes
     delegate_to: "{{ RUNNING_NODE.stdout }}"

-- hosts:
-  - localhost
-  tasks:
-  - name: Backing up ansible hosts-1
-    copy:
-      src: "{{ base_dir }}/hosts"
-      dest: "{{ cluster_dir }}/backup/hosts"
-    register: p
-
-  - name: Backing up ansible hosts-2
-    shell: "cd {{ cluster_dir }}/backup && \
-          cp -fp hosts hosts-$(date +'%Y%m%d%H%M')"
-    when: 'p is changed'
-
-  #- name: Backing up etcd snapshot with datetime
-  #  shell: "cd {{ cluster_dir }}/backup && \
-  #        cp -fp snapshot.db snapshot-$(date +'%Y%m%d%H%M').db"
+- name: update the latest backup
+  shell: 'cd {{ cluster_dir }}/backup/ && /bin/cp -f snapshot_{{ temp }}.db snapshot.db'
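The backup is a plain etcdctl v3 snapshot, with 'snapshot.db' kept as an alias for the newest file. Manual equivalent on an etcd member (TLS flags omitted; production etcd usually needs --cacert/--cert/--key):

    #!/bin/bash
    export ETCDCTL_API=3
    mkdir -p /etcd_backup && cd /etcd_backup
    TS=$(date +'%Y%m%d_%H%M')
    etcdctl snapshot save "snapshot_${TS}.db"
    /bin/cp -f "snapshot_${TS}.db" snapshot.db   # 'snapshot.db' always points at the latest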
@ -39,16 +39,35 @@
     ignore_errors: true

   # as k8s-network-plugins use host-network, '/var/run/docker/netns/default' must be umounted
-  - name: unmount docker filesystem-1
+  - name: umount docker filesystem-1
     mount: path=/var/run/docker/netns/default state=unmounted

-  - name: unmount docker filesystem-2
+  - name: umount docker filesystem-2
     mount: path=/var/lib/docker/overlay state=unmounted

+  - name: umount docker filesystem-3
+    shell: "echo /var/lib/docker/overlay2/*/merged|xargs umount || exit 0"
+    args:
+      warn: false
+    ignore_errors: true
+
+  - name: umount docker filesystem-4
+    shell: "echo /var/lib/docker/containers/*/mounts/shm|xargs umount || exit 0"
+    args:
+      warn: false
+    ignore_errors: true
+
+  - name: umount docker filesystem-5
+    shell: "echo /var/run/docker/netns/*|xargs umount || exit 0"
+    args:
+      warn: false
+    ignore_errors: true
+
   - name: remove files and dirs
     file: name={{ item }} state=absent
     with_items:
     - "/var/lib/docker/"
+    - "/var/lib/dockershim/"
     - "/var/run/docker/"
     - "/etc/docker/"
     - "/etc/systemd/system/docker.service"
@ -57,6 +76,7 @@
     - "/etc/bash_completion.d/docker"
     - "/usr/bin/docker"
-    when: "'kubeasz' not in install_info.stdout"
+    ignore_errors: true
+    when: CONTAINER_RUNTIME == 'docker'

   - block:
@ -82,6 +102,7 @@
     - "/opt/containerd/"
     - "/var/lib/containerd/"
     - "/var/run/containerd/"
+    ignore_errors: true
     when: CONTAINER_RUNTIME == 'containerd'

   - name: remove files and dirs2
@ -1,13 +1,13 @@
 #
-- import_tasks: clean_etcd.yml
-  when: 'DEL_ETCD == "yes"'
-
 - import_tasks: clean_master.yml
   when: 'DEL_MASTER == "yes"'

 - import_tasks: clean_node.yml
   when: 'DEL_NODE == "yes"'

+- import_tasks: clean_etcd.yml
+  when: 'DEL_ETCD == "yes"'
+
 - import_tasks: clean_lb.yml
   when: 'DEL_LB == "yes"'
@ -3,5 +3,5 @@
 db_to_restore: "snapshot.db"

 # etcd 集群间通信的IP和端口, 根据etcd组成员自动生成
-TMP_NODES: "{% for h in groups['etcd'] %}{{ hostvars[h]['NODE_NAME'] }}=https://{{ h }}:2380,{% endfor %}"
+TMP_NODES: "{% for h in groups['etcd'] %}etcd-{{ h }}=https://{{ h }}:2380,{% endfor %}"
 ETCD_NODES: "{{ TMP_NODES.rstrip(',') }}"
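With NODE_NAME gone, member names are derived from the host IP. What the two variables evaluate to for a hypothetical two-member group:

    # groups['etcd'] = ['192.168.1.1', '192.168.1.2'] renders:
    # TMP_NODES  = "etcd-192.168.1.1=https://192.168.1.1:2380,etcd-192.168.1.2=https://192.168.1.2:2380,"
    # ETCD_NODES = the same string with the trailing comma removed by rstrip(',')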
@ -13,18 +13,25 @@
       dest: "/etcd_backup/snapshot.db"

   - name: 清理上次备份恢复数据
-    file: name=/etcd_backup/{{ NODE_NAME }}.etcd state=absent
+    file: name=/etcd_backup/etcd-{{ inventory_hostname }}.etcd state=absent

   - name: etcd 数据恢复
     shell: "cd /etcd_backup && \
           ETCDCTL_API=3 {{ bin_dir }}/etcdctl snapshot restore snapshot.db \
-          --name {{ NODE_NAME }} \
+          --name etcd-{{ inventory_hostname }} \
           --initial-cluster {{ ETCD_NODES }} \
           --initial-cluster-token etcd-cluster-0 \
           --initial-advertise-peer-urls https://{{ inventory_hostname }}:2380"

   - name: 恢复数据至etcd 数据目录
-    shell: "cp -rf /etcd_backup/{{ NODE_NAME }}.etcd/member /var/lib/etcd/"
+    shell: "cp -rf /etcd_backup/etcd-{{ inventory_hostname }}.etcd/member /var/lib/etcd/"

   - name: 重启etcd 服务
     service: name=etcd state=restarted
+
+  - name: 以轮询的方式等待服务同步完成
+    shell: "systemctl status etcd.service|grep Active"
+    register: etcd_status
+    until: '"running" in etcd_status.stdout'
+    retries: 8
+    delay: 8
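Manual equivalent of the restore sequence for one member, using the new etcd-<ip> naming (values are placeholders; every member runs the same restore with its own name and peer URL before the service restart):

    #!/bin/bash
    export ETCDCTL_API=3
    cd /etcd_backup
    etcdctl snapshot restore snapshot.db \
      --name etcd-192.168.1.1 \
      --initial-cluster etcd-192.168.1.1=https://192.168.1.1:2380 \
      --initial-cluster-token etcd-cluster-0 \
      --initial-advertise-peer-urls https://192.168.1.1:2380
    cp -rf etcd-192.168.1.1.etcd/member /var/lib/etcd/
    systemctl restart etcd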
@@ -24,17 +24,24 @@
  shell: "{{ base_dir }}/bin/kubectl config set-cluster {{ CLUSTER_NAME }} \
        --certificate-authority={{ cluster_dir }}/ssl/ca.pem \
        --embed-certs=true \
        --server={{ KUBE_APISERVER }}"
        --server={{ KUBE_APISERVER }} \
        --kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"

- name: set the client authentication parameters
  shell: "{{ base_dir }}/bin/kubectl config set-credentials {{ USER_NAME }} \
        --client-certificate={{ cluster_dir }}/ssl/{{ USER_NAME }}.pem \
        --embed-certs=true \
        --client-key={{ cluster_dir }}/ssl/{{ USER_NAME }}-key.pem"
        --client-key={{ cluster_dir }}/ssl/{{ USER_NAME }}-key.pem \
        --kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"

- name: set the context parameters
  shell: "{{ base_dir }}/bin/kubectl config set-context {{ CONTEXT_NAME }} \
        --cluster={{ CLUSTER_NAME }} --user={{ USER_NAME }}"
        --cluster={{ CLUSTER_NAME }} --user={{ USER_NAME }} \
        --kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"

- name: select the default context
  shell: "{{ base_dir }}/bin/kubectl config use-context {{ CONTEXT_NAME }}"
  shell: "{{ base_dir }}/bin/kubectl config use-context {{ CONTEXT_NAME }} \
        --kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"

- name: install the kubeconfig
  copy: src={{ cluster_dir }}/kubectl.kubeconfig dest=~/.kube/config

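Adding --kubeconfig to every command writes the admin config into the per-cluster directory instead of the operator's default ~/.kube/config, so several clusters can coexist on one deploy host. The equivalent manual sequence, with hypothetical names (test-k8s, admin) standing in for the template variables:

    KCFG=/etc/kubeasz/clusters/test-k8s/kubectl.kubeconfig   # hypothetical cluster dir
    SSL=/etc/kubeasz/clusters/test-k8s/ssl
    kubectl config set-cluster test-k8s --certificate-authority=$SSL/ca.pem \
        --embed-certs=true --server=https://192.168.1.1:6443 --kubeconfig=$KCFG
    kubectl config set-credentials admin --client-certificate=$SSL/admin.pem \
        --embed-certs=true --client-key=$SSL/admin-key.pem --kubeconfig=$KCFG
    kubectl config set-context context-test-k8s --cluster=test-k8s --user=admin --kubeconfig=$KCFG
    kubectl config use-context context-test-k8s --kubeconfig=$KCFG
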
@@ -3,6 +3,7 @@
  with_items:
  - "{{ cluster_dir }}/ssl"
  - "{{ cluster_dir }}/backup"
  - "~/.kube"

- name: set permissions on the local bin directory
  file: path={{ base_dir }}/bin state=directory mode=0755 recurse=yes

@@ -16,11 +16,15 @@

- name: debug info
  debug: var="docker_ver"
  connection: local
  run_once: true
  tags: upgrade_docker, download_docker

- name: convert the docker version string to a float
  set_fact:
    DOCKER_VER: "{{ docker_ver.stdout.split('.')[0]|int + docker_ver.stdout.split('.')[1]|int/100 }}"
  connection: local
  run_once: true
  tags: upgrade_docker, download_docker

- name: debug info

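The set_fact turns a version string such as 18.09.6 into the float 18.09 (major + minor/100) so that later tasks can compare versions numerically. The same arithmetic in plain bash, for illustration only:

    ver="18.09.6"            # hypothetical output of: docker version -f '{{.Server.Version}}'
    major=${ver%%.*}
    minor=$(echo "$ver" | cut -d. -f2)
    awk -v M="$major" -v m="$minor" 'BEGIN { printf "%.2f\n", M + m/100 }'   # prints 18.09
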
@@ -1,5 +1,5 @@
# IPs and ports for communication between etcd cluster members, generated automatically from the etcd group
TMP_NODES: "{% for h in groups['etcd'] %}{{ hostvars[h]['NODE_NAME'] }}=https://{{ h }}:2380,{% endfor %}"
TMP_NODES: "{% for h in groups['etcd'] %}etcd-{{ h }}=https://{{ h }}:2380,{% endfor %}"
ETCD_NODES: "{{ TMP_NODES.rstrip(',') }}"

# initial state of the etcd cluster: new/existing

@@ -1,9 +1,5 @@
- name: prepare some dirs
  file: name={{ item }} state=directory
  with_items:
  - "{{ bin_dir }}"
  - "{{ ca_dir }}"
  - "/var/lib/etcd"    # etcd working directory
  file: name=/var/lib/etcd state=directory mode=0700

- name: download the etcd binaries
  copy: src={{ base_dir }}/bin/{{ item }} dest={{ bin_dir }}/{{ item }} mode=0755

@@ -33,7 +29,7 @@

- name: create the etcd systemd unit file
  template: src=etcd.service.j2 dest=/etc/systemd/system/etcd.service
  tags: upgrade_etcd
  tags: upgrade_etcd, restart_etcd

- name: enable the etcd service at boot
  shell: systemctl enable etcd

@@ -42,7 +38,7 @@
- name: start the etcd service
  shell: systemctl daemon-reload && systemctl restart etcd
  ignore_errors: true
  tags: upgrade_etcd
  tags: upgrade_etcd, restart_etcd

- name: poll until the service finishes syncing
  shell: "systemctl status etcd.service|grep Active"

@@ -50,4 +46,4 @@
  until: '"running" in etcd_status.stdout'
  retries: 8
  delay: 8
  tags: upgrade_etcd
  tags: upgrade_etcd, restart_etcd

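Tagging the unit-file, restart, and status-poll tasks with restart_etcd lets just the etcd restart path be re-run on its own; assuming a cluster inventory at clusters/test-k8s (the name and playbook path are illustrative and may differ per release), something like:

    ansible-playbook -i clusters/test-k8s/hosts playbooks/02.etcd.yml -t restart_etcd
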
@@ -9,7 +9,7 @@ Documentation=https://github.com/coreos
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart={{ bin_dir }}/etcd \
  --name={{ NODE_NAME }} \
  --name=etcd-{{ inventory_hostname }} \
  --cert-file={{ ca_dir }}/etcd.pem \
  --key-file={{ ca_dir }}/etcd-key.pem \
  --peer-cert-file={{ ca_dir }}/etcd.pem \

@@ -72,6 +72,33 @@
        systemctl restart kube-controller-manager && systemctl restart kube-scheduler"
  tags: upgrade_k8s, restart_master

# wait (polling) until kube-apiserver has started
- name: poll until kube-apiserver is up
  shell: "systemctl status kube-apiserver.service|grep Active"
  register: api_status
  until: '"running" in api_status.stdout'
  retries: 10
  delay: 3
  tags: upgrade_k8s, restart_master

# wait (polling) until kube-controller-manager has started
- name: poll until kube-controller-manager is up
  shell: "systemctl status kube-controller-manager.service|grep Active"
  register: cm_status
  until: '"running" in cm_status.stdout'
  retries: 8
  delay: 3
  tags: upgrade_k8s, restart_master

# wait (polling) until kube-scheduler has started
- name: poll until kube-scheduler is up
  shell: "systemctl status kube-scheduler.service|grep Active"
  register: sch_status
  until: '"running" in sch_status.stdout'
  retries: 8
  delay: 3
  tags: upgrade_k8s, restart_master

- name: poll until the master services are up
  command: "{{ bin_dir }}/kubectl get node"
  register: result

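Each of the three new polls is the same until/retries idiom; expressed as a plain shell loop it amounts to (a sketch for kube-apiserver, 10 tries at 3-second intervals):

    for i in $(seq 1 10); do
        systemctl status kube-apiserver.service | grep Active | grep -q running && break
        sleep 3
    done
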
@@ -10,7 +10,7 @@ ExecStart={{ bin_dir }}/kube-controller-manager \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file={{ ca_dir }}/ca.pem \
  --cluster-signing-key-file={{ ca_dir }}/ca-key.pem \
  --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --leader-elect=true \
  --node-cidr-mask-size={{ NODE_CIDR_LEN }} \
  --root-ca-file={{ ca_dir }}/ca.pem \

@@ -5,7 +5,7 @@ Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart={{ bin_dir }}/kube-scheduler \
  --address=127.0.0.1 \
  --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig \
  --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
  --leader-elect=true \
  --v=2
Restart=always

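Both control-plane unit files now point at a fixed kubeconfig path on the node itself rather than at the per-cluster directory on the deploy host, so the rendered units no longer depend on a deploy-host variable. A quick check on a master node (a sketch):

    ls -l /etc/kubernetes/kube-controller-manager.kubeconfig \
          /etc/kubernetes/kube-scheduler.kubeconfig
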
@@ -65,7 +65,6 @@
  tags: upgrade_k8s, restart_node

##------- kube-proxy section ----------------

- name: replace the apiserver address in kube-proxy.kubeconfig
  lineinfile:
    dest: /etc/kubernetes/kube-proxy.kubeconfig

@@ -84,12 +83,21 @@
  shell: systemctl daemon-reload && systemctl restart kube-proxy
  tags: reload-kube-proxy, upgrade_k8s, restart_node

# wait (polling) until kube-proxy has started
- name: poll until kube-proxy is up
  shell: "systemctl status kube-proxy.service|grep Active"
  register: kubeproxy_status
  until: '"running" in kubeproxy_status.stdout'
  retries: 4
  delay: 2
  tags: reload-kube-proxy, upgrade_k8s, restart_node

# wait (polling) until kubelet has started
- name: poll until kubelet is up
  shell: "systemctl status kubelet.service|grep Active"
  register: kubelet_status
  until: '"running" in kubelet_status.stdout'
  retries: 8
  retries: 4
  delay: 2
  tags: reload-kube-proxy, upgrade_k8s, restart_node

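For reference, a one-line check that both node daemons survived the restart:

    systemctl is-active kubelet.service kube-proxy.service   # expect: active / active
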
@@ -10,7 +10,7 @@ ExecStart={{ bin_dir }}/kube-proxy \
  --bind-address={{ inventory_hostname }} \
  --cluster-cidr={{ CLUSTER_CIDR }} \
  --hostname-override={{ inventory_hostname }} \
  --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
  --logtostderr=true \
  --proxy-mode={{ PROXY_MODE }}
Restart=always

@@ -11,3 +11,6 @@ KUBE_APISERVER: "{%- if inventory_hostname in groups['kube-master'] -%} \
        https://{{ groups['kube-master'][0] }}:6443 \
        {%- endif -%} \
        {%- endif -%}"

# when master nodes are added or removed, the nodes must re-configure haproxy
MASTER_CHG: "no"

@@ -38,7 +38,7 @@
    line: 'source <(kubectl completion bash)'

- name: distribute the kubeconfig file
  copy: src=/root/.kube/config dest=/root/.kube/config
  copy: src={{ cluster_dir }}/kubectl.kubeconfig dest=/root/.kube/config

- name: distribute the kube-proxy.kubeconfig file
  copy: src={{ cluster_dir }}/kube-proxy.kubeconfig dest=/etc/kubernetes/kube-proxy.kubeconfig

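Since the kubeconfig generated on the deploy host is now copied out directly, a master node can be verified right after the play (a sketch):

    kubectl --kubeconfig /root/.kube/config get node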