mirror of https://github.com/easzlab/kubeasz.git
dev-3.0 mainline fixes
parent c69be3f0da / commit 524e2390fa
@@ -19,3 +19,4 @@ roles/cluster-backup/files/*
 # cluster backups
 .cluster/
+clusters/
@@ -8,7 +8,7 @@ INSTALL_SOURCE: "online"
 ############################
 # role:chrony
 ############################
-# NTP time-source servers
+# NTP time-source servers [IMPORTANT: clocks of all cluster nodes must stay in sync]
 ntp_servers:
   - "ntp1.aliyun.com"
   - "time1.cloud.tencent.com"
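The strengthened comment above is the whole point of role:chrony: etcd quorum and TLS validation misbehave when node clocks drift. A quick hedged check on any node, assuming chrony is the time service this role configures:

    chronyc sources -v   # list configured time sources and their reachability
    chronyc tracking     # show the current offset from the selected reference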
@@ -37,6 +37,7 @@ CONTEXT_NAME: "context-{{ CLUSTER_NAME }}-{{ USER_NAME }}"
 ############################
 # role:runtime [containerd,docker]
 ############################
 # ------------------------------------------- containerd
 # [.] enable mirrored container registries
 ENABLE_MIRROR_REGISTRY: true
@@ -46,6 +47,7 @@ SANDBOX_IMAGE: "easzlab/pause-amd64:3.2"
 # [containerd] persistent storage directory for containerd
 CONTAINERD_STORAGE_DIR: "/var/lib/containerd"

 # ------------------------------------------- docker
 # [docker] storage directory for docker
 DOCKER_STORAGE_DIR: "/var/lib/docker"
@@ -97,17 +99,19 @@ BALANCE_ALG: "roundrobin"
 ############################
 # role:network [flannel,calico,cilium,kube-ovn,kube-router]
 ############################
 # ------------------------------------------- flannel
 # [flannel] flannel backend: "host-gw", "vxlan", etc.
 FLANNEL_BACKEND: "vxlan"
 DIRECT_ROUTING: false

 # [flannel] flanneld_image: "quay.io/coreos/flannel:v0.10.0-amd64"
-flannelVer: "v0.13.0-amd64"
+flannelVer: "__flannel__"
 flanneld_image: "easzlab/flannel:{{ flannelVer }}"

 # [flannel] offline image tarball
 flannel_offline: "flannel_{{ flannelVer }}.tar"

 # ------------------------------------------- calico
 # [calico] setting CALICO_IPV4POOL_IPIP="off" can improve network performance; see docs/setup/calico.md for the preconditions
 CALICO_IPV4POOL_IPIP: "Always"
@@ -118,8 +122,7 @@ IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube-master'][0] }}"
 CALICO_NETWORKING_BACKEND: "brid"

 # [calico] supported calico versions: [v3.3.x] [v3.4.x] [v3.8.x] [v3.15.x]
-calicoVer: "v3.15.3"
-calico_ver: "{{ calicoVer }}"
+calico_ver: "__calico__"

 # [calico] calico major.minor version
 calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"
@@ -127,21 +130,25 @@ calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"
 # [calico] offline image tarball
 calico_offline: "calico_{{ calico_ver }}.tar"

 # ------------------------------------------- cilium
 # [cilium] size of the etcd cluster created by CILIUM_ETCD_OPERATOR: 1,3,5,7...
 ETCD_CLUSTER_SIZE: 1

 # [cilium] image version
-cilium_ver: "v1.4.1"
+cilium_ver: "__cilium__"

 # [cilium] offline image tarball
 cilium_offline: "cilium_{{ cilium_ver }}.tar"

 # ------------------------------------------- kube-ovn
 # [kube-ovn] node for OVN DB and OVN Control Plane; defaults to the first master node
 OVN_DB_NODE: "{{ groups['kube-master'][0] }}"

 # [kube-ovn] offline image tarball
-kube_ovn_offline: "kube_ovn_0.9.1.tar"
+kube_ovn_ver: "__kube_ovn__"
+kube_ovn_offline: "kube_ovn_{{ kube_ovn_ver }}.tar"

 # ------------------------------------------- kube-router
 # [kube-router] public clouds usually require ipinip to stay enabled; on your own infrastructure it can be set to "subnet"
 OVERLAY_TYPE: "full"
@@ -149,7 +156,7 @@ OVERLAY_TYPE: "full"
 FIREWALL_ENABLE: "true"

 # [kube-router] kube-router image version
-kube_router_ver: "v0.3.1"
+kube_router_ver: "__kube_router__"
 busybox_ver: "1.28.4"

 # [kube-router] kube-router offline image tarball
@@ -160,19 +167,18 @@ busybox_offline: "busybox_{{ busybox_ver }}.tar"
 ############################
 # role:cluster-addon
 ############################
-# automatic dns install; 'dns_backend' can be "coredns" or "kubedns"
+# automatic coredns install
 dns_install: "yes"
-dns_backend: "coredns"
-corednsVer: "1.7.1"
+corednsVer: "__coredns__"

 # automatic metrics server install
 metricsserver_install: "yes"
-metricsVer: "v0.3.6"
+metricsVer: "__metrics__"

 # automatic dashboard install
 dashboard_install: "yes"
-dashboardVer: "v2.1.0"
-dashboardMetricsScraperVer: "v1.0.6"
+dashboardVer: "__dashboard__"
+dashboardMetricsScraperVer: "__dash_metrics__"


 ############################
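The `__xxx__` strings introduced above are placeholders, not literal versions: `ezctl new` substitutes them from the pinned versions in `ezdown` (see the sed block added to new() later in this diff). Roughly, for one component:

    corednsVer=$(grep 'corednsVer=' ezdown | cut -d'=' -f2)    # yields e.g. 1.7.1
    sed -i "s/__coredns__/$corednsVer/g" "clusters/<cluster>/config.yml"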
@@ -60,4 +60,4 @@ base_dir="/etc/kubeasz"
 cluster_dir="{{ base_dir }}/clusters/_cluster_name_"

 # CA and other components cert/key Directory
-ca_dir="{{ cluster_dir }}/ssl"
+ca_dir="/etc/kubernetes/ssl"
@@ -57,11 +57,11 @@ CLUSTER_DNS_DOMAIN="cluster.local."
 # Binaries Directory
 bin_dir="/opt/kube/bin"

-# CA and other components cert/key Directory
-ca_dir="/etc/kubernetes/ssl"
-
 # Deploy Directory (kubeasz workspace)
 base_dir="/etc/kubeasz"

 # Directory for a specific cluster
 cluster_dir="{{ base_dir }}/clusters/_cluster_name_"
+
+# CA and other components cert/key Directory
+ca_dir="/etc/kubernetes/ssl"
ezctl (356 changed lines)
@@ -12,23 +12,44 @@ function usage() {
 Cluster setups:
     list                         to list all of the managed clusters
     new <cluster>                to start a new k8s deploy with name 'cluster'
-    setup <cluster> [step]       to setup a cluster, also supporting a step-by-step way
+    setup <cluster> <step>       to setup a cluster, also supporting a step-by-step way
     start <cluster>              to start all of the k8s services stopped by 'ezctl stop'
     stop <cluster>               to stop all of the k8s services temporarily
     upgrade <cluster>            to upgrade the k8s cluster
     destroy <cluster>            to destroy the k8s cluster
     start-aio                    to quickly setup an all-in-one cluster with 'default' settings

 Cluster ops:
-    add-etcd <cluster> <args>    to add an etcd node to the etcd cluster
-    add-master <cluster> <args>  to add a master node to the k8s cluster
-    add-node <cluster> <args>    to add a work node to the k8s cluster
+    add-etcd <cluster> <ip>      to add an etcd node to the etcd cluster
+    add-master <cluster> <ip>    to add a master node to the k8s cluster
+    add-node <cluster> <ip>      to add a work node to the k8s cluster
     del-etcd <cluster> <ip>      to delete an etcd node from the etcd cluster
     del-master <cluster> <ip>    to delete a master node from the k8s cluster
     del-node <cluster> <ip>      to delete a work node from the k8s cluster
-    upgrade <cluster>            to upgrade the k8s cluster
-    destroy <cluster>            to destroy the current cluster, '--purge' to also delete the context

 Use "ezctl help <command>" for more information about a given command.
EOF
}

+function usage-setup(){
+  echo -e "\033[33mUsage:\033[0m ezctl setup <cluster> <step>"
+  cat <<EOF
+available steps:
+    01  prepare          to prepare CA/certs & kubeconfig & other system settings
+    02  etcd             to setup the etcd cluster
+    03  runtime          to setup the container runtime (docker or containerd)
+    04  kube-master      to setup the master nodes
+    05  kube-node        to setup the worker nodes
+    06  network          to setup the network plugin
+    07  cluster-addon    to setup other useful plugins
+    all                  to run 01~07 all at once
+
+examples: ./ezctl setup test-k8s 01
+          ./ezctl setup test-k8s 02
+          ./ezctl setup test-k8s all
+EOF
+}

 function logger() {
   TIMESTAMP=$(date +'%Y-%m-%d %H:%M:%S')
   case "$1" in
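Put together, the new command surface implies this end-to-end flow (the cluster name is the one usage-setup itself uses as an example):

    ./ezctl new test-k8s          # generates clusters/test-k8s/{hosts,config.yml}
    # edit clusters/test-k8s/hosts and clusters/test-k8s/config.yml
    ./ezctl setup test-k8s all    # or run steps 01..07 one by one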
@@ -51,6 +72,9 @@ function logger() {

 function help-info() {
   case "$1" in
+    (setup)
+        usage-setup
+        ;;
     (add-etcd)
         echo -e "Usage: easzctl add-etcd <new_etcd_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-etcd.md'"
         ;;
@@ -81,7 +105,7 @@ function help-info() {
 function new() {
   # check if already existed
   [[ -d "clusters/$1" ]] && { logger error "cluster: $1 already existed"; exit 1; }
-  [[ "$1" == default ]] && { logger error "name 'default' is reserved for `ezctl start-aio`"; exit 1; }
+  [[ "$1" == default ]] && { logger error "name 'default' is reserved"; exit 1; }

   logger debug "generate custom cluster files in clusters/$1"
   mkdir -p "clusters/$1"
@@ -89,6 +113,28 @@ function new() {
   sed -i "s/_cluster_name_/$1/g" "clusters/$1/hosts"
   cp example/config.yml "clusters/$1/config.yml"

+  logger debug "set version of common plugins"
+  calicoVer=$(grep 'calicoVer=' ezdown|cut -d'=' -f2)
+  ciliumVer=$(grep 'ciliumVer=' ezdown|cut -d'=' -f2)
+  flannelVer=$(grep 'flannelVer=' ezdown|cut -d'=' -f2)
+  kubeRouterVer=$(grep 'kubeRouterVer=' ezdown|cut -d'=' -f2)
+  kubeOvnVer=$(grep 'kubeOvnVer=' ezdown|cut -d'=' -f2)
+  corednsVer=$(grep 'corednsVer=' ezdown|cut -d'=' -f2)
+  dashboardVer=$(grep 'dashboardVer=' ezdown|cut -d'=' -f2)
+  dashboardMetricsScraperVer=$(grep 'dashboardMetricsScraperVer=' ezdown|cut -d'=' -f2)
+  metricsVer=$(grep 'metricsVer=' ezdown|cut -d'=' -f2)
+
+  sed -i -e "s/__flannel__/$flannelVer/g" \
+      -e "s/__calico__/$calicoVer/g" \
+      -e "s/__cilium__/$ciliumVer/g" \
+      -e "s/__kube_ovn__/$kubeOvnVer/g" \
+      -e "s/__kube_router__/$kubeRouterVer/g" \
+      -e "s/__coredns__/$corednsVer/g" \
+      -e "s/__dashboard__/$dashboardVer/g" \
+      -e "s/__dash_metrics__/$dashboardMetricsScraperVer/g" \
+      -e "s/__metrics__/$metricsVer/g" "clusters/$1/config.yml"

   logger debug "cluster $1: files successfully created."
   logger info "next steps 1: to config 'clusters/$1/hosts'"
   logger info "next steps 2: to config 'clusters/$1/config.yml'"
@@ -98,12 +144,83 @@ function setup() {
   [[ -d "clusters/$1" ]] || { logger error "invalid config, run 'ezctl new $1' first"; return 1; }
   [[ -f "bin/kube-apiserver" ]] || { logger error "no binaries found, run 'ezdown -D' first"; return 1; }

-  logger info "\n cluster:$1 setup begins in 5s, press any key to abort\n:"
+  PLAY_BOOK="dummy.yml"
+  case "$2" in
+  (01)
+      PLAY_BOOK="01.prepare.yml"
+      ;;
+  (02)
+      PLAY_BOOK="02.etcd.yml"
+      ;;
+  (03)
+      PLAY_BOOK="03.runtime.yml"
+      ;;
+  (04)
+      PLAY_BOOK="04.kube-master.yml"
+      ;;
+  (05)
+      PLAY_BOOK="05.kube-node.yml"
+      ;;
+  (06)
+      PLAY_BOOK="06.network.yml"
+      ;;
+  (07)
+      PLAY_BOOK="07.cluster-addon.yml"
+      ;;
+  (90)
+      PLAY_BOOK="90.setup.yml"
+      ;;
+  (all)
+      PLAY_BOOK="90.setup.yml"
+      ;;
+  (*)
+      usage-setup
+      exit 1
+      ;;
+  esac
+
+  logger info "cluster:$1 setup begins in 5s, press any key to abort:\n"
   ! (read -t5 -n1 ANS) || { logger warn "setup aborted"; return 1; }

-  ansible-playbook -i "clusters/$1/hosts" -e "@clusters/$1/config.yml" playbooks/90.setup.yml || return 1
+  ansible-playbook -i "clusters/$1/hosts" -e "@clusters/$1/config.yml" "playbooks/$PLAY_BOOK" || return 1
 }

+function cmd() {
+  [[ -d "clusters/$1" ]] || { logger error "invalid config, run 'ezctl new $1' first"; return 1; }
+
+  PLAY_BOOK="dummy.yml"
+  case "$2" in
+  (start)
+      PLAY_BOOK="91.start.yml"
+      ;;
+  (stop)
+      PLAY_BOOK="92.stop.yml"
+      ;;
+  (upgrade)
+      PLAY_BOOK="93.upgrade.yml"
+      ;;
+  (backup)
+      PLAY_BOOK="94.backup.yml"
+      ;;
+  (restore)
+      PLAY_BOOK="95.restore.yml"
+      ;;
+  (destroy)
+      PLAY_BOOK="99.clean.yml"
+      ;;
+  (*)
+      usage
+      exit 1
+      ;;
+  esac
+
+  logger info "cluster:$1 $2 begins in 5s, press any key to abort:\n"
+  ! (read -t5 -n1 ANS) || { logger warn "$2 aborted"; return 1; }
+
+  ansible-playbook -i "clusters/$1/hosts" -e "@clusters/$1/config.yml" "playbooks/$PLAY_BOOK" || return 1
+}
+

 function list() {
     [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
     CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
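For reference, with the new step dispatch a call such as `ezctl setup test-k8s 02` expands to:

    ansible-playbook -i clusters/test-k8s/hosts -e @clusters/test-k8s/config.yml playbooks/02.etcd.yml

and `ezctl stop test-k8s` goes through cmd() to playbooks/92.stop.yml in exactly the same way.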
@@ -316,18 +433,6 @@ function install_context() {
   fi
 }

-function setup() {
-    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
-    [ -f "$BASEPATH/bin/kube-apiserver" ] || { echo "[ERROR] no binaries found, download them first"; return 1; }
-    [ -f "$BASEPATH/hosts" ] || { echo "[ERROR] no ansible hosts found, read 'docs/setup/00-planning_and_overall_intro.md'"; return 1; }
-    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
-    echo -e "\n[INFO] setup cluster with context: $CLUSTER"
-    echo -e "[INFO] setup begin in 5s, press any key to abort\n:"
-    ! (read -t5 -n1 ANS) || { echo "[WARN] setup aborted"; return 1; }
-    ansible-playbook $BASEPATH/90.setup.yml || return 1
-    save_context
-}

 function list() {
     [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
     CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
@@ -349,30 +454,6 @@ function list() {
     done
 }

-function destroy() {
-    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
-    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
-    echo -n "[WARN] DELETE cluster: $CLUSTER, Continue? (y/n): "
-    read -t10 -n1 ANS || { echo -e "\n[WARN] timeout, destroy aborted"; return 1; }
-    if [[ -n $ANS && $ANS == y ]];then
-        echo -e "\n[INFO] clean all nodes of cluster in 5s"
-        sleep 5
-        ansible-playbook $BASEPATH/99.clean.yml
-        rm -f $BASEPATH/.cluster/$CLUSTER/config
-        [ "$#" -gt 0 ] || { return 0; }
-        if [[ -n $1 && $1 == --purge ]];then
-            echo "[INFO] delete current context"
-            rm -rf $BASEPATH/.cluster/$CLUSTER
-            rm -rf $BASEPATH/hosts /root/.kube/*
-            echo "[INFO] change current context to default"
-            echo default > $BASEPATH/.cluster/current_cluster
-            install_context
-        fi
-    else
-        echo -e "\n[WARN] destroy aborted"; return 1;
-    fi
-}

 function start-aio(){
     checkout aio
     set +u
@@ -436,89 +517,110 @@ function basic-auth(){
 }

 ### Main Lines ##################################################
 function main() {
   BASEPATH=$(cd `dirname $0`; pwd)
   cd "$BASEPATH"

   BASEPATH=/etc/kubeasz
   # check workdir
   [[ "$BASEPATH" == "/etc/kubeasz" ]] || { logger error "workdir should be '/etc/kubeasz'"; exit 1; }

   [ "$#" -gt 0 ] || { usage >&2; exit 2; }
   # check bash shell
   readlink /proc/$$/exe|grep -q "dash" && { logger error "you should use bash shell only"; exit 1; }

   case "$1" in
   ### in-cluster operations #####################
   (add-etcd)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: add an etcd node"
       CMD="add-etcd $2"
       ;;
   (add-master)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: add a k8s master node"
       CMD="add-master $2 ${@:3}"
       ;;
   (add-node)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: add a k8s work node"
       CMD="add-node $2 ${@:3}"
       ;;
   (del-etcd)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: delete an etcd node"
       CMD="del-etcd $2"
       ;;
   (del-master)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: delete a kube-master"
       CMD="del-master $2"
       ;;
   (del-node)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: delete a kube-node"
       CMD="del-node $2"
       ;;
   (upgrade)
       ACTION="Action: upgrade the cluster"
       CMD="upgrade"
       ;;
   ### cluster-wide operations #######################
   (checkout)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: checkout cluster context"
       CMD="checkout $2"
       ;;
   (destroy)
       ACTION="Action: destroy current cluster"
       if [ "$#" -gt 1 ];then
           CMD="destroy $2"
       else
           CMD="destroy"
       fi
       ;;
   (list)
       ACTION="Action: list all of clusters managed"
       CMD="list"
       ;;
   (setup)
       ACTION="Action: setup cluster with current context"
       CMD="setup"
       ;;
   (start-aio)
       ACTION="Action: start an AllInOne cluster"
       CMD="start-aio"
       ;;
   (help)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       help-info $2
       exit 0
       ;;
   ### extra operations ##############################
   (basic-auth)
       [ "$#" -gt 1 ] || { help-info $1; exit 2; }
       ACTION="Action: enable/disable apiserver's basic-auth"
       CMD="basic-auth $*"
       ;;
   (*)
       usage
       exit 0
       ;;
   esac

   process_cmd
   # check 'ansible' executable
   which ansible > /dev/null 2>&1 || { logger error "need 'ansible', try: 'pip install ansible==2.6.18'"; exit 1; }

   [ "$#" -gt 0 ] || { usage >&2; exit 2; }

   case "$1" in
   ### in-cluster operations #####################
   (add-etcd)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: add an etcd node"
       CMD="add-etcd $2"
       ;;
   (add-master)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: add a k8s master node"
       CMD="add-master $2 ${@:3}"
       ;;
   (add-node)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: add a k8s work node"
       CMD="add-node $2 ${@:3}"
       ;;
   (del-etcd)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: delete an etcd node"
       CMD="del-etcd $2"
       ;;
   (del-master)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: delete a kube-master"
       CMD="del-master $2"
       ;;
   (del-node)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       ACTION="Action: delete a kube-node"
       CMD="del-node $2"
       ;;
   ### cluster-wide operations #######################
   (list)
       ACTION="Action: list all of clusters managed"
       CMD="list"
       ;;
   (new)
       [ "$#" -eq 2 ] || { usage >&2; exit 2; }
       new "$2"
       ;;
   (setup)
       [ "$#" -eq 3 ] || { usage-setup >&2; exit 2; }
       setup "${@:2}"
       ;;
   (start)
       [ "$#" -eq 2 ] || { usage >&2; exit 2; }
       cmd "$2" start
       ;;
   (stop)
       [ "$#" -eq 2 ] || { usage >&2; exit 2; }
       cmd "$2" stop
       ;;
   (upgrade)
       [ "$#" -eq 2 ] || { usage >&2; exit 2; }
       cmd "$2" upgrade
       ;;
   (backup)
       [ "$#" -eq 2 ] || { usage >&2; exit 2; }
       cmd "$2" backup
       ;;
   (restore)
       [ "$#" -eq 2 ] || { usage >&2; exit 2; }
       cmd "$2" restore
       ;;
   (destroy)
       [ "$#" -eq 2 ] || { usage >&2; exit 2; }
       cmd "$2" destroy
       ;;
   (start-aio)
       ACTION="Action: start an AllInOne cluster"
       CMD="start-aio"
       ;;
   (help)
       [ "$#" -gt 1 ] || { usage >&2; exit 2; }
       help-info $2
       exit 0
       ;;
   ### extra operations ##############################
   (basic-auth)
       [ "$#" -gt 1 ] || { help-info $1; exit 2; }
       ACTION="Action: enable/disable apiserver's basic-auth"
       CMD="basic-auth $*"
       ;;
   (*)
       usage
       exit 0
       ;;
   esac
 }

 main "$@"
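With the reworked dispatch, cluster ops now take an explicit cluster name plus an IP, matching the new usage; for example (node address is hypothetical):

    ./ezctl add-node test-k8s 192.168.1.11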
ezdown (7 changed lines)
@@ -21,10 +21,13 @@ SYS_PKG_VER=0.3.3

 # images needed by k8s cluster
 calicoVer=v3.15.3
+ciliumVer=v1.4.1
+flannelVer=v0.13.0-amd64
+kubeRouterVer=v0.3.1
+kubeOvnVer=v1.5.3
 corednsVer=1.7.1
 dashboardVer=v2.1.0
 dashboardMetricsScraperVer=v1.0.6
-flannelVer=v0.13.0-amd64
 metricsVer=v0.3.6
 pauseVer=3.2
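These pins are the single source of truth that `ezctl new` copies into each cluster's config.yml; fetching the binaries and images they refer to is done with:

    ./ezdown -D    # download binaries/images, as the check in ezctl's setup() expects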
@@ -243,7 +246,7 @@ function get_offline_image() {
     docker pull "calico/pod2daemon-flexvol:${calicoVer}" && \
     docker pull "calico/kube-controllers:${calicoVer}" && \
     docker pull "calico/node:${calicoVer}" && \
-    docker save -o ${imageDir}/calico_${calicoVer}.tar calico/cni:${calicoVer}-1 calico/kube-controllers:${calicoVer} calico/node:${calicoVer}-1 calico/pod2daemon-flexvol:${calicoVer}
+    docker save -o ${imageDir}/calico_${calicoVer}.tar calico/cni:${calicoVer} calico/kube-controllers:${calicoVer} calico/node:${calicoVer} calico/pod2daemon-flexvol:${calicoVer}
   fi
   if [[ ! -f "$imageDir/coredns_$corednsVer.tar" ]];then
     docker pull coredns/coredns:${corednsVer} && \
@@ -1,6 +0,0 @@
-# to install docker service
-- hosts:
-  - kube-master
-  - kube-node
-  roles:
-  - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
@@ -1,6 +1,7 @@
-# to install containerd service
+# to install a container runtime
 - hosts:
   - kube-master
   - kube-node
   roles:
-  - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }
+  - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
+  - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }
@@ -66,7 +66,7 @@
     service: name=containerd state=restarted
     when:
     - 'CONTAINER_RUNTIME == "containerd"'
-    - 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+    - 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'
   when: hostvars[groups.harbor[0]]['SELF_SIGNED_CERT'] == 'yes'

 # [optional] if you have a DNS server, add an 'A record' instead
@@ -93,4 +93,4 @@
       state: present
       regexp: '{{ harbor_hostname }}'
       line: "{{ groups['harbor'][0] }} {{ harbor_hostname }}"
-    when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+    when: 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'
@@ -13,8 +13,8 @@
       ETCDCTL_API=3 {{ base_dir }}/bin/etcdctl \
       --endpoints=https://"$ip":2379 \
       --cacert={{ cluster_dir }}/ssl/ca.pem \
-      --cert={{ cluster_dir }}/ssl/admin.pem \
-      --key={{ cluster_dir }}/ssl/admin-key.pem \
+      --cert={{ cluster_dir }}/ssl/etcd.pem \
+      --key={{ cluster_dir }}/ssl/etcd-key.pem \
       endpoint health; \
       done'
   register: ETCD_CLUSTER_STATUS
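Rendered with a hedged sample layout (base_dir=/etc/kubeasz, cluster_dir=/etc/kubeasz/clusters/test-k8s; endpoint IP is hypothetical), the health probe above becomes:

    ETCDCTL_API=3 /etc/kubeasz/bin/etcdctl \
      --endpoints=https://192.168.1.1:2379 \
      --cacert=/etc/kubeasz/clusters/test-k8s/ssl/ca.pem \
      --cert=/etc/kubeasz/clusters/test-k8s/ssl/etcd.pem \
      --key=/etc/kubeasz/clusters/test-k8s/ssl/etcd-key.pem \
      endpoint health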
@@ -25,8 +25,8 @@
       ETCDCTL_API=3 {{ base_dir }}/bin/etcdctl \
       --endpoints=https://"$ip":2379 \
       --cacert={{ cluster_dir }}/ssl/ca.pem \
-      --cert={{ cluster_dir }}/ssl/admin.pem \
-      --key={{ cluster_dir }}/ssl/admin-key.pem \
+      --cert={{ cluster_dir }}/ssl/etcd.pem \
+      --key={{ cluster_dir }}/ssl/etcd-key.pem \
       endpoint health; \
       done'
   register: ETCD_CLUSTER_STATUS
@@ -13,8 +13,8 @@
       ETCDCTL_API=3 {{ base_dir }}/bin/etcdctl \
       --endpoints=https://"$ip":2379 \
       --cacert={{ cluster_dir }}/ssl/ca.pem \
-      --cert={{ cluster_dir }}/ssl/admin.pem \
-      --key={{ cluster_dir }}/ssl/admin-key.pem \
+      --cert={{ cluster_dir }}/ssl/etcd.pem \
+      --key={{ cluster_dir }}/ssl/etcd-key.pem \
       endpoint health; \
       done'
   register: ETCD_CLUSTER_STATUS
@@ -1,20 +1,28 @@
 - name: Create required directories on the nodes
   file: name={{ item }} state=directory
   with_items:
   - /etc/calico/ssl
   - /etc/cni/net.d
   - /opt/kube/images
   - /opt/kube/kube-system

 - name: Create the calico certificate signing request
-  template: src=calico-csr.json.j2 dest=/etc/calico/ssl/calico-csr.json
+  template: src=calico-csr.json.j2 dest={{ cluster_dir }}/ssl/calico-csr.json
+  connection: local

 - name: Create the calico certificate and private key
-  shell: "cd /etc/calico/ssl && {{ bin_dir }}/cfssl gencert \
-        -ca={{ ca_dir }}/ca.pem \
-        -ca-key={{ ca_dir }}/ca-key.pem \
-        -config={{ ca_dir }}/ca-config.json \
-        -profile=kubernetes calico-csr.json | {{ bin_dir }}/cfssljson -bare calico"
+  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
+        -ca=ca.pem \
+        -ca-key=ca-key.pem \
+        -config=ca-config.json \
+        -profile=kubernetes calico-csr.json|{{ base_dir }}/bin/cfssljson -bare calico"
+  connection: local
+
+- name: Distribute the calico certificates
+  copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
+  with_items:
+  - ca.pem
+  - calico.pem
+  - calico-key.pem

 - name: get calico-etcd-secrets info
   shell: "{{ bin_dir }}/kubectl get secrets -n kube-system"
@@ -22,9 +30,9 @@
   run_once: true

 - name: Create calico-etcd-secrets
-  shell: "cd /etc/calico/ssl && \
+  shell: "cd {{ ca_dir }} && \
         {{ bin_dir }}/kubectl create secret generic -n kube-system calico-etcd-secrets \
-        --from-file=etcd-ca={{ ca_dir }}/ca.pem \
+        --from-file=etcd-ca=ca.pem \
         --from-file=etcd-key=calico-key.pem \
         --from-file=etcd-cert=calico.pem"
   when: '"calico-etcd-secrets" not in secrets_info.stdout'
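Once the task has run, the result can be verified from the controller; a hedged sketch:

    kubectl get secret calico-etcd-secrets -n kube-system   # should show 3 data keys (etcd-ca, etcd-key, etcd-cert)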
@@ -12,7 +12,7 @@

 - name: Remove ntp via yum
   shell: 'yum remove -y ntp'
-  when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+  when: 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'
   args:
     warn: false
   ignore_errors: true
@@ -28,7 +28,7 @@
 - block:
   - name: Configure the chrony server
     template: src=server-centos.conf.j2 dest=/etc/chrony.conf
-    when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+    when: 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'

   - name: Configure the chrony server
     template: src=server-ubuntu.conf.j2 dest=/etc/chrony/chrony.conf
@@ -36,7 +36,7 @@

   - name: Start the chrony server
     service: name=chronyd state=restarted enabled=yes
-    when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+    when: 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'

   - name: Start the chrony server
     service: name=chrony state=restarted enabled=yes
@@ -46,7 +46,7 @@
 - block:
   - name: Configure the chrony client
     template: src=client-centos.conf.j2 dest=/etc/chrony.conf
-    when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+    when: 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'

   - name: Configure the chrony client
     template: src=client-ubuntu.conf.j2 dest=/etc/chrony/chrony.conf
@@ -54,7 +54,7 @@

   - name: Start the chrony client
     service: name=chronyd state=restarted enabled=yes
-    when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+    when: 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'

   - name: Start the chrony client
     service: name=chrony state=restarted enabled=yes
@@ -7,5 +7,5 @@
   - name: stop and disable chronyd in CentOS/RedHat
     service: name=chronyd state=stopped enabled=no
     ignore_errors: true
-    when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+    when: 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'
  when: "groups['chrony']|length > 0"
@@ -11,7 +11,6 @@
     file: name={{ item }} state=absent
     with_items:
     - "/var/lib/etcd"
-    - "/etc/etcd/"
     - "/backup/k8s"
     - "/etc/systemd/system/etcd.service"
   when: "inventory_hostname in groups['etcd']"
@@ -3,13 +3,6 @@
   with_items:
   - /opt/kube/kube-system

-# some parameters in the DNS manifests depend on the hosts file, so the template module substitutes them
-- name: Prepare the DNS deployment manifests
-  template: src={{ item }}.yaml.j2 dest=/opt/kube/kube-system/{{ item }}.yaml
-  with_items:
-  - kubedns
-  - coredns
-
 - name: Get info about all created pods
   command: "{{ bin_dir }}/kubectl get pod --all-namespaces"
   register: pod_info
@@ -23,27 +16,29 @@

 - block:
   - block:
-    - name: Try to push the offline {{ dns_backend }} image (failures can be ignored)
-      copy: src={{ base_dir }}/down/{{ dns_offline }} dest=/opt/kube/images/{{ dns_offline }}
-      when: 'dns_offline in download_info.stdout'
+    - name: Try to push the offline coredns image (failures can be ignored)
+      copy: src={{ base_dir }}/down/{{ coredns_offline }} dest=/opt/kube/images/{{ coredns_offline }}
+      when: 'coredns_offline in download_info.stdout'

-    - name: Check the {{ dns_backend }} offline image push status
+    - name: Check the coredns offline image push status
       command: "ls /opt/kube/images"
       register: image_info

-    - name: Load the {{ dns_backend }} offline image (failures can be ignored)
-      shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ dns_offline }}"
-      when: 'dns_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'
+    - name: Load the coredns offline image (failures can be ignored)
+      shell: "{{ bin_dir }}/docker load -i /opt/kube/images/{{ coredns_offline }}"
+      when: 'coredns_offline in image_info.stdout and CONTAINER_RUNTIME == "docker"'

-    - name: Load the {{ dns_backend }} offline image (failures can be ignored)
-      shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ dns_offline }}"
-      when: 'dns_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
+    - name: Load the coredns offline image (failures can be ignored)
+      shell: "{{ bin_dir }}/ctr -n=k8s.io images import /opt/kube/images/{{ coredns_offline }}"
+      when: 'coredns_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'

-  - name: Create the {{ dns_backend }} deployment
-    shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ dns_backend }}.yaml"
+  - name: Prepare the DNS deployment manifest
+    template: src=coredns.yaml.j2 dest=/opt/kube/kube-system/coredns.yaml
+
+  - name: Create the coredns deployment
+    shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/coredns.yaml"
   run_once: true
   when:
-  - '"kube-dns" not in pod_info.stdout'
   - '"coredns" not in pod_info.stdout'
   - 'dns_install == "yes"'
   ignore_errors: true
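After the play, a quick hedged check that the addon actually landed:

    kubectl get pods -n kube-system | grep coredns   # pods should reach Running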
@@ -2,14 +2,7 @@
 CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(2) | ipaddr('address') }}"

 #
-kubednsVer: "1.14.13"
-kubedns_offline: "kubedns_{{ kubednsVer }}.tar"
 coredns_offline: "coredns_{{ corednsVer }}.tar"
-dns_offline: "{%- if dns_backend == 'coredns' -%} \
-              {{ coredns_offline }} \
-              {%- else -%} \
-              {{ kubedns_offline }} \
-              {%- endif -%}"

 metricsserver_offline: "metrics-server_{{ metricsVer }}.tar"
@@ -39,8 +39,8 @@
 - import_tasks: create-kube-scheduler-kubeconfig.yml

 # convenience settings on the ansible controller
-- name: Create a local symlink for the easzctl tool
-  file: src={{ base_dir }}/tools/easzctl dest=/usr/bin/easzctl state=link
+- name: Create a local symlink for the ezctl tool
+  file: src={{ base_dir }}/ezctl dest=/usr/bin/ezctl state=link

 - name: Create a kubectl symlink on the ansible controller
   file: src={{ base_dir }}/bin/kubectl dest=/usr/bin/kubectl state=link
@@ -13,6 +13,5 @@
   file: name={{ item }} state=absent
   with_items:
   - "/var/lib/etcd"
-  - "/etc/etcd/"
   - "/backup/k8s"
   - "/etc/systemd/system/etcd.service"
@@ -3,7 +3,6 @@
   with_items:
   - "{{ bin_dir }}"
   - "{{ ca_dir }}"
-  - "/etc/etcd/ssl"    # etcd cert directory
   - "/var/lib/etcd"    # etcd working directory

 - name: Download the etcd binaries
@@ -13,22 +12,24 @@
   - etcdctl
   tags: upgrade_etcd

-- name: Distribute the certificates
+- name: Create the etcd certificate signing request
+  template: src=etcd-csr.json.j2 dest={{ cluster_dir }}/ssl/etcd-csr.json
+  connection: local
+
+- name: Create the etcd certificate and private key
+  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
+        -ca=ca.pem \
+        -ca-key=ca-key.pem \
+        -config=ca-config.json \
+        -profile=kubernetes etcd-csr.json | {{ base_dir }}/bin/cfssljson -bare etcd"
+  connection: local
+
+- name: Distribute the etcd certificates
   copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
   with_items:
   - ca.pem
-  - ca-key.pem
-  - ca-config.json
-
-- name: Create the etcd certificate signing request
-  template: src=etcd-csr.json.j2 dest=/etc/etcd/ssl/etcd-csr.json
-
-- name: Create the etcd certificate and private key
-  shell: "cd /etc/etcd/ssl && {{ bin_dir }}/cfssl gencert \
-        -ca={{ ca_dir }}/ca.pem \
-        -ca-key={{ ca_dir }}/ca-key.pem \
-        -config={{ ca_dir }}/ca-config.json \
-        -profile=kubernetes etcd-csr.json | {{ bin_dir }}/cfssljson -bare etcd"
+  - etcd.pem
+  - etcd-key.pem

 - name: Create the etcd systemd unit file
   template: src=etcd.service.j2 dest=/etc/systemd/system/etcd.service
@@ -10,10 +10,10 @@ Type=notify
 WorkingDirectory=/var/lib/etcd/
 ExecStart={{ bin_dir }}/etcd \
   --name={{ NODE_NAME }} \
-  --cert-file=/etc/etcd/ssl/etcd.pem \
-  --key-file=/etc/etcd/ssl/etcd-key.pem \
-  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
-  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
+  --cert-file={{ ca_dir }}/etcd.pem \
+  --key-file={{ ca_dir }}/etcd-key.pem \
+  --peer-cert-file={{ ca_dir }}/etcd.pem \
+  --peer-key-file={{ ca_dir }}/etcd-key.pem \
   --trusted-ca-file={{ ca_dir }}/ca.pem \
   --peer-trusted-ca-file={{ ca_dir }}/ca.pem \
   --initial-advertise-peer-urls=https://{{ inventory_hostname }}:2380 \
@@ -10,7 +10,7 @@
   - name: stop and disable chronyd in CentOS/RedHat
     service: name=chronyd state=stopped enabled=no
     ignore_errors: true
-    when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+    when: 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'
  when: "groups['chrony']|length > 0"

 - name: stop keepalived service
@@ -47,7 +47,7 @@

 - name: Adjust haproxy.service for CentOS
   template: src=haproxy.service.j2 dest=/usr/lib/systemd/system/haproxy.service
-  when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+  when: 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'
   tags: restart_lb

 - name: daemon-reload for haproxy.service
@@ -33,22 +33,23 @@
     shell: "{{ bin_dir }}/docker load -i /data/harbor/harbor.{{ HARBOR_VER }}.tar.gz"

 - block:
+  - name: Create the harbor certificate signing request
+    template: src=harbor-csr.json.j2 dest={{ cluster_dir }}/ssl/harbor-csr.json
+    connection: local
+
+  - name: Create the harbor certificate and private key
+    shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
+          -ca=ca.pem \
+          -ca-key=ca-key.pem \
+          -config=ca-config.json \
+          -profile=kubernetes harbor-csr.json|{{ base_dir }}/bin/cfssljson -bare harbor"
+    connection: local
+
   - name: Distribute the self-signed certificates
     copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
     with_items:
     - ca.pem
-    - ca-key.pem
-    - ca-config.json
-
-  - name: Create the harbor certificate signing request
-    template: src=harbor-csr.json.j2 dest={{ ca_dir }}/harbor-csr.json
-
-  - name: Create the harbor certificate and private key
-    shell: "cd {{ ca_dir }} && {{ bin_dir }}/cfssl gencert \
-          -ca={{ ca_dir }}/ca.pem \
-          -ca-key={{ ca_dir }}/ca-key.pem \
-          -config={{ ca_dir }}/ca-config.json \
-          -profile=kubernetes harbor-csr.json | {{ bin_dir }}/cfssljson -bare harbor"
+    - harbor.pem
+    - harbor-key.pem
   when: SELF_SIGNED_CERT == 'yes'

 - name: Copy the harbor certificates from the down directory
@@ -1,9 +0,0 @@
-helm_namespace: kube-system
-helm_cert_cn: helm001
-tiller_sa: tiller
-tiller_cert_cn: tiller001
-tiller_image: easzlab/tiller:v2.14.1
-repo_url: https://kubernetes-charts.storage.googleapis.com
-history_max: 5
-# if access to the default official repo is unreliable, use the Aliyun mirror repo below
-#repo_url: https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
@@ -1,3 +0,0 @@
-- hosts: "{{ groups['kube-master'][0] }}"
-  roles:
-  - helm
@@ -1,71 +0,0 @@
-- name: Download the helm client
-  copy: src={{ base_dir }}/bin/helm dest={{ bin_dir }}/helm mode=0755
-
-- name: Create the helm client certificate signing request
-  template: src=helm-csr.json.j2 dest={{ ca_dir }}/{{ helm_cert_cn }}-csr.json
-
-- name: Create the helm client certificate
-  shell: "cd {{ ca_dir }} && {{ bin_dir }}/cfssl gencert \
-        -ca={{ ca_dir }}/ca.pem \
-        -ca-key={{ ca_dir }}/ca-key.pem \
-        -config={{ ca_dir }}/ca-config.json \
-        -profile=kubernetes {{ helm_cert_cn }}-csr.json | {{ bin_dir }}/cfssljson -bare {{ helm_cert_cn }}"
-
-- name: Create the tiller server certificate signing request
-  template: src=tiller-csr.json.j2 dest={{ ca_dir }}/{{ tiller_cert_cn }}-csr.json
-
-- name: Create the tiller server certificate and private key
-  shell: "cd {{ ca_dir }} && {{ bin_dir }}/cfssl gencert \
-        -ca={{ ca_dir }}/ca.pem \
-        -ca-key={{ ca_dir }}/ca-key.pem \
-        -config={{ ca_dir }}/ca-config.json \
-        -profile=kubernetes {{ tiller_cert_cn }}-csr.json | {{ bin_dir }}/cfssljson -bare {{ tiller_cert_cn }}"
-
-- name: Get all namespaces in the current cluster
-  shell: "{{ bin_dir }}/kubectl get ns"
-  register: current_ns
-  run_once: true
-
-- name: Prepare the rbac config
-  template: src=helm-rbac.yaml.j2 dest=/opt/kube/helm-rbac.yaml
-
-- name: Create the rbac objects in k8s
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/helm-rbac.yaml"
-  ignore_errors: true
-  run_once: true
-
-- name: Install tiller
-  shell: "{{ bin_dir }}/helm init \
-        --history-max {{ history_max }} \
-        --tiller-tls \
-        --tiller-tls-verify \
-        --tiller-tls-cert {{ ca_dir }}/{{ tiller_cert_cn }}.pem \
-        --tiller-tls-key {{ ca_dir }}/{{ tiller_cert_cn }}-key.pem \
-        --tls-ca-cert {{ ca_dir }}/ca.pem \
-        --service-account {{ tiller_sa }} \
-        --tiller-namespace {{ helm_namespace }} \
-        --tiller-image {{ tiller_image }} \
-        --stable-repo-url {{ repo_url }} \
-        --upgrade"
-  ignore_errors: true
-
-- name: Configure the helm client
-  shell: "cp -f {{ ca_dir }}/ca.pem ~/.helm/ca.pem && \
-        cp -f {{ ca_dir }}/{{ helm_cert_cn }}.pem ~/.helm/cert.pem && \
-        cp -f {{ ca_dir }}/{{ helm_cert_cn }}-key.pem ~/.helm/key.pem"
-  ignore_errors: true
-
-- name: Add helm command auto-completion
-  lineinfile:
-    dest: ~/.bashrc
-    state: present
-    regexp: 'helm completion'
-    line: 'source <(helm completion bash)'
-
-# enable the helm TLS env var for secure communication with tiller; requires helm v2.11.0 or later
-- name: Configure the helm TLS environment variable
-  lineinfile:
-    dest: ~/.bashrc
-    state: present
-    regexp: "helm tls environment"
-    line: "export HELM_TLS_ENABLE=true"
@@ -1,17 +0,0 @@
-{
-  "CN": "{{ helm_cert_cn }}",
-  "hosts": [],
-  "key": {
-    "algo": "rsa",
-    "size": 2048
-  },
-  "names": [
-    {
-      "C": "CN",
-      "ST": "HangZhou",
-      "L": "XS",
-      "O": "k8s",
-      "OU": "System"
-    }
-  ]
-}
@@ -1,28 +0,0 @@
-# bind the helm SA to cluster-admin, to stay compatible with existing charts that need cluster privileges
-#
-{% if helm_namespace not in current_ns.stdout %}
----
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: {{ helm_namespace }}
-{% endif %}
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: {{ tiller_sa }}
-  namespace: {{ helm_namespace }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: tiller-cb
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: cluster-admin
-subjects:
-- kind: ServiceAccount
-  name: {{ tiller_sa }}
-  namespace: {{ helm_namespace }}
@@ -1,62 +0,0 @@
-# restrict helm releases to the specified namespace only
-# can be combined with NetworkPolicy etc. to fully isolate networks between namespaces
----
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: {{ helm_namespace }}
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: {{ tiller_sa }}
-  namespace: {{ helm_namespace }}
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: tiller-manager
-  namespace: {{ helm_namespace }}
-rules:
-- apiGroups: ["", "extensions", "apps"]
-  resources: ["*"]
-  verbs: ["*"]
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: tiller-binding
-  namespace: {{ helm_namespace }}
-subjects:
-- kind: ServiceAccount
-  name: {{ tiller_sa }}
-  namespace: {{ helm_namespace }}
-roleRef:
-  kind: Role
-  name: tiller-manager
-  apiGroup: rbac.authorization.k8s.io
----
-#
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: tiller-cluster-manager
-rules:
-- apiGroups: ["rbac.authorization.k8s.io"]
-  resources:
-  - clusterroles
-  - clusterrolebindings
-  verbs: ["*"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: tiller-cluster-binding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: tiller-cluster-manager
-subjects:
-- kind: ServiceAccount
-  name: {{ tiller_sa }}
-  namespace: {{ helm_namespace }}
@@ -1,17 +0,0 @@
-{
-  "CN": "{{ tiller_cert_cn }}",
-  "hosts": [],
-  "key": {
-    "algo": "rsa",
-    "size": 2048
-  },
-  "names": [
-    {
-      "C": "CN",
-      "ST": "HangZhou",
-      "L": "XS",
-      "O": "k8s",
-      "OU": "System"
-    }
-  ]
-}
@@ -8,27 +8,41 @@
   tags: upgrade_k8s

 - name: Create the kubernetes certificate signing request
-  template: src=kubernetes-csr.json.j2 dest={{ ca_dir }}/kubernetes-csr.json
+  template: src=kubernetes-csr.json.j2 dest={{ cluster_dir }}/ssl/kubernetes-csr.json
   tags: change_cert
+  connection: local

 - name: Create the kubernetes certificate and private key
-  shell: "cd {{ ca_dir }} && {{ bin_dir }}/cfssl gencert \
-        -ca={{ ca_dir }}/ca.pem \
-        -ca-key={{ ca_dir }}/ca-key.pem \
-        -config={{ ca_dir }}/ca-config.json \
-        -profile=kubernetes kubernetes-csr.json | {{ bin_dir }}/cfssljson -bare kubernetes"
+  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
+        -ca=ca.pem \
+        -ca-key=ca-key.pem \
+        -config=ca-config.json \
+        -profile=kubernetes kubernetes-csr.json | {{ base_dir }}/bin/cfssljson -bare kubernetes"
   tags: change_cert
+  connection: local

 # create the aggregator proxy certificates
 - name: Create the aggregator proxy certificate signing request
-  template: src=aggregator-proxy-csr.json.j2 dest={{ ca_dir }}/aggregator-proxy-csr.json
+  template: src=aggregator-proxy-csr.json.j2 dest={{ cluster_dir }}/ssl/aggregator-proxy-csr.json
+  connection: local

 - name: Create the aggregator-proxy certificate and private key
-  shell: "cd {{ ca_dir }} && {{ bin_dir }}/cfssl gencert \
-        -ca={{ ca_dir }}/ca.pem \
-        -ca-key={{ ca_dir }}/ca-key.pem \
-        -config={{ ca_dir }}/ca-config.json \
-        -profile=kubernetes aggregator-proxy-csr.json | {{ bin_dir }}/cfssljson -bare aggregator-proxy"
+  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
+        -ca=ca.pem \
+        -ca-key=ca-key.pem \
+        -config=ca-config.json \
+        -profile=kubernetes aggregator-proxy-csr.json | {{ base_dir }}/bin/cfssljson -bare aggregator-proxy"
+  connection: local
+
+- name: Distribute the kubernetes certificates
+  copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
+  with_items:
+  - ca.pem
+  - ca-key.pem
+  - kubernetes.pem
+  - kubernetes-key.pem
+  - aggregator-proxy.pem
+  - aggregator-proxy-key.pem

 - name: Replace the apiserver address in the kubeconfig
   lineinfile:
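With this change all master certs are minted locally under the cluster's directory and then pushed out, so the generated material can be inspected on the controller (hypothetical cluster name):

    ls clusters/test-k8s/ssl/
    # expect: ca.pem ca-key.pem kubernetes.pem kubernetes-key.pem aggregator-proxy.pem aggregator-proxy-key.pem ...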
@@ -65,3 +79,13 @@
   retries: 5
   delay: 6
   tags: upgrade_k8s, restart_master
+
+- name: Check whether user:kubernetes is already bound to the cluster role
+  shell: "{{ bin_dir }}/kubectl get clusterrolebindings|grep kubernetes-crb || echo 'notfound'"
+  register: crb_info
+  run_once: true
+
+- name: Create the user:kubernetes cluster role binding
+  command: "{{ bin_dir }}/kubectl create clusterrolebinding kubernetes-crb --clusterrole=cluster-admin --user=kubernetes"
+  run_once: true
+  when: "'notfound' in crb_info.stdout"
@@ -18,8 +18,8 @@ ExecStart={{ bin_dir }}/kube-apiserver \
   --etcd-keyfile={{ ca_dir }}/kubernetes-key.pem \
   --etcd-servers={{ ETCD_ENDPOINTS }} \
   --kubelet-certificate-authority={{ ca_dir }}/ca.pem \
-  --kubelet-client-certificate={{ ca_dir }}/admin.pem \
-  --kubelet-client-key={{ ca_dir }}/admin-key.pem \
+  --kubelet-client-certificate={{ ca_dir }}/kubernetes.pem \
+  --kubelet-client-key={{ ca_dir }}/kubernetes-key.pem \
   --service-account-issuer=kubernetes.default.svc \
   --service-account-signing-key-file={{ ca_dir }}/ca-key.pem \
   --service-account-key-file={{ ca_dir }}/ca.pem \
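Since the apiserver now authenticates to kubelets with the kubernetes cert instead of admin's, the new cert can be inspected on a master; a hedged check using the cfssl-certinfo binary shipped under /etc/kubeasz/bin:

    /etc/kubeasz/bin/cfssl-certinfo -cert /etc/kubernetes/ssl/kubernetes.pem   # inspect subject/SANs/validity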
@@ -5,7 +5,9 @@
 {% if groups['ex-lb']|length > 0 %}
     "{{ hostvars[groups['ex-lb'][0]]['EX_APISERVER_VIP'] }}",
 {% endif %}
-    "{{ inventory_hostname }}",
+{% for host in groups['kube-master'] %}
+    "{{ host }}",
+{% endfor %}
     "{{ CLUSTER_KUBERNETES_SVC_IP }}",
 {% for host in MASTER_CERT_HOSTS %}
     "{{ host }}",
@@ -1,12 +1,21 @@
 - name: Prepare the kubelet certificate signing request
-  template: src=kubelet-csr.json.j2 dest={{ ca_dir }}/kubelet-csr.json
+  template: src=kubelet-csr.json.j2 dest={{ cluster_dir }}/ssl/kubelet-csr.json
+  connection: local

 - name: Create the kubelet certificate and private key
-  shell: "cd {{ ca_dir }} && {{ bin_dir }}/cfssl gencert \
-        -ca={{ ca_dir }}/ca.pem \
-        -ca-key={{ ca_dir }}/ca-key.pem \
-        -config={{ ca_dir }}/ca-config.json \
-        -profile=kubernetes kubelet-csr.json | {{ bin_dir }}/cfssljson -bare kubelet"
+  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
+        -ca=ca.pem \
+        -ca-key=ca-key.pem \
+        -config=ca-config.json \
+        -profile=kubernetes kubelet-csr.json | {{ base_dir }}/bin/cfssljson -bare kubelet"
+  connection: local
+
+- name: Distribute the kubelet certificates
+  copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
+  with_items:
+  - ca.pem
+  - kubelet.pem
+  - kubelet-key.pem

 # create kubelet.kubeconfig
 - name: Set cluster parameters
@@ -18,8 +18,7 @@
 - name: Adjust haproxy.service for CentOS
   template: src=haproxy.service.j2 dest=/usr/lib/systemd/system/haproxy.service
   when:
-  - 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
-  - 'ansible_distribution_major_version == "7"'
+  - 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'
   tags: restart_lb

 - name: Configure haproxy
@@ -1,8 +1,10 @@
 {
   "CN": "system:node:{{ inventory_hostname }}",
   "hosts": [
-    "127.0.0.1",
-    "{{ inventory_hostname }}"
+{% for host in groups['kube-node'] %}
+    "{{ host }}",
+{% endfor %}
+    "127.0.0.1"
   ],
   "key": {
     "algo": "rsa",
@@ -8,14 +8,6 @@
     warn: false
   ignore_errors: true

-#- name: Add the EPEL repository
-#  yum: name=epel-release state=present
-
-- name: Add the Amazon EPEL repository
-  shell: "amazon-linux-extras install epel"
-  when: ansible_distribution == "Amazon"
-  ignore_errors: true
-
 - name: Install base packages
   yum:
     name:
@@ -10,7 +10,7 @@
   when: 'ansible_distribution in ["Ubuntu","Debian"]'

 - import_tasks: centos.yml
-  when: 'ansible_distribution in ["CentOS","RedHat","Amazon"]'
+  when: 'ansible_distribution in ["CentOS","RedHat","Amazon","Aliyun"]'

 # common system parameter settings
 - import_tasks: common.yml
@@ -22,13 +22,6 @@
   - "{{ ca_dir }}"
   - /root/.kube

-- name: Distribute the CFSSL cert tools
-  copy: src={{ base_dir }}/bin/{{ item }} dest={{ bin_dir }}/{{ item }} mode=0755
-  with_items:
-  - cfssl
-  - cfssl-certinfo
-  - cfssljson
-
 - name: Write $PATH into the environment
   lineinfile:
     dest: ~/.bashrc
@@ -37,15 +30,6 @@
     line: 'export PATH={{ bin_dir }}:$PATH # generated by kubeasz'

-- block:
-  - name: Distribute the certificates
-    copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
-    with_items:
-    - admin.pem
-    - admin-key.pem
-    - ca.pem
-    - ca-key.pem
-    - ca-config.json
-
 - name: Add kubectl command auto-completion
   lineinfile:
     dest: ~/.bashrc
@@ -57,13 +41,12 @@
   copy: src=/root/.kube/config dest=/root/.kube/config

 - name: Distribute the kube-proxy.kubeconfig file
-  copy: src={{ cluster_dir }}/kube-proxy.kubeconfig dest={{ cluster_dir }}/kube-proxy.kubeconfig
+  copy: src={{ cluster_dir }}/kube-proxy.kubeconfig dest=/etc/kubernetes/kube-proxy.kubeconfig

-- name: Distribute the kube-controller-manager.kubeconfig file
-  copy: src={{ cluster_dir }}/kube-controller-manager.kubeconfig dest={{ cluster_dir }}/kube-controller-manager.kubeconfig
-  when: "inventory_hostname in groups['kube-master']"
-
-- name: Distribute the kube-scheduler.kubeconfig file
-  copy: src={{ cluster_dir }}/kube-scheduler.kubeconfig dest={{ cluster_dir }}/kube-scheduler.kubeconfig
-  when: "inventory_hostname in groups['kube-master']"
+- name: Distribute the controller/scheduler kubeconfig files
+  copy: src={{ cluster_dir }}/{{ item }} dest=/etc/kubernetes/{{ item }}
+  with_items:
+  - kube-controller-manager.kubeconfig
+  - kube-scheduler.kubeconfig
+  when: "inventory_hostname in groups['kube-master'] or inventory_hostname in groups['kube-node']"