mirror of https://github.com/easzlab/kubeasz.git
init commit for dev-3.0
parent 185d62ea41
commit c69be3f0da
@@ -2,12 +2,8 @@ down/*
 !down/download.sh
 !down/offline_images
 
-# k8s binaries directory
+# binaries directory
 bin/*
-!bin/readme.md
 
-# ansible hosts
-hosts
-
 # k8s storage manifests
 manifests/storage/*
@@ -18,8 +14,8 @@ roles/cluster-backup/files/*
 !roles/cluster-backup/files/readme.md
 
 # role based variable settings, exclude roles/os-harden/vars/
-/roles/*/vars/*
-!/roles/os-harden/vars/
+#/roles/*/vars/*
+#!/roles/os-harden/vars/
 
 # cluster backups
 .cluster/
@@ -32,7 +32,7 @@
 gathering = smart
 
 # additional paths to search for roles in, colon separated
-roles_path = /etc/ansible/roles
+roles_path = /etc/kubeasz/roles
 
 # uncomment this to disable SSH key host checking
 host_key_checking = False
@@ -1,3 +0,0 @@
-# Dockerfiles for building images needed
-
-Please refer to https://github.com/kubeasz/dockerfiles
@@ -1,6 +1,6 @@
 #!/bin/bash
 # This script describes where to download the official released binaries needed
-# It's suggested to download using 'tools/easzup -D', everything needed will be ready in '/etc/ansible'
+# It's suggested to download using 'ezdown -D', everything needed will be ready in '/etc/kubeasz'
 
 # example releases
 K8S_VER=v1.13.7
@@ -11,7 +11,7 @@ DOCKER_COMPOSE_VER=1.23.2
 HARBOR_VER=v1.9.4
 CONTAINERD_VER=1.2.6
 
-echo -e "\nNote: It's strongly recommended that downloading with 'tools/easzup -D', everything needed will be ready in '/etc/ansible'."
+echo -e "\nNote: It's strongly recommended that downloading with 'ezdown -D', everything needed will be ready in '/etc/kubeasz'."
 
 echo -e "\n----download k8s binary at:"
 echo -e https://dl.k8s.io/${K8S_VER}/kubernetes-server-linux-amd64.tar.gz
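As the note above says, the recommended path is the ezdown helper rather than fetching these URLs by hand. A minimal sketch of that flow, assuming ezdown has been copied to the current directory and the defaults are acceptable:

./ezdown -D    # download kubeasz code, binaries and offline images into /etc/kubeasz
./ezdown -S    # optionally run kubeasz itself in a container afterwards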
@@ -0,0 +1,182 @@
+############################
+# role:prepare
+############################
+# optional offline installation of system packages (offline|online)
+INSTALL_SOURCE: "online"
+
+
+############################
+# role:chrony
+############################
+# NTP time source servers
+ntp_servers:
+  - "ntp1.aliyun.com"
+  - "time1.cloud.tencent.com"
+  - "0.cn.pool.ntp.org"
+
+# network segments allowed to sync time from the internal NTP server, e.g. "10.0.0.0/8"; all allowed by default
+local_network: "0.0.0.0/0"
+
+
+############################
+# role:deploy
+############################
+# default: ca will expire in 100 years
+# default: certs issued by the ca will expire in 50 years
+CA_EXPIRY: "876000h"
+CERT_EXPIRY: "438000h"
+
+# kubeconfig parameters; note that permissions depend on 'USER_NAME':
+# 'admin' creates a kubeconfig with cluster-admin (full) privileges
+# 'read' creates a kubeconfig with read-only privileges
+CLUSTER_NAME: "cluster1"
+USER_NAME: "admin"
+CONTEXT_NAME: "context-{{ CLUSTER_NAME }}-{{ USER_NAME }}"
+
+
+############################
+# role:runtime [containerd,docker]
+############################
+# [.]enable the container registry mirror
+ENABLE_MIRROR_REGISTRY: true
+
+# [containerd]base (pause) container image
+SANDBOX_IMAGE: "easzlab/pause-amd64:3.2"
+
+# [containerd]container persistent storage directory
+CONTAINERD_STORAGE_DIR: "/var/lib/containerd"
+
+# [docker]container storage directory
+DOCKER_STORAGE_DIR: "/var/lib/docker"
+
+# [docker]enable the Restful API
+ENABLE_REMOTE_API: false
+
+# [docker]trusted insecure (HTTP) registries
+INSECURE_REG: '["127.0.0.1/8"]'
+
+
+############################
+# role:kube-master
+############################
+# certificate hosts for k8s master nodes; more IPs and domains can be added (e.g. a public IP and domain)
+MASTER_CERT_HOSTS:
+  - "10.1.1.1"
+  - "k8s.test.io"
+  #- "www.test.com"
+
+# pod CIDR mask length on each node (determines the maximum number of pod IPs per node)
+# if flannel runs with --kube-subnet-mgr, it reads this setting to assign a pod CIDR to each node
+# https://github.com/coreos/flannel/issues/847
+NODE_CIDR_LEN: 24
+
+
+############################
+# role:kube-node
+############################
+# kubelet root directory
+KUBELET_ROOT_DIR: "/var/lib/kubelet"
+
+# maximum number of pods per node
+MAX_PODS: 110
+
+# resources reserved for kube components (kubelet, kube-proxy, dockerd, etc.)
+# see templates/kubelet-config.yaml.j2 for the actual values
+KUBE_RESERVED_ENABLED: "yes"
+
+# k8s upstream does not recommend enabling system-reserved lightly, unless long-term monitoring shows how the system uses resources;
+# the reservation should grow as the system keeps running; see templates/kubelet-config.yaml.j2 for the values
+# the system reservation assumes a 4c/8g VM with a minimal set of system services; increase it on powerful physical machines
+# also, apiserver and friends briefly use a lot of resources during cluster setup, so reserve at least 1g of memory
+SYS_RESERVED_ENABLED: "no"
+
+# haproxy balance mode
+BALANCE_ALG: "roundrobin"
+
+
+############################
+# role:network [flannel,calico,cilium,kube-ovn,kube-router]
+############################
+# [flannel]flannel backend: "host-gw", "vxlan", etc.
+FLANNEL_BACKEND: "vxlan"
+DIRECT_ROUTING: false
+
+# [flannel] flanneld_image: "quay.io/coreos/flannel:v0.10.0-amd64"
+flannelVer: "v0.13.0-amd64"
+flanneld_image: "easzlab/flannel:{{ flannelVer }}"
+
+# [flannel]offline image tarball
+flannel_offline: "flannel_{{ flannelVer }}.tar"
+
+# [calico]setting CALICO_IPV4POOL_IPIP="off" can improve network performance; see docs/setup/calico.md for the constraints
+CALICO_IPV4POOL_IPIP: "Always"
+
+# [calico]host IP used by calico-node; bgp neighbors peer over this address; set it manually or let it be auto-detected
+IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube-master'][0] }}"
+
+# [calico]calico network backend: brid, vxlan, none
+CALICO_NETWORKING_BACKEND: "brid"
+
+# [calico]supported calico versions: [v3.3.x] [v3.4.x] [v3.8.x] [v3.15.x]
+calicoVer: "v3.15.3"
+calico_ver: "{{ calicoVer }}"
+
+# [calico]calico major.minor version
+calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"
+
+# [calico]offline image tarball
+calico_offline: "calico_{{ calico_ver }}.tar"
+
+# [cilium]number of etcd nodes created by CILIUM_ETCD_OPERATOR: 1,3,5,7...
+ETCD_CLUSTER_SIZE: 1
+
+# [cilium]image version
+cilium_ver: "v1.4.1"
+
+# [cilium]offline image tarball
+cilium_offline: "cilium_{{ cilium_ver }}.tar"
+
+# [kube-ovn]node running OVN DB and OVN Control Plane, the first master node by default
+OVN_DB_NODE: "{{ groups['kube-master'][0] }}"
+
+# [kube-ovn]offline image tarball
+kube_ovn_offline: "kube_ovn_0.9.1.tar"
+
+# [kube-router]public clouds usually have restrictions and need ipinip always on; in your own environment this can be set to "subnet"
+OVERLAY_TYPE: "full"
+
+# [kube-router]NetworkPolicy support switch
+FIREWALL_ENABLE: "true"
+
+# [kube-router]kube-router image version
+kube_router_ver: "v0.3.1"
+busybox_ver: "1.28.4"
+
+# [kube-router]kube-router offline image tarball
+kuberouter_offline: "kube-router_{{ kube_router_ver }}.tar"
+busybox_offline: "busybox_{{ busybox_ver }}.tar"
+
+
+############################
+# role:cluster-addon
+############################
+# automatic dns installation; 'dns_backend' can be "coredns" or "kubedns"
+dns_install: "yes"
+dns_backend: "coredns"
+corednsVer: "1.7.1"
+
+# automatic metrics server installation
+metricsserver_install: "yes"
+metricsVer: "v0.3.6"
+
+# automatic dashboard installation
+dashboard_install: "yes"
+dashboardVer: "v2.1.0"
+dashboardMetricsScraperVer: "v1.0.6"
+
+
+############################
+# role:harbor
+############################
+# harbor version, full version number
+HARBOR_VER: "v1.9.4"
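A quick worked example for the NODE_CIDR_LEN value introduced above (the /16 pod network is an assumed figure, not part of this commit): with a cluster pod CIDR of 172.20.0.0/16 and NODE_CIDR_LEN: 24, every node is handed a /24, i.e. 2^(32-24) - 2 = 254 usable pod addresses, and at most 2^(24-16) = 256 nodes can receive a subnet.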
@@ -53,8 +53,11 @@ CLUSTER_DNS_DOMAIN="cluster.local."
 # Binaries Directory
 bin_dir="/opt/kube/bin"
 
-# CA and other components cert/key Directory
-ca_dir="/etc/kubernetes/ssl"
-
 # Deploy Directory (kubeasz workspace)
-base_dir="/etc/ansible"
+base_dir="/etc/kubeasz"
+
+# Directory for a specific cluster
+cluster_dir="{{ base_dir }}/clusters/_cluster_name_"
+
+# CA and other components cert/key Directory
+ca_dir="{{ cluster_dir }}/ssl"
@@ -61,4 +61,7 @@ bin_dir="/opt/kube/bin"
 ca_dir="/etc/kubernetes/ssl"
 
 # Deploy Directory (kubeasz workspace)
-base_dir="/etc/ansible"
+base_dir="/etc/kubeasz"
+
+# Directory for a specific cluster
+cluster_dir="{{ base_dir }}/clusters/_cluster_name_"
@@ -1,38 +1,54 @@
 #!/bin/bash
-#
-# This script aims to manage k8s clusters created by 'kubeasz'. (developing)
+# Create & manage k8s clusters by 'kubeasz'
 
 set -o nounset
 set -o errexit
 #set -o xtrace
 
 function usage() {
+    echo -e "\033[33mUsage:\033[0m ezctl COMMAND [args]"
     cat <<EOF
-Usage: easzctl COMMAND [args]
+-------------------------------------------------------------------------------------
+Cluster setups:
+    list                             to list all of the managed clusters
+    new <cluster>                    to start a new k8s deploy with name 'cluster'
+    setup <cluster> [step]           to setup a cluster, also supporting a step-by-step way
+    start-aio                        to quickly setup an all-in-one cluster with 'default' settings
 
-Cluster-wide operation:
-    checkout     To switch to context <clustername>, or create it if not existed
-    destroy      To destroy the current cluster, '--purge' to also delete the context
-    list         To list all of clusters managed
-    setup        To setup a cluster using the current context
-    start-aio    To quickly setup an all-in-one cluster for testing (like minikube)
+Cluster ops:
+    add-etcd <cluster> <args>        to add a etcd-node to the etcd cluster
+    add-master <cluster> <args>      to add a master node to the k8s cluster
+    add-node <cluster> <args>        to add a work node to the k8s cluster
+    del-etcd <cluster> <ip>          to delete a etcd-node from the etcd cluster
+    del-master <cluster> <ip>        to delete a master node from the k8s cluster
+    del-node <cluster> <ip>          to delete a work node from the k8s cluster
+    upgrade <cluster>                to upgrade the k8s cluster
+    destroy <cluster>                to destroy the current cluster, '--purge' to also delete the context
 
-In-cluster operation:
-    add-etcd     To add a etcd-node to the etcd cluster
-    add-master   To add a kube-master(master node) to the k8s cluster
-    add-node     To add a kube-node(work node) to the k8s cluster
-    del-etcd     To delete a etcd-node from the etcd cluster
-    del-master   To delete a kube-master from the k8s cluster
-    del-node     To delete a kube-node from the k8s cluster
-    upgrade      To upgrade the k8s cluster
-
-Extra operation:
-    basic-auth   To enable/disable basic-auth for apiserver
-
-Use "easzctl help <command>" for more information about a given command.
+Use "ezctl help <command>" for more information about a given command.
 EOF
 }
 
+function logger() {
+    TIMESTAMP=$(date +'%Y-%m-%d %H:%M:%S')
+    case "$1" in
+        debug)
+            echo -e "$TIMESTAMP \033[36mDEBUG\033[0m $2"
+            ;;
+        info)
+            echo -e "$TIMESTAMP \033[32mINFO\033[0m $2"
+            ;;
+        warn)
+            echo -e "$TIMESTAMP \033[33mWARN\033[0m $2"
+            ;;
+        error)
+            echo -e "$TIMESTAMP \033[31mERROR\033[0m $2"
+            ;;
+        *)
+            ;;
+    esac
+}
+
 function help-info() {
     case "$1" in
         (add-etcd)
@@ -53,9 +69,6 @@ function help-info() {
         (del-node)
             echo -e "Usage: easzctl del-node <node_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-node.md'"
             ;;
-        (basic-auth)
-            echo -e "Usage: easzctl basic-auth <options>\nOption:\t -s enable basic-auth\n\t -S disable basic-auth\n\t -u <user> set username\n\t -p <pass> set password"
-            ;;
         (*)
             usage
             return 0
@@ -63,12 +76,59 @@ function help-info() {
     esac
 }
 
-function process_cmd() {
-    echo -e "[INFO] \033[33m$ACTION\033[0m : $CMD"
-    $CMD || { echo -e "[ERROR] \033[31mAction failed\033[0m : $CMD"; return 1; }
-    echo -e "[INFO] \033[32mAction successed\033[0m : $CMD"
-}
+### Cluster setups functions ##############################
+
+function new() {
+    # check if already existed
+    [[ -d "clusters/$1" ]] && { logger error "cluster: $1 already existed"; exit 1; }
+    [[ "$1" == default ]] && { logger error "name 'default' is reserved for `ezctl start-aio`"; exit 1; }
+
+    logger debug "generate custom cluster files in clusters/$1"
+    mkdir -p "clusters/$1"
+    cp example/hosts.multi-node "clusters/$1/hosts"
+    sed -i "s/_cluster_name_/$1/g" "clusters/$1/hosts"
+    cp example/config.yml "clusters/$1/config.yml"
+
+    logger debug "cluster $1: files successfully created."
+    logger info "next steps 1: to config 'clusters/$1/hosts'"
+    logger info "next steps 2: to config 'clusters/$1/config.yml'"
+}
+
+function setup() {
+    [[ -d "clusters/$1" ]] || { logger error "invalid config, run 'ezctl new $1' first"; return 1; }
+    [[ -f "bin/kube-apiserver" ]] || { logger error "no binaries founded, run 'ezdown -D' fist"; return 1; }
+
+    logger info "\n cluster:$1 setup begins in 5s, press any key to abort\n:"
+    ! (read -t5 -n1 ANS) || { logger warn "setup aborted"; return 1; }
+
+    ansible-playbook -i "clusters/$1/hosts" -e "@clusters/$1/config.yml" playbooks/90.setup.yml || return 1
+}
+
+function list() {
+    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
+    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
+    echo -e "\nlist of managed contexts (current: \033[33m$CLUSTER\033[0m)"
+    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
+    do
+        echo -e "==> context $i:\t$c"
+        let "i++"
+    done
+    echo -e "\nlist of installed clusters (current: \033[33m$CLUSTER\033[0m)"
+    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
+    do
+        KUBECONF=$BASEPATH/.cluster/$c/config
+        if [ -f "$KUBECONF" ]; then
+            echo -e "==> cluster $i:\t$c"
+            $BASEPATH/bin/kubectl --kubeconfig=$KUBECONF get node
+        fi
+        let "i++"
+    done
+}
+
 
 ### in-cluster operation functions ##############################
 
 function add-node() {
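Taken together, the new() and setup() functions above imply a two-step workflow. A hedged sketch, assuming ezctl is run from /etc/kubeasz and 'k8s-01' is just an example cluster name:

cd /etc/kubeasz
./ezctl new k8s-01                 # creates clusters/k8s-01/hosts and clusters/k8s-01/config.yml
vi clusters/k8s-01/hosts           # fill in node IPs and groups
vi clusters/k8s-01/config.yml      # adjust the cluster-wide variables
./ezctl setup k8s-01               # runs ansible-playbook playbooks/90.setup.yml for that cluster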
@@ -256,34 +316,6 @@ function install_context() {
     fi
 }
 
-function checkout() {
-    # check directory '.cluster', initialize it if not existed
-    if [ ! -f "$BASEPATH/.cluster/current_cluster" ]; then
-        echo "[INFO] initialize directory $BASEPATH/.cluster"
-        mkdir -p $BASEPATH/.cluster/default
-        echo default > $BASEPATH/.cluster/current_cluster
-    fi
-
-    # check if $1 is already the current context
-    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
-    [ "$1" != "$CLUSTER" ] || { echo "[WARN] $1 is already the current context"; return 0; }
-
-    echo "[INFO] save current context: $CLUSTER"
-    save_context
-    echo "[INFO] clean context: $CLUSTER"
-    rm -rf $BASEPATH/hosts /root/.kube/* $BASEPATH/.cluster/ssl $BASEPATH/.cluster/kube-proxy.kubeconfig
-
-    # check context $1, install it if existed, otherwise initialize it using default context
-    if [ ! -d "$BASEPATH/.cluster/$1" ];then
-        echo "[INFO] context $1 not existed, initialize it using default context"
-        cp -rp $BASEPATH/.cluster/default $BASEPATH/.cluster/$1
-        rm -f $BASEPATH/.cluster/$1/hosts $BASEPATH/.cluster/$1/config
-    fi
-    echo "[INFO] change current context to $1"
-    echo $1 > $BASEPATH/.cluster/current_cluster
-    install_context;
-}
-
 function setup() {
     [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
     [ -f "$BASEPATH/bin/kube-apiserver" ] || { echo "[ERROR] no binaries found, download then fist"; return 1; }
@@ -405,7 +437,7 @@ function basic-auth(){
 
 ### Main Lines ##################################################
 
-BASEPATH=/etc/ansible
+BASEPATH=/etc/kubeasz
 
 [ "$#" -gt 0 ] || { usage >&2; exit 2; }
 
@@ -2,9 +2,9 @@
 #--------------------------------------------------
 # This script is for:
 # 1. to download the scripts/binaries/images needed for installing a k8s cluster with kubeasz
-# 2. to run kubeasz in a container (optional, not recommend)
+# 2. to run kubeasz in a container (optional)
 # @author: gjmzj
-# @usage: ./easzup
+# @usage: ./ezdown
 # @repo: https://github.com/easzlab/kubeasz
 # @ref: https://github.com/kubeasz/dockerfiles
 #--------------------------------------------------
@@ -12,7 +12,7 @@ set -o nounset
 set -o errexit
 #set -o xtrace
 
-# default version, can be overridden by cmd line options, see ./easzup
+# default version, can be overridden by cmd line options, see usage
 DOCKER_VER=19.03.14
 KUBEASZ_VER=2.2.3
 K8S_BIN_VER=v1.20.1
@@ -28,38 +28,75 @@ flannelVer=v0.13.0-amd64
 metricsVer=v0.3.6
 pauseVer=3.2
 
+function usage() {
+    echo -e "\033[33mUsage:\033[0m ezdown [options] [args]"
+    cat <<EOF
+option: -{DdekSz}
+  -C         stop&clean all local containers
+  -D         download all into "$BASE"
+  -P         download system packages for offline installing
+  -S         start kubeasz in a container
+  -d <ver>   set docker-ce version, default "$DOCKER_VER"
+  -e <ver>   set kubeasz-ext-bin version, default "$EXT_BIN_VER"
+  -k <ver>   set kubeasz-k8s-bin version, default "$K8S_BIN_VER"
+  -m <str>   set docker registry mirrors, default "CN"(used in Mainland,China)
+  -p <ver>   set kubeasz-sys-pkg version, default "$SYS_PKG_VER"
+  -z <ver>   set kubeasz version, default "$KUBEASZ_VER"
+
+see more at https://github.com/kubeasz/dockerfiles
+EOF
+}
+
+function logger() {
+    TIMESTAMP=$(date +'%Y-%m-%d %H:%M:%S')
+    case "$1" in
+        debug)
+            echo -e "$TIMESTAMP \033[36mDEBUG\033[0m $2"
+            ;;
+        info)
+            echo -e "$TIMESTAMP \033[32mINFO\033[0m $2"
+            ;;
+        warn)
+            echo -e "$TIMESTAMP \033[33mWARN\033[0m $2"
+            ;;
+        error)
+            echo -e "$TIMESTAMP \033[31mERROR\033[0m $2"
+            ;;
+        *)
+            ;;
+    esac
+}
+
 function download_docker() {
-    echo -e "[INFO] \033[33mdownloading docker binaries\033[0m $DOCKER_VER"
     if [[ "$REGISTRY_MIRROR" == CN ]];then
         DOCKER_URL="https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/static/stable/x86_64/docker-${DOCKER_VER}.tgz"
     else
         DOCKER_URL="https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VER}.tgz"
     fi
 
-    mkdir -p /opt/kube/bin /etc/ansible/down /etc/ansible/bin
-    if [[ -f "/etc/ansible/down/docker-${DOCKER_VER}.tgz" ]];then
-        echo "[INFO] docker binaries already existed"
+    if [[ -f "$BASE/down/docker-${DOCKER_VER}.tgz" ]];then
+        logger warn "docker binaries already existed"
     else
-        echo -e "[INFO] \033[33mdownloading docker binaries\033[0m $DOCKER_VER"
+        logger info "downloading docker binaries, version $DOCKER_VER"
         if [[ -e /usr/bin/curl ]];then
-            curl -C- -O --retry 3 "$DOCKER_URL" || { echo "[ERROR] downloading docker failed"; exit 1; }
+            curl -C- -O --retry 3 "$DOCKER_URL" || { logger error "downloading docker failed"; exit 1; }
         else
-            wget -c "$DOCKER_URL" || { echo "[ERROR] downloading docker failed"; exit 1; }
+            wget -c "$DOCKER_URL" || { logger error "downloading docker failed"; exit 1; }
         fi
-        /bin/mv -f ./docker-${DOCKER_VER}.tgz /etc/ansible/down
+        /bin/mv -f ./docker-${DOCKER_VER}.tgz $BASE/down
     fi
 
-    tar zxf /etc/ansible/down/docker-${DOCKER_VER}.tgz -C /etc/ansible/down && \
-    /bin/cp -f /etc/ansible/down/docker/* /etc/ansible/bin && \
-    /bin/mv -f /etc/ansible/down/docker/* /opt/kube/bin && \
+    tar zxf $BASE/down/docker-${DOCKER_VER}.tgz -C $BASE/down && \
+    /bin/cp -f $BASE/down/docker/* $BASE/bin && \
+    /bin/mv -f $BASE/down/docker/* /opt/kube/bin && \
     ln -sf /opt/kube/bin/docker /bin/docker
 }
 
 function install_docker() {
     # check if a container runtime is already installed
-    systemctl status docker|grep Active|grep -q running && { echo "[WARN] docker is already running."; return 0; }
+    systemctl status docker|grep Active|grep -q running && { logger warn "docker is already running."; return 0; }
 
-    echo "[INFO] generate docker service file"
+    logger debug "generate docker service file"
     cat > /etc/systemd/system/docker.service << EOF
 [Unit]
 Description=Docker Application Container Engine
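The usage text added above lists the supported flags. A hedged example of overriding the defaults (the version numbers are simply the defaults shown in this commit):

./ezdown -D -d 19.03.14 -k v1.20.1   # download with explicit docker-ce and k8s binary versions
./ezdown -S                          # start kubeasz in a container
./ezdown -C                          # stop & clean all local containers when finished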
@@ -82,9 +119,9 @@ EOF
 
     # configuration for dockerd
     mkdir -p /etc/docker
-    echo "[INFO] generate docker config file"
+    logger debug "generate docker config: /etc/docker/daemon.json"
     if [[ "$REGISTRY_MIRROR" == CN ]];then
-        echo "[INFO] prepare register mirror for $REGISTRY_MIRROR"
+        logger debug "prepare register mirror for $REGISTRY_MIRROR"
         cat > /etc/docker/daemon.json << EOF
 {
   "registry-mirrors": [
@@ -102,7 +139,7 @@ EOF
 }
 EOF
     else
-        echo "[INFO] standard config without registry mirrors"
+        logger debug "standard config without registry mirrors"
         cat > /etc/docker/daemon.json << EOF
 {
   "max-concurrent-downloads": 10,
@@ -118,87 +155,88 @@ EOF
     fi
 
     if [[ -e /etc/centos-release || -e /etc/redhat-release ]]; then
-        echo "[INFO] turn off selinux in CentOS/Redhat"
+        logger debug "turn off selinux in CentOS/Redhat"
        getenforce|grep Disabled || setenforce 0
        sed -i 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config
    fi
 
-    echo "[INFO] enable and start docker"
+    logger debug "enable and start docker"
     systemctl enable docker
     systemctl daemon-reload && systemctl restart docker && sleep 4
 }
 
 function get_kubeasz() {
     # check if kubeasz already existed
-    [[ -d "/etc/ansible/roles/kube-node" ]] && { echo "[WARN] kubeasz already existed"; return 0; }
+    [[ -d "$BASE/roles/kube-node" ]] && { logger warn "kubeasz already existed"; return 0; }
 
-    echo -e "[INFO] \033[33mdownloading kubeasz\033[0m $KUBEASZ_VER"
-    echo "[INFO] run a temporary container"
-    docker run -d --name temp_easz easzlab/kubeasz:${KUBEASZ_VER} || { echo "[ERROR] download failed."; exit 1; }
+    logger info "downloading kubeasz: $KUBEASZ_VER"
+    logger debug " run a temporary container"
+    docker run -d --name temp_easz easzlab/kubeasz:${KUBEASZ_VER} || { logger error "download failed."; exit 1; }
 
-    [[ -f "/etc/ansible/down/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "/etc/ansible/down/docker-${DOCKER_VER}.tgz" /tmp
-    [[ -d "/etc/ansible/bin" ]] && /bin/mv -f /etc/ansible/bin /tmp
+    [[ -f "$BASE/down/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "$BASE/down/docker-${DOCKER_VER}.tgz" /tmp
+    [[ -d "$BASE/bin" ]] && /bin/mv -f $BASE/bin /tmp
 
-    rm -rf /etc/ansible && \
-    echo "[INFO] cp kubeasz code from the temporary container" && \
-    docker cp temp_easz:/etc/ansible /etc/ansible && \
-    echo "[INFO] stop&remove temporary container" && \
+    rm -rf $BASE && \
+    logger debug "cp kubeasz code from the temporary container" && \
+    docker cp temp_easz:$BASE $BASE && \
+    logger debug "stop&remove temporary container" && \
     docker rm -f temp_easz
 
-    [[ -f "/tmp/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "/tmp/docker-${DOCKER_VER}.tgz" /etc/ansible/down
-    [[ -d "/tmp/bin" ]] && /bin/mv -f /tmp/bin/* /etc/ansible/bin
+    mkdir -p $BASE/bin
+    [[ -f "/tmp/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "/tmp/docker-${DOCKER_VER}.tgz" $BASE/down
+    [[ -d "/tmp/bin" ]] && /bin/mv -f /tmp/bin/* $BASE/bin
     return 0
 }
 
 function get_k8s_bin() {
-    [[ -f "/etc/ansible/bin/kubelet" ]] && { echo "[WARN] kubernetes binaries existed"; return 0; }
+    [[ -f "$BASE/bin/kubelet" ]] && { logger warn "kubernetes binaries existed"; return 0; }
 
-    echo -e "[INFO] \033[33mdownloading kubernetes\033[0m $K8S_BIN_VER binaries"
+    logger info "downloading kubernetes: $K8S_BIN_VER binaries"
     docker pull easzlab/kubeasz-k8s-bin:${K8S_BIN_VER} && \
-    echo "[INFO] run a temporary container" && \
+    logger debug "run a temporary container" && \
     docker run -d --name temp_k8s_bin easzlab/kubeasz-k8s-bin:${K8S_BIN_VER} && \
-    echo "[INFO] cp k8s binaries" && \
-    docker cp temp_k8s_bin:/k8s /etc/ansible/k8s_bin_tmp && \
-    /bin/mv -f /etc/ansible/k8s_bin_tmp/* /etc/ansible/bin && \
-    echo "[INFO] stop&remove temporary container" && \
+    logger debug "cp k8s binaries" && \
+    docker cp temp_k8s_bin:/k8s $BASE/k8s_bin_tmp && \
+    /bin/mv -f $BASE/k8s_bin_tmp/* $BASE/bin && \
+    logger debug "stop&remove temporary container" && \
     docker rm -f temp_k8s_bin && \
-    rm -rf /etc/ansible/k8s_bin_tmp
+    rm -rf $BASE/k8s_bin_tmp
 }
 
 function get_ext_bin() {
-    [[ -f "/etc/ansible/bin/etcdctl" ]] && { echo "[WARN] extral binaries existed"; return 0; }
+    [[ -f "$BASE/bin/etcdctl" ]] && { logger warn "extral binaries existed"; return 0; }
 
-    echo -e "[INFO] \033[33mdownloading extral binaries\033[0m kubeasz-ext-bin:$EXT_BIN_VER"
+    logger info "downloading extral binaries kubeasz-ext-bin:$EXT_BIN_VER"
    docker pull easzlab/kubeasz-ext-bin:${EXT_BIN_VER} && \
-    echo "[INFO] run a temporary container" && \
+    logger debug "run a temporary container" && \
     docker run -d --name temp_ext_bin easzlab/kubeasz-ext-bin:${EXT_BIN_VER} && \
-    echo "[INFO] cp extral binaries" && \
-    docker cp temp_ext_bin:/extra /etc/ansible/extra_bin_tmp && \
-    /bin/mv -f /etc/ansible/extra_bin_tmp/* /etc/ansible/bin && \
-    echo "[INFO] stop&remove temporary container" && \
+    logger debug "cp extral binaries" && \
+    docker cp temp_ext_bin:/extra $BASE/extra_bin_tmp && \
+    /bin/mv -f $BASE/extra_bin_tmp/* $BASE/bin && \
+    logger debug "stop&remove temporary container" && \
     docker rm -f temp_ext_bin && \
-    rm -rf /etc/ansible/extra_bin_tmp
+    rm -rf $BASE/extra_bin_tmp
 }
 
 function get_sys_pkg() {
-    [[ -f "/etc/ansible/down/packages/chrony_xenial.tar.gz" ]] && { echo "[WARN] system packages existed"; return 0; }
+    [[ -f "$BASE/down/packages/chrony_xenial.tar.gz" ]] && { logger warn "system packages existed"; return 0; }
 
-    echo -e "[INFO] \033[33mdownloading system packages\033[0m kubeasz-sys-pkg:$SYS_PKG_VER"
+    logger info "downloading system packages kubeasz-sys-pkg:$SYS_PKG_VER"
     docker pull easzlab/kubeasz-sys-pkg:${SYS_PKG_VER} && \
-    echo "[INFO] run a temporary container" && \
+    logger debug "run a temporary container" && \
     docker run -d --name temp_sys_pkg easzlab/kubeasz-sys-pkg:${SYS_PKG_VER} && \
-    echo "[INFO] cp system packages" && \
-    docker cp temp_sys_pkg:/packages /etc/ansible/down && \
-    echo "[INFO] stop&remove temporary container" && \
+    logger debug "cp system packages" && \
+    docker cp temp_sys_pkg:/packages $BASE/down && \
+    logger debug "stop&remove temporary container" && \
     docker rm -f temp_sys_pkg
 }
 
 function get_offline_image() {
 
-    imageDir=/etc/ansible/down
-    [[ -d "$imageDir" ]] || { echo "[ERROR] $imageDir not existed!"; exit 1; }
+    imageDir=$BASE/down
+    [[ -d "$imageDir" ]] || { logger error "$imageDir not existed!"; exit 1; }
 
-    echo -e "[INFO] \033[33mdownloading offline images\033[0m"
+    logger info "downloading offline images"
 
     if [[ ! -f "$imageDir/calico_$calicoVer.tar" ]];then
         docker pull "calico/cni:${calicoVer}" && \
@@ -239,6 +277,7 @@ function get_offline_image() {
 }
 
 function download_all() {
+    mkdir -p /opt/kube/bin "$BASE/down" "$BASE/bin"
     download_docker && \
     install_docker && \
     get_kubeasz && \
@@ -248,16 +287,17 @@
 }
 
 function start_kubeasz_docker() {
-    [[ -d "/etc/ansible/roles/kube-node" ]] || { echo "[ERROR] not initialized. try 'easzup -D' first."; exit 1; }
+    [[ -d "$BASE/roles/kube-node" ]] || { logger error "not initialized. try 'ezdown -D' first."; exit 1; }
 
+    logger info "try to run kubeasz in a container"
     # get host's IP
     host_if=$(ip route|grep default|cut -d' ' -f5)
     host_ip=$(ip a|grep "$host_if$"|awk '{print $2}'|cut -d'/' -f1)
-    echo "[INFO] get host IP: $host_ip"
+    logger debug "get host IP: $host_ip"
 
     # allow ssh login using key locally
     if [[ ! -e /root/.ssh/id_rsa ]]; then
-        echo "[INFO] generate ssh key pair"
+        logger debug "generate ssh key pair"
         ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa > /dev/null
         cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
         ssh-keyscan -t ecdsa -H "$host_ip" >> /root/.ssh/known_hosts
@@ -265,20 +305,19 @@ function start_kubeasz_docker() {
 
     # create a link '/usr/bin/python' in Ubuntu1604
     if [[ ! -e /usr/bin/python && -e /etc/debian_version ]]; then
-        echo "[INFO] create a soft link '/usr/bin/python'"
+        logger debug "create a soft link '/usr/bin/python'"
         ln -s /usr/bin/python3 /usr/bin/python
     fi
 
     #
-    docker load -i /etc/ansible/down/kubeasz_${KUBEASZ_VER}.tar
+    docker load -i $BASE/down/kubeasz_${KUBEASZ_VER}.tar
 
     # run kubeasz docker container
-    echo "[INFO] run kubeasz in a container"
     docker run --detach \
         --name kubeasz \
         --restart always \
         --env HOST_IP="$host_ip" \
-        --volume /etc/ansible:/etc/ansible \
+        --volume $BASE:$BASE \
         --volume /root/.kube:/root/.kube \
         --volume /root/.ssh/id_rsa:/root/.ssh/id_rsa:ro \
         --volume /root/.ssh/id_rsa.pub:/root/.ssh/id_rsa.pub:ro \
@@ -287,35 +326,19 @@
 }
 
 function clean_container() {
-    echo "[INFO] clean all running containers"
+    logger info "clean all running containers"
     docker ps -a|awk 'NR>1{print $1}'|xargs docker rm -f
 }
 
-function usage() {
-    cat <<EOF
-Usage: easzup [options] [args]
-option: -{DdekSz}
-  -C         stop&clean all local containers
-  -D         download all into /etc/ansible
-  -P         download system packages for offline installing
-  -S         start kubeasz in a container
-  -d <ver>   set docker-ce version, default "$DOCKER_VER"
-  -e <ver>   set kubeasz-ext-bin version, default "$EXT_BIN_VER"
-  -k <ver>   set kubeasz-k8s-bin version, default "$K8S_BIN_VER"
-  -m <str>   set docker registry mirrors, default "CN"(used in Mainland,China)
-  -p <ver>   set kubeasz-sys-pkg version, default "$SYS_PKG_VER"
-  -z <ver>   set kubeasz version, default "$KUBEASZ_VER"
-
-see more at https://github.com/kubeasz/dockerfiles
-EOF
-}
-
 ### Main Lines ##################################################
 function main() {
+    BASE="/etc/kubeasz"
+
     # check if use bash shell
-    readlink /proc/$$/exe|grep -q "dash" && { echo "[ERROR] you should use bash shell, not sh"; exit 1; }
+    readlink /proc/$$/exe|grep -q "dash" && { logger error "you should use bash shell, not sh"; exit 1; }
     # check if use with root
-    [[ "$EUID" -ne 0 ]] && { echo "[ERROR] you should run this script as root"; exit 1; }
+    [[ "$EUID" -ne 0 ]] && { logger error "you should run this script as root"; exit 1; }
 
     [[ "$#" -eq 0 ]] && { usage >&2; exit 1; }
 
@@ -360,12 +383,12 @@ function main() {
     esac
     done
 
-    [[ "$ACTION" == "" ]] && { echo "[ERROR] illegal option"; usage; exit 1; }
+    [[ "$ACTION" == "" ]] && { logger error "illegal option"; usage; exit 1; }
 
     # excute cmd "$ACTION"
-    echo -e "[INFO] \033[33mAction begin\033[0m : $ACTION"
-    ${ACTION} || { echo -e "[ERROR] \033[31mAction failed\033[0m : $ACTION"; return 1; }
-    echo -e "[INFO] \033[32mAction successed\033[0m : $ACTION"
+    logger info "Action begin: $ACTION"
+    ${ACTION} || { logger error "Action failed: $ACTION"; return 1; }
+    logger info "Action successed: $ACTION"
 }
 
 main "$@"
@@ -12,9 +12,9 @@
   shell: 'for ip in {{ NODE_IPS }};do \
             ETCDCTL_API=3 {{ base_dir }}/bin/etcdctl \
             --endpoints=https://"$ip":2379 \
-            --cacert={{ base_dir }}/.cluster/ssl/ca.pem \
-            --cert={{ base_dir }}/.cluster/ssl/admin.pem \
-            --key={{ base_dir }}/.cluster/ssl/admin-key.pem \
+            --cacert={{ cluster_dir }}/ssl/ca.pem \
+            --cert={{ cluster_dir }}/ssl/admin.pem \
+            --key={{ cluster_dir }}/ssl/admin-key.pem \
             endpoint health; \
           done'
   register: ETCD_CLUSTER_STATUS
@@ -24,9 +24,9 @@
   shell: 'for ip in {{ NODE_IPS }};do \
             ETCDCTL_API=3 {{ base_dir }}/bin/etcdctl \
            --endpoints=https://"$ip":2379 \
-            --cacert={{ base_dir }}/.cluster/ssl/ca.pem \
-            --cert={{ base_dir }}/.cluster/ssl/admin.pem \
-            --key={{ base_dir }}/.cluster/ssl/admin-key.pem \
+            --cacert={{ cluster_dir }}/ssl/ca.pem \
+            --cert={{ cluster_dir }}/ssl/admin.pem \
+            --key={{ cluster_dir }}/ssl/admin-key.pem \
             endpoint health; \
          done'
   register: ETCD_CLUSTER_STATUS
@@ -1,7 +1,6 @@
 # WARNING: Upgrade the k8s cluster can be risky. Make sure you know what you are doing.
 # Read the guide: 'op/upgrade.md' .
-# Usage: `ansible-playbook /etc/ansible/22.upgrade.yml -t upgrade_k8s`
-# or `easzctl upgrade`
+# Usage: ezctl <cluster_name> upgrade
 
 # update masters
 - hosts:
@@ -12,9 +12,9 @@
   shell: 'for ip in {{ NODE_IPS }};do \
             ETCDCTL_API=3 {{ base_dir }}/bin/etcdctl \
             --endpoints=https://"$ip":2379 \
-            --cacert={{ base_dir }}/.cluster/ssl/ca.pem \
-            --cert={{ base_dir }}/.cluster/ssl/admin.pem \
-            --key={{ base_dir }}/.cluster/ssl/admin-key.pem \
+            --cacert={{ cluster_dir }}/ssl/ca.pem \
+            --cert={{ cluster_dir }}/ssl/admin.pem \
+            --key={{ cluster_dir }}/ssl/admin-key.pem \
             endpoint health; \
           done'
   register: ETCD_CLUSTER_STATUS
@@ -42,7 +42,7 @@
 - name: fetch the backup data
   fetch:
     src: /etcd_backup/snapshot_{{ temp }}.db
-    dest: "{{ base_dir }}/.cluster/backup/"
+    dest: "{{ cluster_dir }}/backup/"
     flat: yes
   delegate_to: "{{ RUNNING_NODE.stdout }}"
 
@@ -52,14 +52,14 @@
 - name: Backing up ansible hosts-1
   copy:
     src: "{{ base_dir }}/hosts"
-    dest: "{{ base_dir }}/.cluster/backup/hosts"
+    dest: "{{ cluster_dir }}/backup/hosts"
   register: p
 
 - name: Backing up ansible hosts-2
-  shell: "cd {{ base_dir }}/.cluster/backup && \
+  shell: "cd {{ cluster_dir }}/backup && \
         cp -fp hosts hosts-$(date +'%Y%m%d%H%M')"
   when: 'p is changed'
 
 #- name: Backing up etcd snapshot with datetime
-#  shell: "cd {{ base_dir }}/.cluster/backup && \
+#  shell: "cd {{ cluster_dir }}/backup && \
 #        cp -fp snapshot.db snapshot-$(date +'%Y%m%d%H%M').db"
@@ -1,28 +0,0 @@
-# part of the calico settings; for the full set, customize roles/calico/templates/calico.yaml.j2
-
-# list of etcd cluster endpoints, generated automatically from the etcd group members
-TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}"
-ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}"
-
-# setting CALICO_IPV4POOL_IPIP="off" can improve network performance; see docs/setup/calico.md for the constraints
-CALICO_IPV4POOL_IPIP: "Always"
-
-# Felix log level
-FELIX_LOG_LVL: "warning"
-
-# host IP used by calico-node; bgp neighbors peer over this address; set it manually or let it be auto-detected
-#IP_AUTODETECTION_METHOD: "interface=eth0"
-IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube-master'][0] }}"
-
-# calico network backend: brid, vxlan, none
-CALICO_NETWORKING_BACKEND: "brid"
-
-# supported calico versions: [v3.3.x] [v3.4.x] [v3.8.x] [v3.15.x]
-calicoVer: "v3.15.3"
-calico_ver: "{{ calicoVer }}"
-
-# calico major.minor version
-calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"
-
-# offline image tarball
-calico_offline: "calico_{{ calico_ver }}.tar"
@@ -344,7 +344,7 @@ spec:
             value: "false"
           # Set Felix logging to "info"
          - name: FELIX_LOGSEVERITYSCREEN
-            value: "info"
+            value: "warning"
          - name: FELIX_HEALTHENABLED
            value: "true"
          # Set Kubernetes NodePorts: If services do use NodePorts outside Calico’s expected range,
@@ -180,7 +180,7 @@ spec:
             value: "false"
          # Set Felix logging
          - name: FELIX_LOGSEVERITYSCREEN
-            value: "{{ FELIX_LOG_LVL }}"
+            value: "warning"
          - name: FELIX_HEALTHENABLED
            value: "true"
          # Set Kubernetes NodePorts: If services do use NodePorts outside Calico’s expected range,
@@ -218,7 +218,7 @@ spec:
             value: "false"
          # Set Felix logging
          - name: FELIX_LOGSEVERITYSCREEN
-            value: "{{ FELIX_LOG_LVL }}"
+            value: "warning"
          - name: FELIX_HEALTHENABLED
            value: "true"
          # Set Kubernetes NodePorts: If services do use NodePorts outside Calico’s expected range,
@@ -318,7 +318,7 @@ spec:
             value: "false"
          # Set Felix logging to "info"
          - name: FELIX_LOGSEVERITYSCREEN
-            value: "{{ FELIX_LOG_LVL }}"
+            value: "warning"
          - name: FELIX_HEALTHENABLED
            value: "true"
          # Set Kubernetes NodePorts: If services do use NodePorts outside Calico’s expected range,
@@ -0,0 +1,3 @@
+# list of etcd cluster endpoints, generated automatically from the etcd group members
+TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}"
+ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}"
@@ -1,13 +0,0 @@
-# NTP time source servers
-ntp_servers:
-  - "ntp1.aliyun.com"
-  - "ntp2.aliyun.com"
-  - "time1.cloud.tencent.com"
-  - "time2.cloud.tencent.com"
-  - "0.cn.pool.ntp.org"
-
-# network segments allowed to sync time from the internal NTP server, e.g. "10.0.0.0/8"; all allowed by default
-local_network: "0.0.0.0/0"
-
-# offline installation of chrony (offline|online)
-INSTALL_SOURCE: "online"
@@ -1,14 +0,0 @@
-# part of the cilium settings; Note: cilium requires Linux kernel >= 4.9.17
-
-# set to true if the node has multiple network interfaces
-# set to true if you see errors like 'dial tcp 10.68.0.1:443: i/o timeout'
-NODE_WITH_MULTIPLE_NETWORKS: "true"
-
-# image version
-cilium_ver: "v1.4.1"
-
-# offline image tarball
-cilium_offline: "cilium_{{ cilium_ver }}.tar"
-
-# number of etcd nodes created by CILIUM_ETCD_OPERATOR: 1,3,5,7...
-ETCD_CLUSTER_SIZE: 1
@@ -1,10 +1,8 @@
-# automatic dns installation; 'dns_backend' can be "coredns" or "kubedns"
-dns_install: "yes"
-dns_backend: "coredns"
 # dns service ip (the 2nd available IP of SERVICE_CIDR is used here)
 CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(2) | ipaddr('address') }}"
 
+#
 kubednsVer: "1.14.13"
-corednsVer: "1.7.1"
 kubedns_offline: "kubedns_{{ kubednsVer }}.tar"
 coredns_offline: "coredns_{{ corednsVer }}.tar"
 dns_offline: "{%- if dns_backend == 'coredns' -%} \
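The CLUSTER_DNS_SVC_IP expression kept above selects the second address of SERVICE_CIDR with the ipaddr filter. A quick way to see what it resolves to (the SERVICE_CIDR value and an installed netaddr library are assumptions, not part of this commit):

ansible localhost -m debug -a "msg={{'10.68.0.0/16'|ipaddr('net')|ipaddr(2)|ipaddr('address')}}"
# expected to print: "msg": "10.68.0.2"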
@@ -13,17 +11,9 @@ dns_offline: "{%- if dns_backend == 'coredns' -%} \
                 {{ kubedns_offline }} \
               {%- endif -%}"
 
-# automatic metrics server installation
-metricsserver_install: "yes"
-metricsVer: "v0.3.6"
 metricsserver_offline: "metrics-server_{{ metricsVer }}.tar"
 
-# automatic dashboard installation
-# dashboard v2.x.x does not depend on heapster
-dashboard_install: "yes"
-dashboardVer: "v2.0.4"
 dashboard_offline: "dashboard_{{ dashboardVer }}.tar"
-dashboardMetricsScraperVer: "v1.0.4"
 metricsscraper_offline: "metrics-scraper_{{ dashboardMetricsScraperVer }}.tar"
 
 # automatic ingress installation; options: "traefik" and "nginx-ingress"
@@ -36,9 +26,3 @@ metricsscraper_offline: "metrics-scraper_{{ dashboardMetricsScraperVer }}.tar"
 #metallb_protocol: "layer2"
 #metallb_offline: "metallb_{{ metallbVer }}.tar"
 #metallb_vip_pool: "192.168.1.240/29"
-
-# automatic efk installation
-#efk_install: "no"
-
-# automatic prometheus installation
-#prometheus_install: "no"
@@ -1,5 +1,5 @@
 # the etcd backup to restore; the most recent backup is used by default
-# check the backup directory on the ansible control node: /etc/ansible/.cluster/backup
+# check the backup directory on the ansible control node: /etc/kubeasz/clusters/_cluster_name_/backup
 db_to_restore: "snapshot.db"
 
 # IPs and ports for etcd cluster communication, generated automatically from the etcd group members
@@ -9,7 +9,7 @@
 
 - name: prepare the specified etcd backup data
   copy:
-    src: "{{ base_dir }}/.cluster/backup/{{ db_to_restore }}"
+    src: "{{ cluster_dir }}/backup/{{ db_to_restore }}"
     dest: "/etcd_backup/snapshot.db"
 
 - name: clean up data restored from the last backup
@@ -1,8 +0,0 @@
-# container persistent storage directory
-STORAGE_DIR: "/var/lib/containerd"
-
-# base (pause) container image
-SANDBOX_IMAGE: "easzlab/pause-amd64:3.2"
-
-# enable the container registry mirror
-ENABLE_MIRROR_REGISTRY: true
@@ -1,4 +1,4 @@
-root = "{{ STORAGE_DIR }}"
+root = "{{ CONTAINERD_STORAGE_DIR }}"
 state = "/run/containerd"
 oom_score = -999
 
@@ -1,13 +0,0 @@
-# CA certificate parameters
-CA_EXPIRY: "876000h"
-CERT_EXPIRY: "438000h"
-
-# apiserver, the first master node by default
-KUBE_APISERVER: "https://{{ groups['kube-master'][0] }}:6443"
-
-# kubeconfig parameters; note that permissions depend on 'USER_NAME':
-# 'admin' creates a kubeconfig with cluster-admin (full) privileges
-# 'read' creates a kubeconfig with read-only privileges
-CLUSTER_NAME: "cluster1"
-USER_NAME: "admin"
-CONTEXT_NAME: "context-{{ CLUSTER_NAME }}-{{ USER_NAME }}"
@@ -1,8 +1,8 @@
 - name: prepare the kube-controller-manager certificate signing request
-  template: src=kube-controller-manager-csr.json.j2 dest={{ base_dir }}/.cluster/ssl/kube-controller-manager-csr.json
+  template: src=kube-controller-manager-csr.json.j2 dest={{ cluster_dir }}/ssl/kube-controller-manager-csr.json
 
 - name: create the kube-controller-manager certificate and private key
-  shell: "cd {{ base_dir }}/.cluster/ssl && {{ base_dir }}/bin/cfssl gencert \
+  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
         -ca=ca.pem \
         -ca-key=ca-key.pem \
         -config=ca-config.json \
@@ -10,24 +10,24 @@
 
 - name: set cluster parameters
   shell: "{{ base_dir }}/bin/kubectl config set-cluster kubernetes \
-        --certificate-authority={{ base_dir }}/.cluster/ssl/ca.pem \
+        --certificate-authority={{ cluster_dir }}/ssl/ca.pem \
         --embed-certs=true \
         --server={{ KUBE_APISERVER }} \
-        --kubeconfig={{ base_dir }}/.cluster/kube-controller-manager.kubeconfig"
+        --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig"
 
 - name: set authentication parameters
   shell: "{{ base_dir }}/bin/kubectl config set-credentials system:kube-controller-manager \
-        --client-certificate={{ base_dir }}/.cluster/ssl/kube-controller-manager.pem \
-        --client-key={{ base_dir }}/.cluster/ssl/kube-controller-manager-key.pem \
+        --client-certificate={{ cluster_dir }}/ssl/kube-controller-manager.pem \
+        --client-key={{ cluster_dir }}/ssl/kube-controller-manager-key.pem \
         --embed-certs=true \
-        --kubeconfig={{ base_dir }}/.cluster/kube-controller-manager.kubeconfig"
+        --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig"
 
 - name: set context parameters
   shell: "{{ base_dir }}/bin/kubectl config set-context default \
         --cluster=kubernetes \
         --user=system:kube-controller-manager \
-        --kubeconfig={{ base_dir }}/.cluster/kube-controller-manager.kubeconfig"
+        --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig"
 
 - name: select the default context
   shell: "{{ base_dir }}/bin/kubectl config use-context default \
-        --kubeconfig={{ base_dir }}/.cluster/kube-controller-manager.kubeconfig"
+        --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig"
@@ -1,8 +1,8 @@
 - name: prepare the kube-proxy certificate signing request
-  template: src=kube-proxy-csr.json.j2 dest={{ base_dir }}/.cluster/ssl/kube-proxy-csr.json
+  template: src=kube-proxy-csr.json.j2 dest={{ cluster_dir }}/ssl/kube-proxy-csr.json
 
 - name: create the kube-proxy certificate and private key
-  shell: "cd {{ base_dir }}/.cluster/ssl && {{ base_dir }}/bin/cfssl gencert \
+  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
         -ca=ca.pem \
         -ca-key=ca-key.pem \
         -config=ca-config.json \
@ -10,24 +10,24 @@
|
||||||
|
|
||||||
- name: 设置集群参数
|
- name: 设置集群参数
|
||||||
shell: "{{ base_dir }}/bin/kubectl config set-cluster kubernetes \
|
shell: "{{ base_dir }}/bin/kubectl config set-cluster kubernetes \
|
||||||
--certificate-authority={{ base_dir }}/.cluster/ssl/ca.pem \
|
--certificate-authority={{ cluster_dir }}/ssl/ca.pem \
|
||||||
--embed-certs=true \
|
--embed-certs=true \
|
||||||
--server={{ KUBE_APISERVER }} \
|
--server={{ KUBE_APISERVER }} \
|
||||||
--kubeconfig={{ base_dir }}/.cluster/kube-proxy.kubeconfig"
|
--kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig"
|
||||||
|
|
||||||
- name: 设置客户端认证参数
|
- name: 设置客户端认证参数
|
||||||
shell: "{{ base_dir }}/bin/kubectl config set-credentials kube-proxy \
|
shell: "{{ base_dir }}/bin/kubectl config set-credentials kube-proxy \
|
||||||
--client-certificate={{ base_dir }}/.cluster/ssl/kube-proxy.pem \
|
--client-certificate={{ cluster_dir }}/ssl/kube-proxy.pem \
|
||||||
--client-key={{ base_dir }}/.cluster/ssl/kube-proxy-key.pem \
|
--client-key={{ cluster_dir }}/ssl/kube-proxy-key.pem \
|
||||||
--embed-certs=true \
|
--embed-certs=true \
|
||||||
--kubeconfig={{ base_dir }}/.cluster/kube-proxy.kubeconfig"
|
--kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig"
|
||||||
|
|
||||||
- name: 设置上下文参数
|
- name: 设置上下文参数
|
||||||
shell: "{{ base_dir }}/bin/kubectl config set-context default \
|
shell: "{{ base_dir }}/bin/kubectl config set-context default \
|
||||||
--cluster=kubernetes \
|
--cluster=kubernetes \
|
||||||
--user=kube-proxy \
|
--user=kube-proxy \
|
||||||
--kubeconfig={{ base_dir }}/.cluster/kube-proxy.kubeconfig"
|
--kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig"
|
||||||
|
|
||||||
- name: 选择默认上下文
|
- name: 选择默认上下文
|
||||||
shell: "{{ base_dir }}/bin/kubectl config use-context default \
|
shell: "{{ base_dir }}/bin/kubectl config use-context default \
|
||||||
--kubeconfig={{ base_dir }}/.cluster/kube-proxy.kubeconfig"
|
--kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig"
|
||||||
|
|
|
@@ -1,8 +1,8 @@
 - name: Prepare the kube-scheduler certificate signing request
-  template: src=kube-scheduler-csr.json.j2 dest={{ base_dir }}/.cluster/ssl/kube-scheduler-csr.json
+  template: src=kube-scheduler-csr.json.j2 dest={{ cluster_dir }}/ssl/kube-scheduler-csr.json

 - name: Create the kube-scheduler certificate and private key
-  shell: "cd {{ base_dir }}/.cluster/ssl && {{ base_dir }}/bin/cfssl gencert \
+  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
        -ca=ca.pem \
        -ca-key=ca-key.pem \
        -config=ca-config.json \
@@ -10,24 +10,24 @@

 - name: Set cluster parameters
   shell: "{{ base_dir }}/bin/kubectl config set-cluster kubernetes \
-        --certificate-authority={{ base_dir }}/.cluster/ssl/ca.pem \
+        --certificate-authority={{ cluster_dir }}/ssl/ca.pem \
         --embed-certs=true \
         --server={{ KUBE_APISERVER }} \
-        --kubeconfig={{ base_dir }}/.cluster/kube-scheduler.kubeconfig"
+        --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig"

 - name: Set authentication parameters
   shell: "{{ base_dir }}/bin/kubectl config set-credentials system:kube-scheduler \
-        --client-certificate={{ base_dir }}/.cluster/ssl/kube-scheduler.pem \
+        --client-certificate={{ cluster_dir }}/ssl/kube-scheduler.pem \
-        --client-key={{ base_dir }}/.cluster/ssl/kube-scheduler-key.pem \
+        --client-key={{ cluster_dir }}/ssl/kube-scheduler-key.pem \
         --embed-certs=true \
-        --kubeconfig={{ base_dir }}/.cluster/kube-scheduler.kubeconfig"
+        --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig"

 - name: Set context parameters
   shell: "{{ base_dir }}/bin/kubectl config set-context default \
         --cluster=kubernetes \
         --user=system:kube-scheduler \
-        --kubeconfig={{ base_dir }}/.cluster/kube-scheduler.kubeconfig"
+        --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig"

 - name: Select the default context
   shell: "{{ base_dir }}/bin/kubectl config use-context default \
-        --kubeconfig={{ base_dir }}/.cluster/kube-scheduler.kubeconfig"
+        --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig"
@@ -11,10 +11,10 @@
   when: USER_NAME == "read"

 - name: Prepare the {{ USER_NAME }} certificate signing request used by kubectl
-  template: src={{ USER_NAME }}-csr.json.j2 dest={{ base_dir }}/.cluster/ssl/{{ USER_NAME }}-csr.json
+  template: src={{ USER_NAME }}-csr.json.j2 dest={{ cluster_dir }}/ssl/{{ USER_NAME }}-csr.json

 - name: Create the {{ USER_NAME }} certificate and private key
-  shell: "cd {{ base_dir }}/.cluster/ssl && {{ base_dir }}/bin/cfssl gencert \
+  shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
        -ca=ca.pem \
        -ca-key=ca-key.pem \
        -config=ca-config.json \
@@ -22,15 +22,15 @@

 - name: Set cluster parameters
   shell: "{{ base_dir }}/bin/kubectl config set-cluster {{ CLUSTER_NAME }} \
-        --certificate-authority={{ base_dir }}/.cluster/ssl/ca.pem \
+        --certificate-authority={{ cluster_dir }}/ssl/ca.pem \
         --embed-certs=true \
         --server={{ KUBE_APISERVER }}"

 - name: Set client authentication parameters
   shell: "{{ base_dir }}/bin/kubectl config set-credentials {{ USER_NAME }} \
-        --client-certificate={{ base_dir }}/.cluster/ssl/{{ USER_NAME }}.pem \
+        --client-certificate={{ cluster_dir }}/ssl/{{ USER_NAME }}.pem \
         --embed-certs=true \
-        --client-key={{ base_dir }}/.cluster/ssl/{{ USER_NAME }}-key.pem"
+        --client-key={{ cluster_dir }}/ssl/{{ USER_NAME }}-key.pem"

 - name: Set context parameters
   shell: "{{ base_dir }}/bin/kubectl config set-context {{ CONTEXT_NAME }} \
@@ -1,8 +1,8 @@
 - name: prepare some dirs
   file: name={{ item }} state=directory
   with_items:
-  - "{{ base_dir }}/.cluster/ssl"
+  - "{{ cluster_dir }}/ssl"
-  - "{{ base_dir }}/.cluster/backup"
+  - "{{ cluster_dir }}/backup"

 - name: Set permissions on the local bin directory
   file: path={{ base_dir }}/bin state=directory mode=0755 recurse=yes
@@ -10,11 +10,11 @@
 # Register variable p; its stat info tells whether a CA certificate has already been generated, and if not it is generated in the next step
 # If a CA certificate already exists, the generation step is skipped to keep the whole installation idempotent
 - name: Read the stat info of the CA certificate
-  stat: path="{{ base_dir }}/.cluster/ssl/ca.pem"
+  stat: path="{{ cluster_dir }}/ssl/ca.pem"
   register: p

 - name: Prepare the CA config file and signing request
-  template: src={{ item }}.j2 dest={{ base_dir }}/.cluster/ssl/{{ item }}
+  template: src={{ item }}.j2 dest={{ cluster_dir }}/ssl/{{ item }}
   with_items:
   - "ca-config.json"
   - "ca-csr.json"
@@ -22,7 +22,7 @@

 - name: Generate the CA certificate and private key
   when: p.stat.isreg is not defined
-  shell: "cd {{ base_dir }}/.cluster/ssl && \
+  shell: "cd {{ cluster_dir }}/ssl && \
         {{ base_dir }}/bin/cfssl gencert -initca ca-csr.json | {{ base_dir }}/bin/cfssljson -bare ca"

 #----------- Create the config file: /root/.kube/config
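A hedged way to sanity-check the CA produced by the task above, assuming the same hypothetical cluster directory; cfssl certinfo and openssl are only suggestions, not part of the playbook:

cd /etc/kubeasz/clusters/cluster1/ssl            # hypothetical cluster_dir
cfssl certinfo -cert ca.pem                      # print subject, issuer and validity (assumes cfssl is on PATH; kubeasz ships it under its bin/ directory)
openssl x509 -in ca.pem -noout -subject -dates   # the same check with openssl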
@@ -0,0 +1,2 @@
+# apiserver defaults to the first master node
+KUBE_APISERVER: "https://{{ groups['kube-master'][0] }}:6443"
@@ -1,14 +0,0 @@
-# docker container storage directory
-STORAGE_DIR: "/var/lib/docker"
-
-# enable the docker remote (Restful) API
-ENABLE_REMOTE_API: false
-
-# enable docker registry mirrors
-ENABLE_MIRROR_REGISTRY: true
-
-# docker registry mirrors
-REG_MIRRORS: '["https://docker.mirrors.ustc.edu.cn", "http://hub-mirror.c.163.com"]'
-
-# trusted insecure (HTTP) registries
-INSECURE_REG: '["127.0.0.1/8"]'
@@ -1,8 +1,11 @@
 {
-  "data-root": "{{ STORAGE_DIR }}",
+  "data-root": "{{ DOCKER_STORAGE_DIR }}",
   "exec-opts": ["native.cgroupdriver=cgroupfs"],
 {% if ENABLE_MIRROR_REGISTRY %}
-  "registry-mirrors": {{ REG_MIRRORS }},
+  "registry-mirrors": [
+    "https://docker.mirrors.ustc.edu.cn",
+    "http://hub-mirror.c.163.com"
+  ],
 {% endif %}
 {% if ENABLE_REMOTE_API %}
   "hosts": ["tcp://0.0.0.0:2376", "unix:///var/run/docker.sock"],
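Once this template is rendered on a node, the effect of ENABLE_MIRROR_REGISTRY can be checked with ordinary docker commands; a small illustrative check, not prescribed by this commit:

cat /etc/docker/daemon.json                              # the rendered template (standard dockerd config path)
docker info 2>/dev/null | grep -A 3 'Registry Mirrors'   # should list the two mirrors when ENABLE_MIRROR_REGISTRY is true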
@@ -14,7 +14,7 @@
   tags: upgrade_etcd

 - name: Distribute the certificates
-  copy: src={{ base_dir }}/.cluster/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
+  copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
   with_items:
   - ca.pem
   - ca-key.pem
@@ -1,13 +0,0 @@
-# partial flannel configuration, see docs/setup/network-plugin/flannel.md
-
-# flannel backend
-#FLANNEL_BACKEND: "host-gw"
-FLANNEL_BACKEND: "vxlan"
-DIRECT_ROUTING: false
-
-#flanneld_image: "quay.io/coreos/flannel:v0.10.0-amd64"
-flannelVer: "v0.13.0-amd64"
-flanneld_image: "easzlab/flannel:{{ flannelVer }}"
-
-# offline image tarball
-flannel_offline: "flannel_{{ flannelVer }}.tar"
@@ -34,7 +34,7 @@

 - block:
   - name: Generate the self-signed certificate files
-    copy: src={{ base_dir }}/.cluster/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
+    copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
     with_items:
     - ca.pem
     - ca-key.pem
@@ -1,9 +1,6 @@
-# harbor version, full version number; currently v1.5.x, v1.6.x, v1.7.x, v1.8.x, v1.9.x, v1.10.x are supported
-HARBOR_VER: "v1.9.4"
-
 # harbor major version; currently v1.5/v1.6/v1.7/v1.8/v1.9/v.10 are supported
 # extract the major version v1.5/v1.6/v1.7/v1.8/v1.9/v.10 from the full version number
 HARBOR_VER_MAIN: "{{ HARBOR_VER.split('.')[0] }}.{{ HARBOR_VER.split('.')[1] }}"

 # HARBOR_HOSTNAME value
-HARBOR_HOSTNAME: "{{ inventory_hostname if (HARBOR_DOMAIN == '') else HARBOR_DOMAIN }}"
+HARBOR_HOSTNAME: "{% if HARBOR_DOMAIN != '' %}HARBOR_DOMAIN{% else %}inventory_hostname{% endif %}"
@@ -1,29 +0,0 @@
-# etcd cluster service endpoint list, generated automatically from the etcd group members
-TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}"
-ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}"
-# dns svc ip (the 1st IP in SERVICE_CIDR is used here)
-CLUSTER_KUBERNETES_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(1) | ipaddr('address') }}"
-# k8s master node certificate configuration; more IPs and domains can be added (e.g. a public IP and domain)
-MASTER_CERT_HOSTS:
-  - "10.1.1.1"
-  - "k8s.test.io"
-  #- "61.182.11.41"
-  #- "www.test.com"
-
-# apiserver basic auth (username/password) configuration, see '/etc/kubernetes/ssl/basic-auth.csv' on the master nodes
-# the first three values can be anything;
-# These tokens are arbitrary but should represent at least 128 bits of entropy derived from
-# a secure random number generator, for example:
-# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
-# example: 02b50b05283e98dd0fd71db496ef01e8,kubelet-bootstrap,10001,"system:bootstrappers"
-# enable basic auth yes/no
-BASIC_AUTH_ENABLE: 'yes'
-# username:
-BASIC_AUTH_USER: 'admin'
-# password: a random password is generated on the first run
-BASIC_AUTH_PASS: '92c068405aa491239b56140ea6b3b44b'
-
-# pod subnet mask length on each node (determines the maximum number of pod IPs a node can allocate)
-# if flannel runs with --kube-subnet-mgr, it reads this setting to assign a pod subnet to each node
-# https://github.com/coreos/flannel/issues/847
-NODE_CIDR_LEN: 24
@@ -30,25 +30,6 @@
        -config={{ ca_dir }}/ca-config.json \
        -profile=kubernetes aggregator-proxy-csr.json | {{ bin_dir }}/cfssljson -bare aggregator-proxy"

-- block:
-  - name: Generate a random basic-auth password
-    shell: 'PWD=`head -c 16 /dev/urandom | od -An -t x | tr -d " "`; \
-          sed -i "s/_pwd_/$PWD/g" {{ base_dir }}/roles/kube-master/defaults/main.yml; \
-          echo $PWD;'
-    connection: local
-    register: TMP_PASS
-    run_once: true
-
-  - name: Set the random basic-auth password
-    set_fact: BASIC_AUTH_PASS={{ TMP_PASS.stdout }}
-  when: 'BASIC_AUTH_ENABLE == "yes" and BASIC_AUTH_PASS == "_pwd_"'
-  tags: restart_master
-
-- name: Create basic-auth.csv
-  template: src=basic-auth.csv.j2 dest={{ ca_dir }}/basic-auth.csv
-  when: 'BASIC_AUTH_ENABLE == "yes"'
-  tags: restart_master
-
 - name: Replace the apiserver address in the kubeconfig files
   lineinfile:
     dest: "{{ item }}"
@@ -84,14 +65,3 @@
   retries: 5
   delay: 6
   tags: upgrade_k8s, restart_master
-
-- name: Configure RBAC permissions for the {{ BASIC_AUTH_USER }} user
-  template: src=basic-auth-rbac.yaml.j2 dest=/opt/kube/basic-auth-rbac.yaml
-  when: 'BASIC_AUTH_ENABLE == "yes"'
-  tags: restart_master
-
-- name: Create RBAC permissions for the {{ BASIC_AUTH_USER }} user
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/basic-auth-rbac.yaml"
-  when: 'BASIC_AUTH_ENABLE == "yes"'
-  run_once: true
-  tags: restart_master
@@ -1,12 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: basic-auth-binding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: cluster-admin
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: User
-  name: {{ BASIC_AUTH_USER }}
@@ -1,2 +0,0 @@
-{{ BASIC_AUTH_PASS }},{{ BASIC_AUTH_USER }},1
-{{ BASIC_AUTH_PASS | truncate(8, True, '') }},readonly,2
@@ -10,9 +10,6 @@ ExecStart={{ bin_dir }}/kube-apiserver \
  --anonymous-auth=false \
  --api-audiences=api,istio-ca \
  --authorization-mode=Node,RBAC \
-{% if BASIC_AUTH_ENABLE == "yes" %}
- --token-auth-file={{ ca_dir }}/basic-auth.csv \
-{% endif %}
  --bind-address={{ inventory_hostname }} \
  --client-ca-file={{ ca_dir }}/ca.pem \
  --endpoint-reconciler-type=lease \
@@ -10,7 +10,7 @@ ExecStart={{ bin_dir }}/kube-controller-manager \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file={{ ca_dir }}/ca.pem \
  --cluster-signing-key-file={{ ca_dir }}/ca-key.pem \
- --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
+ --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig \
  --leader-elect=true \
  --node-cidr-mask-size={{ NODE_CIDR_LEN }} \
  --root-ca-file={{ ca_dir }}/ca.pem \
@@ -5,7 +5,7 @@ Documentation=https://github.com/GoogleCloudPlatform/kubernetes
 [Service]
 ExecStart={{ bin_dir }}/kube-scheduler \
  --address=127.0.0.1 \
- --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
+ --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig \
  --leader-elect=true \
  --v=2
 Restart=always
@@ -0,0 +1,6 @@
+# etcd cluster service endpoint list, generated automatically from the etcd group members
+TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}"
+ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}"
+
+# dns svc ip (the 1st IP in SERVICE_CIDR is used here)
+CLUSTER_KUBERNETES_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(1) | ipaddr('address') }}"
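A quick worked example of how these two templated values render, assuming a hypothetical SERVICE_CIDR of 10.68.0.0/16 and three etcd hosts (all values illustrative, not taken from this commit):

# hypothetical inputs
SERVICE_CIDR="10.68.0.0/16"
ETCD_HOSTS="192.168.1.1 192.168.1.2 192.168.1.3"

# ETCD_ENDPOINTS: the Jinja loop joins "https://<host>:2379," and rstrip(',') drops the trailing comma
ENDPOINTS=""
for h in $ETCD_HOSTS; do ENDPOINTS="${ENDPOINTS}https://${h}:2379,"; done
echo "${ENDPOINTS%,}"   # https://192.168.1.1:2379,https://192.168.1.2:2379,https://192.168.1.3:2379

# CLUSTER_KUBERNETES_SVC_IP: ipaddr('net') | ipaddr(1) picks the first usable address of the CIDR
echo "10.68.0.1"        # for SERVICE_CIDR 10.68.0.0/16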
@@ -1,41 +0,0 @@
-# dns svc ip (the 2nd IP in SERVICE_CIDR is used here)
-CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(2) | ipaddr('address') }}"
-
-# base (pause) container image
-SANDBOX_IMAGE: "easzlab/pause-amd64:3.2"
-
-# kubelet root directory
-KUBELET_ROOT_DIR: "/var/lib/kubelet"
-
-# maximum number of pods per node
-MAX_PODS: 110
-
-# resources reserved for kube components (kubelet, kube-proxy, dockerd, etc.)
-# see templates/kubelet-config.yaml.j2 for the actual values
-KUBE_RESERVED_ENABLED: "yes"
-
-# upstream k8s does not recommend enabling system-reserved lightly, unless long-term monitoring has shown the system's resource usage;
-# the reservation should also grow as the system runs longer, see templates/kubelet-config.yaml.j2 for the values
-# the system reservation assumes a 4c/8g VM with a minimal set of system services; increase it on powerful physical machines
-# also, apiserver and friends briefly use a lot of resources during cluster installation, so reserve at least 1g of memory
-SYS_RESERVED_ENABLED: "no"
-
-# haproxy balance mode
-BALANCE_ALG: "roundrobin"
-
-# APISERVER address
-KUBE_APISERVER: "{%- if inventory_hostname in groups['kube-master'] -%} \
-                 https://{{ inventory_hostname }}:6443 \
-                 {%- else -%} \
-                 {%- if groups['kube-master']|length > 1 -%} \
-                 https://127.0.0.1:6443 \
-                 {%- else -%} \
-                 https://{{ groups['kube-master'][0] }}:6443 \
-                 {%- endif -%} \
-                 {%- endif -%}"
-
-# when masters are added/removed, nodes need to reconfigure haproxy etc.
-MASTER_CHG: "no"
-
-# offline installation of haproxy (offline|online)
-INSTALL_SOURCE: "online"
@@ -10,7 +10,7 @@ ExecStart={{ bin_dir }}/kube-proxy \
  --bind-address={{ inventory_hostname }} \
  --cluster-cidr={{ CLUSTER_CIDR }} \
  --hostname-override={{ inventory_hostname }} \
- --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
+ --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig \
  --logtostderr=true \
  --proxy-mode={{ PROXY_MODE }}
 Restart=always
@@ -0,0 +1,13 @@
+# dns svc ip (the 2nd IP in SERVICE_CIDR is used here)
+CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(2) | ipaddr('address') }}"
+
+# APISERVER address
+KUBE_APISERVER: "{%- if inventory_hostname in groups['kube-master'] -%} \
+                 https://{{ inventory_hostname }}:6443 \
+                 {%- else -%} \
+                 {%- if groups['kube-master']|length > 1 -%} \
+                 https://127.0.0.1:6443 \
+                 {%- else -%} \
+                 https://{{ groups['kube-master'][0] }}:6443 \
+                 {%- endif -%} \
+                 {%- endif -%}"
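The nested Jinja expression above chooses the apiserver endpoint per host; a plain bash sketch of the same decision, with hypothetical variables (IS_MASTER, MASTER_COUNT, NODE_IP and FIRST_MASTER_IP do not exist in the playbooks):

# hypothetical inputs, illustration only
IS_MASTER="no"; MASTER_COUNT=3; NODE_IP="192.168.1.10"; FIRST_MASTER_IP="192.168.1.1"

if [ "$IS_MASTER" = "yes" ]; then
  KUBE_APISERVER="https://${NODE_IP}:6443"           # a master talks to its own apiserver
elif [ "$MASTER_COUNT" -gt 1 ]; then
  KUBE_APISERVER="https://127.0.0.1:6443"            # nodes reach the masters through their local proxy (haproxy)
else
  KUBE_APISERVER="https://${FIRST_MASTER_IP}:6443"   # single-master cluster: talk to it directly
fi
echo "$KUBE_APISERVER"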
@@ -1,10 +1,4 @@
-# OVN DB and OVN Control Plane node, defaults to the first master node
-OVN_DB_NODE: "{{ groups['kube-master'][0] }}"
-
 kube_ovn_default_cidr: "{{ CLUSTER_CIDR }}"
 kube_ovn_default_gateway: "{{ CLUSTER_CIDR | ipaddr('net') | ipaddr(1) | ipaddr('address') }}"
 kube_ovn_node_switch_cidr: 100.64.0.0/16
 kube_ovn_enable_mirror: true
-
-# offline image tarball
-kube_ovn_offline: "kube_ovn_0.9.1.tar"
@@ -1,18 +0,0 @@
-# for more settings see https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
-
-# kube-proxy already provides ipvs mode, so kube-router's service_proxy is not used here
-#SERVICE_PROXY: "false"
-
-# public clouds have restrictions and usually need ipinip always on; in your own environment this can be set to "subnet"
-OVERLAY_TYPE: "full"
-
-# NetworkPolicy support switch
-FIREWALL_ENABLE: "true"
-
-# kube-router image version
-kube_router_ver: "v0.3.1"
-busybox_ver: "1.28.4"
-
-# kube-router offline image tarballs
-kuberouter_offline: "kube-router_{{ kube_router_ver }}.tar"
-busybox_offline: "busybox_{{ busybox_ver }}.tar"
@@ -1,3 +0,0 @@
-# offline installation of system packages (offline|online)
-INSTALL_SOURCE: "online"
-
@@ -38,7 +38,7 @@

 - block:
   - name: Distribute the certificates
-    copy: src={{ base_dir }}/.cluster/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
+    copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
     with_items:
     - admin.pem
     - admin-key.pem
@@ -57,13 +57,13 @@
     copy: src=/root/.kube/config dest=/root/.kube/config

   - name: Distribute the kube-proxy.kubeconfig file
-    copy: src={{ base_dir }}/.cluster/kube-proxy.kubeconfig dest=/etc/kubernetes/kube-proxy.kubeconfig
+    copy: src={{ cluster_dir }}/kube-proxy.kubeconfig dest={{ cluster_dir }}/kube-proxy.kubeconfig

   - name: Distribute the kube-controller-manager.kubeconfig file
-    copy: src={{ base_dir }}/.cluster/kube-controller-manager.kubeconfig dest=/etc/kubernetes/kube-controller-manager.kubeconfig
+    copy: src={{ cluster_dir }}/kube-controller-manager.kubeconfig dest={{ cluster_dir }}/kube-controller-manager.kubeconfig
     when: "inventory_hostname in groups['kube-master']"

   - name: Distribute the kube-scheduler.kubeconfig file
-    copy: src={{ base_dir }}/.cluster/kube-scheduler.kubeconfig dest=/etc/kubernetes/kube-scheduler.kubeconfig
+    copy: src={{ cluster_dir }}/kube-scheduler.kubeconfig dest={{ cluster_dir }}/kube-scheduler.kubeconfig
     when: "inventory_hostname in groups['kube-master']"
   when: "inventory_hostname in groups['kube-master'] or inventory_hostname in groups['kube-node']"
@@ -1,152 +0,0 @@
-#!/bin/bash
-
-# This script, by Joey Yang (https://github.com/Code2Life), does the following:
-# 1. automatically installs python + ansible on Ubuntu/CentOS/Fedora/ArchLinux;
-# 2. clones the kubeasz project and downloads/unpacks the needed binaries into /etc/ansible/bin;
-# the related k8s binaries are also mirrored on a personal Qiniu CDN for faster downloads: filecdn.code2life.top;
-#
-# Usage:
-# 1. it can be run with an argument, e.g. ./basic-env-setup.sh k8s.193.tar.gz to pick a different kubernetes binary bundle; without an argument it defaults to the latest k8s.1100.tar.gz (k8s 1.10.0 + etcd 3.3.2).
-# 2. it can also be run on any supported linux box with: curl http://filecdn.code2life.top/kubeasz-basic-env-setup.sh | sh -s
-# tested on centos7/ubuntu16.04/debian9/fedora27; the binary bundle downloads quickly. After the script finishes, just configure hosts under /etc/ansible and copy the ssh public key, then the cluster can be built with ansible-playbook.
-
-set -e
-
-# curl http://filecdn.code2life.top/kubeasz-basic-env-setup.sh | sh -s
-
-# Kubernetes 1.10.0 by default
-bin_resource_url='http://filecdn.code2life.top/k8s.1100.tar.gz'
-
-# if an argument names a k8s binary bundle, use it instead, e.g. k8s.193.tar.gz
-if [ "$1" ];then
-    bin_resource_url="http://filecdn.code2life.top/"$1
-fi
-
-# install python/pip per Linux distribution
-# ---------------------------
-
-# the default debian apt sources are slow to reach from China; /etc/apt/sources.list can be switched to another mirror
-# for debian 9, sources.list can be changed to the following; ubuntu is similar, just use the mirror entries for the matching release
-# deb http://mirrors.163.com/debian/ stretch main non-free contrib
-# deb http://mirrors.163.com/debian/ stretch-updates main non-free contrib
-# deb http://mirrors.163.com/debian/ stretch-backports main non-free contrib
-# deb http://mirrors.163.com/debian-security/ stretch/updates main non-free contrib
-basic_ubuntu_debian() {
-    echo "Setup Basic Environment for Ubuntu/Debian."
-    apt-get update && apt-get upgrade -y && apt-get dist-upgrade -y
-    apt-get install python2.7 git python-pip curl -y
-
-    if [ ! -f /usr/bin/python ];then
-        ln -s /usr/bin/python2.7 /usr/bin/python
-    fi
-}
-
-# on Red Hat family distros the yum mirrors can be changed under /etc/yum.repos.d to speed up downloads
-basic_centos() {
-    echo "Setup Basic Environment for CentOS."
-    yum install epel-release -y
-    yum update -y
-    yum erase firewalld firewalld-filesystem python-firewall -y
-    yum install git python python-pip curl -y
-}
-
-basic_fedora() {
-    echo "Setup Basic Environment for Fedora."
-    yum update -y
-    yum install git python python-pip curl -y
-}
-
-# archlinux uses pacman for package management
-basic_arch() {
-    pacman -Syu --noconfirm
-    pacman -S python git python-pip curl --noconfirm
-}
-
-# install ansible with pip and download the k8s binaries
-setup_ansible_k8s() {
-    echo "Download Ansible and Kubernetes binaries."
-    pip install pip --upgrade -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
-    pip install --no-cache-dir ansible -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
-
-    git clone --depth=1 https://github.com/easzlab/kubeasz.git
-    mv kubeasz /etc/ansible
-
-    # Download from CDN & Move bin files
-    curl -o k8s_download.tar.gz "$bin_resource_url"
-    tar xvf k8s_download.tar.gz
-    mv -f bin/* /etc/ansible/bin
-    rm -rf bin
-    echo "Finish setup. Please config your hosts and run 'ansible-playbook' command at /etc/ansible."
-}
-# ---------------------------
-
-# detect the Linux distribution and run the matching environment setup
-# ---------------------------
-lsb_dist=''
-command_exists() {
-    command -v "$@" > /dev/null 2>&1
-}
-if command_exists lsb_release; then
-    lsb_dist="$(lsb_release -si)"
-    lsb_version="$(lsb_release -rs)"
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
-    lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
-    lsb_version="$(. /etc/lsb-release && echo "$DISTRIB_RELEASE")"
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
-    lsb_dist='debian'
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
-    lsb_dist='fedora'
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
-    lsb_dist="$(. /etc/os-release && echo "$ID")"
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then
-    lsb_dist="$(cat /etc/*-release | head -n1 | cut -d " " -f1)"
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then
-    lsb_dist="$(cat /etc/*-release | head -n1 | cut -d " " -f1)"
-fi
-lsb_dist="$(echo $lsb_dist | cut -d " " -f1)"
-lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
-# ---------------------------
-
-# ---------------------------
-setup_env(){
-    case "$lsb_dist" in
-        centos)
-            basic_centos
-            setup_ansible_k8s
-            exit 0
-            ;;
-        fedora)
-            basic_fedora
-            setup_ansible_k8s
-            exit 0
-            ;;
-        ubuntu)
-            basic_ubuntu_debian
-            setup_ansible_k8s
-            exit 0
-            ;;
-        debian)
-            basic_ubuntu_debian
-            setup_ansible_k8s
-            exit 0
-            ;;
-        arch)
-            basic_arch
-            setup_ansible_k8s
-            exit 0
-            ;;
-        suse)
-            echo 'Not implementation yet.'
-            exit 1
-    esac
-    echo "Error: Unsupported OS, please set ansible environment manually."
-    exit 1
-}
-setup_env
-# ---------------------------
@@ -1,43 +0,0 @@
-# Notice: this playbook is for testing/learning only; see docs/op/change_ip_allinone.md for detailed instructions
-# It is only meant for allinone deployments whose host IP needs changing (e.g. an allinone VM that is cloned or copied and shared with others for testing)
-#
-# ------- Precondition: a working allinone deployment in a VM, shut down and copied to someone else; after the new VM boots and the IP must change, follow these steps
-# 1. edit the ansible hosts file: sed -i 's/$OLD_IP/$NEW_IP/g' /etc/ansible/hosts
-# 2. set up passwordless ssh login: ssh-copy-id $NEW_IP and follow the prompts
-# 3. check that the change worked and that 'ansible all -m ping' succeeds
-# 4. run this playbook: ansible-playbook /etc/ansible/tools/change_ip_aio.yml
-
-- hosts: kube-master   # the group does not matter, in allinone every role shares the same ip
-  tasks:
-  - name: Delete some certificates and configs; they are regenerated later with the new IP
-    file: name={{ item }} state=absent
-    with_items:
-    - "/etc/etcd/ssl/etcd.pem"                  # delete the etcd certificate
-    - "/etc/kubernetes/ssl/kubernetes.pem"      # delete the old master certificate
-    - "/etc/kubernetes/kubelet.kubeconfig"      # delete the old kubelet config file
-
-- hosts: kube-master
-  roles:
-  - deploy
-  - etcd
-  - kube-master
-  - kube-node
-
-- hosts: kube-master
-  tasks:
-  - name: Delete the nodes registered with the old IP address
-    shell: "{{ bin_dir }}/kubectl get node |grep -v '{{ inventory_hostname }}'|awk '{print $1}' |xargs {{ bin_dir }}/kubectl delete node"
-    ignore_errors: true
-
-  - name: Delete the previous network plugin deployment
-    shell: "{{ bin_dir }}/kubectl delete -f /opt/kube/kube-system/{{ CLUSTER_NETWORK }}.yaml || \
-            {{ bin_dir }}/kubectl delete -f /opt/kube/kube-ovn/"
-    ignore_errors: true
-
-- hosts: kube-master
-  roles:
-  - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
-  - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
-  - { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
-  - { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
-  - { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
@@ -1,97 +0,0 @@
-# playbook to reset the k8s pod network; read docs/op/change_k8s_network.md carefully before use
-- hosts:
-  - kube-master
-  - kube-node
-  tasks:
-  - name: Get info about the already created pods
-    command: "{{ bin_dir }}/kubectl get daemonset -n kube-system"
-    register: pod_info
-    run_once: true
-
-  - name: Delete the previous network plugin deployment
-    shell: "{{ bin_dir }}/kubectl delete -f /opt/kube/kube-system/{{ item }}.yaml"
-    with_items:
-    - calico
-    - cilium
-    - flannel
-    - kube-router
-    when: 'item in pod_info.stdout'
-    run_once: true
-    ignore_errors: true
-
-  - name: Clean up kube-router leftovers
-    shell: "{{ bin_dir }}/docker run --privileged --net=host cloudnativelabs/kube-router --cleanup-config"
-    ignore_errors: true
-    when: '"kube-router" in pod_info.stdout'
-
-  - name: Stop the kube-node related services
-    service: name={{ item }} state=stopped
-    with_items:
-    - kubelet
-    - kube-proxy
-    ignore_errors: true
-
-  - name: Clean up leftover calico routes
-    shell: "for rt in `ip route|grep bird|sed 's/blackhole//'|awk '{print $1}'`;do ip route del $rt;done;"
-    when: '"calico" in pod_info.stdout'
-    ignore_errors: true
-
-  - name: Clean up the iptables rules created by kube-proxy
-    shell: "{{ bin_dir }}/kube-proxy --cleanup"
-    ignore_errors: true
-
-  - name: Clean up directories and files
-    file: name={{ item }} state=absent
-    with_items:
-    - "/etc/cni/"
-    - "/run/flannel/"
-    - "/etc/calico/"
-    - "/var/run/calico/"
-    - "/var/lib/calico/"
-    - "/var/log/calico/"
-    - "/etc/cilium/"
-    - "/var/run/cilium/"
-    - "/sys/fs/bpf/tc/"
-    - "/var/lib/cni/"
-    - "/var/lib/kube-router/"
-    - "/opt/kube/kube-system/"
-
-  - name: Clean up network interfaces
-    shell: "ip link del tunl0; \
-            ip link del flannel.1; \
-            ip link del cni0; \
-            ip link del mynet0; \
-            ip link del kube-bridge; \
-            ip link del dummy0; \
-            ip link del kube-ipvs0; \
-            ip link del cilium_net; \
-            ip link del cilium_vxlan; \
-            systemctl restart networking; \
-            systemctl restart network"
-    ignore_errors: true
-
-  - name: Start the kube-node related services
-    service: name={{ item }} state=started enabled=yes
-    with_items:
-    - kubelet
-    - kube-proxy
-    ignore_errors: true
-
-- hosts:
-  - kube-master
-  - kube-node
-  # install the new network plugin
-  roles:
-  - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
-  - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
-  - { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
-  - { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
-
-- hosts: kube-node
-  tasks:
-  # delete all running pods; the controllers recreate them automatically
-  - name: Restart all pods
-    shell: "for NS in $({{ bin_dir }}/kubectl get ns|awk 'NR>1{print $1}'); \
-            do {{ bin_dir }}/kubectl delete pod --all -n $NS; done;"
-    ignore_errors: true
-    run_once: true