From c69be3f0da53c0db9cdc0d6d353dd4567f5e8d54 Mon Sep 17 00:00:00 2001 From: gjmzj Date: Fri, 25 Dec 2020 11:53:00 +0800 Subject: [PATCH] init commit for dev-3.0 --- .gitignore | 10 +- ansible.cfg | 2 +- dockerfiles/readme.md | 3 - down/download.sh | 4 +- example/config.yml | 182 ++++++++++++++++ example/hosts.allinone | 11 +- example/hosts.multi-node | 5 +- tools/easzctl => ezctl | 148 ++++++++----- tools/easzup => ezdown | 205 ++++++++++-------- 01.prepare.yml => playbooks/01.prepare.yml | 0 02.etcd.yml => playbooks/02.etcd.yml | 0 .../03.containerd.yml | 0 03.docker.yml => playbooks/03.docker.yml | 0 .../04.kube-master.yml | 0 .../05.kube-node.yml | 0 06.network.yml => playbooks/06.network.yml | 0 .../07.cluster-addon.yml | 0 11.harbor.yml => playbooks/11.harbor.yml | 0 .../21.addetcd.yml | 6 +- .../22.addnode.yml | 0 .../23.addmaster.yml | 0 .../31.deletcd.yml | 6 +- .../32.delnode.yml | 0 .../33.delmaster.yml | 0 90.setup.yml => playbooks/90.setup.yml | 0 91.start.yml => playbooks/91.start.yml | 0 92.stop.yml => playbooks/92.stop.yml | 0 22.upgrade.yml => playbooks/93.upgrade.yml | 3 +- 23.backup.yml => playbooks/94.backup.yml | 14 +- 24.restore.yml => playbooks/95.restore.yml | 0 99.clean.yml => playbooks/99.clean.yml | 0 roles/calico/defaults/main.yml | 28 --- roles/calico/templates/calico-v3.15.yaml.j2 | 2 +- roles/calico/templates/calico-v3.3.yaml.j2 | 2 +- roles/calico/templates/calico-v3.4.yaml.j2 | 2 +- roles/calico/templates/calico-v3.8.yaml.j2 | 2 +- roles/calico/vars/main.yml | 3 + roles/chrony/defaults/main.yml | 13 -- roles/cilium/defaults/main.yml | 14 -- .../cluster-addon/{defaults => vars}/main.yml | 20 +- roles/cluster-restore/defaults/main.yml | 2 +- roles/cluster-restore/tasks/main.yml | 2 +- roles/containerd/defaults/main.yml | 8 - roles/containerd/templates/config.toml.j2 | 2 +- roles/deploy/defaults/main.yml | 13 -- ...ate-kube-controller-manager-kubeconfig.yml | 18 +- .../tasks/create-kube-proxy-kubeconfig.yml | 18 +- .../create-kube-scheduler-kubeconfig.yml | 18 +- .../tasks/create-kubectl-kubeconfig.yml | 10 +- roles/deploy/tasks/main.yml | 10 +- roles/deploy/vars/main.yml | 2 + roles/docker/defaults/main.yml | 14 -- roles/docker/templates/daemon.json.j2 | 7 +- roles/etcd/tasks/main.yml | 2 +- roles/etcd/{defaults => vars}/main.yml | 0 roles/flannel/defaults/main.yml | 13 -- roles/harbor/tasks/main.yml | 2 +- roles/harbor/{defaults => vars}/main.yml | 5 +- roles/kube-master/defaults/main.yml | 29 --- roles/kube-master/tasks/main.yml | 30 --- .../templates/basic-auth-rbac.yaml.j2 | 12 - roles/kube-master/templates/basic-auth.csv.j2 | 2 - .../templates/kube-apiserver.service.j2 | 3 - .../kube-controller-manager.service.j2 | 2 +- .../templates/kube-scheduler.service.j2 | 2 +- roles/kube-master/vars/main.yml | 6 + roles/kube-node/defaults/main.yml | 41 ---- .../kube-node/templates/kube-proxy.service.j2 | 2 +- roles/kube-node/vars/main.yml | 13 ++ roles/kube-ovn/{defaults => vars}/main.yml | 6 - roles/kube-router/defaults/main.yml | 18 -- roles/prepare/defaults/main.yml | 3 - roles/prepare/tasks/main.yml | 8 +- tools/basic-env-setup.sh | 152 ------------- tools/change_ip_aio.yml | 43 ---- tools/change_k8s_network.yml | 97 --------- 76 files changed, 502 insertions(+), 798 deletions(-) delete mode 100644 dockerfiles/readme.md create mode 100644 example/config.yml rename tools/easzctl => ezctl (81%) rename tools/easzup => ezdown (65%) rename 01.prepare.yml => playbooks/01.prepare.yml (100%) rename 02.etcd.yml => playbooks/02.etcd.yml (100%) rename 03.containerd.yml => 
playbooks/03.containerd.yml (100%) rename 03.docker.yml => playbooks/03.docker.yml (100%) rename 04.kube-master.yml => playbooks/04.kube-master.yml (100%) rename 05.kube-node.yml => playbooks/05.kube-node.yml (100%) rename 06.network.yml => playbooks/06.network.yml (100%) rename 07.cluster-addon.yml => playbooks/07.cluster-addon.yml (100%) rename 11.harbor.yml => playbooks/11.harbor.yml (100%) rename tools/01.addetcd.yml => playbooks/21.addetcd.yml (88%) rename tools/02.addnode.yml => playbooks/22.addnode.yml (100%) rename tools/03.addmaster.yml => playbooks/23.addmaster.yml (100%) rename tools/11.deletcd.yml => playbooks/31.deletcd.yml (94%) rename tools/12.delnode.yml => playbooks/32.delnode.yml (100%) rename tools/13.delmaster.yml => playbooks/33.delmaster.yml (100%) rename 90.setup.yml => playbooks/90.setup.yml (100%) rename 91.start.yml => playbooks/91.start.yml (100%) rename 92.stop.yml => playbooks/92.stop.yml (100%) rename 22.upgrade.yml => playbooks/93.upgrade.yml (78%) rename 23.backup.yml => playbooks/94.backup.yml (81%) rename 24.restore.yml => playbooks/95.restore.yml (100%) rename 99.clean.yml => playbooks/99.clean.yml (100%) delete mode 100644 roles/calico/defaults/main.yml create mode 100644 roles/calico/vars/main.yml delete mode 100644 roles/chrony/defaults/main.yml delete mode 100644 roles/cilium/defaults/main.yml rename roles/cluster-addon/{defaults => vars}/main.yml (68%) delete mode 100644 roles/containerd/defaults/main.yml delete mode 100644 roles/deploy/defaults/main.yml create mode 100644 roles/deploy/vars/main.yml delete mode 100644 roles/docker/defaults/main.yml rename roles/etcd/{defaults => vars}/main.yml (100%) delete mode 100644 roles/flannel/defaults/main.yml rename roles/harbor/{defaults => vars}/main.yml (55%) delete mode 100644 roles/kube-master/defaults/main.yml delete mode 100644 roles/kube-master/templates/basic-auth-rbac.yaml.j2 delete mode 100644 roles/kube-master/templates/basic-auth.csv.j2 create mode 100644 roles/kube-master/vars/main.yml delete mode 100644 roles/kube-node/defaults/main.yml create mode 100644 roles/kube-node/vars/main.yml rename roles/kube-ovn/{defaults => vars}/main.yml (52%) delete mode 100644 roles/kube-router/defaults/main.yml delete mode 100644 roles/prepare/defaults/main.yml delete mode 100644 tools/basic-env-setup.sh delete mode 100644 tools/change_ip_aio.yml delete mode 100644 tools/change_k8s_network.yml diff --git a/.gitignore b/.gitignore index c8e8653..e6a3d2d 100644 --- a/.gitignore +++ b/.gitignore @@ -2,12 +2,8 @@ down/* !down/download.sh !down/offline_images -# k8s binaries directory +# binaries directory bin/* -!bin/readme.md - -# ansible hosts -hosts # k8s storage manifests manifests/storage/* @@ -18,8 +14,8 @@ roles/cluster-backup/files/* !roles/cluster-backup/files/readme.md # role based variable settings, exclude roles/os-harden/vars/ -/roles/*/vars/* -!/roles/os-harden/vars/ +#/roles/*/vars/* +#!/roles/os-harden/vars/ # cluster backups .cluster/ diff --git a/ansible.cfg b/ansible.cfg index af38cc3..e85aeed 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -32,7 +32,7 @@ gathering = smart # additional paths to search for roles in, colon separated -roles_path = /etc/ansible/roles +roles_path = /etc/kubeasz/roles # uncomment this to disable SSH key host checking host_key_checking = False diff --git a/dockerfiles/readme.md b/dockerfiles/readme.md deleted file mode 100644 index 2730edf..0000000 --- a/dockerfiles/readme.md +++ /dev/null @@ -1,3 +0,0 @@ -# Dockerfiles for building images needed - -Please refer to 
https://github.com/kubeasz/dockerfiles
diff --git a/down/download.sh b/down/download.sh
index 320be11..71fd737 100644
--- a/down/download.sh
+++ b/down/download.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 # This script describes where to download the official released binaries needed
-# It's suggested to download using 'tools/easzup -D', everything needed will be ready in '/etc/ansible'
+# It's suggested to download with 'ezdown -D'; everything needed will then be ready in '/etc/kubeasz'
 
 # example releases
 K8S_VER=v1.13.7
@@ -11,7 +11,7 @@ DOCKER_COMPOSE_VER=1.23.2
 HARBOR_VER=v1.9.4
 CONTAINERD_VER=1.2.6
 
-echo -e "\nNote: It's strongly recommended that downloading with 'tools/easzup -D', everything needed will be ready in '/etc/ansible'."
+echo -e "\nNote: It's strongly recommended to download with 'ezdown -D'; everything needed will then be ready in '/etc/kubeasz'."
 
 echo -e "\n----download k8s binary at:"
 echo -e https://dl.k8s.io/${K8S_VER}/kubernetes-server-linux-amd64.tar.gz
diff --git a/example/config.yml b/example/config.yml
new file mode 100644
index 0000000..9269ec0
--- /dev/null
+++ b/example/config.yml
@@ -0,0 +1,182 @@
+############################
+# role:prepare
+############################
+# optionally install system packages from an offline source (offline|online)
+INSTALL_SOURCE: "online"
+
+
+############################
+# role:chrony
+############################
+# upstream NTP servers
+ntp_servers:
+  - "ntp1.aliyun.com"
+  - "time1.cloud.tencent.com"
+  - "0.cn.pool.ntp.org"
+
+# networks allowed to sync time from this node, e.g. "10.0.0.0/8"; all allowed by default
+local_network: "0.0.0.0/0"
+
+
+############################
+# role:deploy
+############################
+# default: ca will expire in 100 years
+# default: certs issued by the ca will expire in 50 years
+CA_EXPIRY: "876000h"
+CERT_EXPIRY: "438000h"
+
+# kubeconfig settings; note that the granted permissions depend on 'USER_NAME':
+# 'admin' creates a kubeconfig with cluster-admin (full) permissions
+# 'read' creates a kubeconfig with read-only permissions
+CLUSTER_NAME: "cluster1"
+USER_NAME: "admin"
+CONTEXT_NAME: "context-{{ CLUSTER_NAME }}-{{ USER_NAME }}"
+
+
+############################
+# role:runtime [containerd,docker]
+############################
+# [.] enable container registry mirrors
+ENABLE_MIRROR_REGISTRY: true
+
+# [containerd] base (pause) container image
+SANDBOX_IMAGE: "easzlab/pause-amd64:3.2"
+
+# [containerd] container storage directory
+CONTAINERD_STORAGE_DIR: "/var/lib/containerd"
+
+# [docker] container storage directory
+DOCKER_STORAGE_DIR: "/var/lib/docker"
+
+# [docker] enable the remote (RESTful) API
+ENABLE_REMOTE_API: false
+
+# [docker] trusted insecure (HTTP) registries
+INSECURE_REG: '["127.0.0.1/8"]'
+
+
+############################
+# role:kube-master
+############################
+# extra SANs for the master node certificates; multiple IPs and domains may be added (e.g. a public IP and domain)
+MASTER_CERT_HOSTS:
+  - "10.1.1.1"
+  - "k8s.test.io"
+  #- "www.test.com"
+
+# mask length of the pod subnet on each node (determines the maximum pod IPs per node)
+# if flannel runs with --kube-subnet-mgr, it reads this value to assign each node its pod subnet
+# https://github.com/coreos/flannel/issues/847
+NODE_CIDR_LEN: 24
+
+
+############################
+# role:kube-node
+############################
+# kubelet root directory
+KUBELET_ROOT_DIR: "/var/lib/kubelet"
+
+# maximum pods per node
+MAX_PODS: 110
+
+# resources reserved for kube components (kubelet, kube-proxy, dockerd, etc.)
+# see templates/kubelet-config.yaml.j2 for the actual values
+KUBE_RESERVED_ENABLED: "yes"
+
+# upstream k8s advises against enabling system-reserved lightly, unless long-term monitoring
+# has shown you the system's real resource usage; reservations should also grow as the system ages;
+# see templates/kubelet-config.yaml.j2 for the values
+# the defaults assume a 4c/8g VM running minimal system services; raise them on high-end physical hosts
+# note: apiserver etc. briefly use extra resources during cluster setup, so reserving at least 1g of memory is recommended
+SYS_RESERVED_ENABLED: "no"
+
+# haproxy balance mode
+BALANCE_ALG: "roundrobin"
+
+
+############################
+# role:network [flannel,calico,cilium,kube-ovn,kube-router]
+############################
+# [flannel] flannel backend: "host-gw", "vxlan", etc.
+FLANNEL_BACKEND: "vxlan"
+DIRECT_ROUTING: false
+
+# [flannel] flanneld_image: "quay.io/coreos/flannel:v0.10.0-amd64"
+flannelVer: "v0.13.0-amd64"
+flanneld_image: "easzlab/flannel:{{ flannelVer }}"
+
+# [flannel] offline image tarball
+flannel_offline: "flannel_{{ flannelVer }}.tar"
+
+# [calico] setting CALICO_IPV4POOL_IPIP: "off" can improve network performance; see docs/setup/calico.md for the preconditions
+CALICO_IPV4POOL_IPIP: "Always"
+
+# [calico] host IP used by calico-node; BGP neighbors are established on this address; set it manually or let it auto-detect
+IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube-master'][0] }}"
+
+# [calico] calico network backend: brid, vxlan, none
+CALICO_NETWORKING_BACKEND: "brid"
+
+# [calico] supported calico versions: [v3.3.x] [v3.4.x] [v3.8.x] [v3.15.x]
+calicoVer: "v3.15.3"
+calico_ver: "{{ calicoVer }}"
+
+# [calico] calico major.minor version
+calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"
+
+# [calico] offline image tarball
+calico_offline: "calico_{{ calico_ver }}.tar"
+
+# [cilium] number of etcd nodes created by CILIUM_ETCD_OPERATOR: 1,3,5,7...
+ETCD_CLUSTER_SIZE: 1
+
+# [cilium] image version
+cilium_ver: "v1.4.1"
+
+# [cilium] offline image tarball
+cilium_offline: "cilium_{{ cilium_ver }}.tar"
+
+# [kube-ovn] node for the OVN DB and OVN Control Plane; defaults to the first master node
+OVN_DB_NODE: "{{ groups['kube-master'][0] }}"
+
+# [kube-ovn] offline image tarball
+kube_ovn_offline: "kube_ovn_0.9.1.tar"
+
+# [kube-router] public clouds have restrictions and usually need ipinip always on; self-hosted environments may set "subnet"
+OVERLAY_TYPE: "full"
+
+# [kube-router] NetworkPolicy support toggle
+FIREWALL_ENABLE: "true"
+
+# [kube-router] kube-router image version
+kube_router_ver: "v0.3.1"
+busybox_ver: "1.28.4"
+
+# [kube-router] kube-router offline image tarballs
+kuberouter_offline: "kube-router_{{ kube_router_ver }}.tar"
+busybox_offline: "busybox_{{ busybox_ver }}.tar"
+
+
+############################
+# role:cluster-addon
+############################
+# auto-install dns; 'dns_backend' is either "coredns" or "kubedns"
+dns_install: "yes"
+dns_backend: "coredns"
+corednsVer: "1.7.1"
+
+# auto-install metrics server
+metricsserver_install: "yes"
+metricsVer: "v0.3.6"
+
+# auto-install dashboard
+dashboard_install: "yes"
+dashboardVer: "v2.1.0"
+dashboardMetricsScraperVer: "v1.0.6"
+
+
+############################
+# role:harbor
+############################
+# harbor version (full version string)
+HARBOR_VER: "v1.9.4"
diff --git a/example/hosts.allinone b/example/hosts.allinone
index b380ca6..0d18337 100644
--- a/example/hosts.allinone
+++ b/example/hosts.allinone
@@ -53,8 +53,11 @@ CLUSTER_DNS_DOMAIN="cluster.local."
 # Binaries Directory
 bin_dir="/opt/kube/bin"
 
-# CA and other components cert/key Directory
-ca_dir="/etc/kubernetes/ssl"
-
 # Deploy Directory (kubeasz workspace)
-base_dir="/etc/ansible"
+base_dir="/etc/kubeasz"
+
+# Directory for a specific cluster
+cluster_dir="{{ base_dir }}/clusters/_cluster_name_"
+
+# CA and other components cert/key Directory
+ca_dir="{{ cluster_dir }}/ssl"
diff --git a/example/hosts.multi-node b/example/hosts.multi-node
index e8a5347..04cdd1e 100644
--- a/example/hosts.multi-node
+++ b/example/hosts.multi-node
@@ -61,4 +61,7 @@ bin_dir="/opt/kube/bin"
 ca_dir="/etc/kubernetes/ssl"
 
 # Deploy Directory (kubeasz workspace)
-base_dir="/etc/ansible"
+base_dir="/etc/kubeasz"
+
+# Directory for a specific cluster
+cluster_dir="{{ base_dir }}/clusters/_cluster_name_"
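The hosts changes above introduce a per-cluster layout: the shared deploy directory (base_dir) now holds only code and binaries, while everything specific to one cluster lives under its own cluster_dir. A sketch of the resulting tree, assuming a cluster named 'cluster1' has been substituted for the '_cluster_name_' placeholder (ssl/ and backup/ are created later by the deploy role and the backup playbook):

    /etc/kubeasz                  # base_dir: playbooks, roles, shared binaries
    |-- bin/                      # binaries prepared by 'ezdown -D'
    |-- playbooks/
    |-- roles/
    `-- clusters/
        `-- cluster1/             # cluster_dir
            |-- hosts             # rendered from example/hosts.multi-node
            |-- config.yml        # rendered from example/config.yml
            |-- ssl/              # per-cluster CA and certs (ca_dir)
            `-- backup/           # etcd snapshots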
diff --git a/tools/easzctl b/ezctl
similarity index 81%
rename from tools/easzctl
rename to ezctl
index 3c16744..782066e 100755
--- a/tools/easzctl
+++ b/ezctl
@@ -1,38 +1,54 @@
 #!/bin/bash
-#
-# This script aims to manage k8s clusters created by 'kubeasz'. (developing)
+# Create & manage k8s clusters by 'kubeasz'
 
 set -o nounset
 set -o errexit
 #set -o xtrace
 
 function usage() {
+    echo -e "\033[33mUsage:\033[0m ezctl COMMAND [args]"
     cat <<EOF
+Cluster setups:
+    new         <cluster>           to start a new k8s deploy with name 'cluster'
+    setup       <cluster> [step]    to setup a cluster, also supporting a step-by-step way
+    start-aio                       to quickly setup an all-in-one cluster with 'default' settings
 
-Cluster-wide operation:
-    checkout            To switch to context , or create it if not existed
-    destroy             To destroy the current cluster, '--purge' to also delete the context
-    list                To list all of clusters managed
-    setup               To setup a cluster using the current context
-    start-aio           To quickly setup an all-in-one cluster for testing (like minikube)
+Cluster ops:
+    add-etcd    <new_etcd_ip>       to add an etcd node to the etcd cluster
+    add-master  <new_master_ip>     to add a master node to the k8s cluster
+    add-node    <new_node_ip>       to add a worker node to the k8s cluster
+    del-etcd    <etcd_ip>           to delete an etcd node from the etcd cluster
+    del-master  <master_ip>         to delete a master node from the k8s cluster
+    del-node    <node_ip>           to delete a worker node from the k8s cluster
+    upgrade                         to upgrade the k8s cluster
+    destroy                         to destroy the current cluster, '--purge' to also delete the context
 
-In-cluster operation:
-    add-etcd            To add a etcd-node to the etcd cluster
-    add-master          To add a kube-master(master node) to the k8s cluster
-    add-node            To add a kube-node(work node) to the k8s cluster
-    del-etcd            To delete a etcd-node from the etcd cluster
-    del-master          To delete a kube-master from the k8s cluster
-    del-node            To delete a kube-node from the k8s cluster
-    upgrade             To upgrade the k8s cluster
-
-Extra operation:
-    basic-auth          To enable/disable basic-auth for apiserver
-
-Use "easzctl help <command>" for more information about a given command.
+Use "ezctl help <command>" for more information about a given command.
EOF
 }
 
+function logger() {
+  TIMESTAMP=$(date +'%Y-%m-%d %H:%M:%S')
+  case "$1" in
+    debug)
+      echo -e "$TIMESTAMP \033[36mDEBUG\033[0m $2"
+      ;;
+    info)
+      echo -e "$TIMESTAMP \033[32mINFO\033[0m $2"
+      ;;
+    warn)
+      echo -e "$TIMESTAMP \033[33mWARN\033[0m $2"
+      ;;
+    error)
+      echo -e "$TIMESTAMP \033[31mERROR\033[0m $2"
+      ;;
+    *)
+      ;;
+  esac
+}
+
 function help-info() {
     case "$1" in
         (add-etcd)
            echo -e "Usage: easzctl add-etcd <new_etcd_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-etcd.md'"
            ;;
@@ -53,9 +69,6 @@ function help-info() {
        (del-node)
            echo -e "Usage: easzctl del-node <node_ip>\n\nread 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-node.md'"
            ;;
-       (basic-auth)
-           echo -e "Usage: easzctl basic-auth \nOption:\t -s  enable basic-auth\n\t -S  disable basic-auth\n\t -u  set username\n\t -p  set password"
-           ;;
        (*)
            usage
            return 0
            ;;
    esac
 }
 
@@ -63,12 +76,59 @@
-function process_cmd() {
-    echo -e "[INFO] \033[33m$ACTION\033[0m : $CMD"
-    $CMD || { echo -e "[ERROR] \033[31mAction failed\033[0m : $CMD"; return 1; }
-    echo -e "[INFO] \033[32mAction successed\033[0m : $CMD"
-}
+### Cluster setups functions ##############################
+
+function new() {
+    # check if the cluster already exists
+    [[ -d "clusters/$1" ]] && { logger error "cluster: $1 already exists"; exit 1; }
+    [[ "$1" == default ]] && { logger error "name 'default' is reserved for 'ezctl start-aio'"; exit 1; }
+
+    logger debug "generate custom cluster files in clusters/$1"
+    mkdir -p "clusters/$1"
+    cp example/hosts.multi-node "clusters/$1/hosts"
+    sed -i "s/_cluster_name_/$1/g" "clusters/$1/hosts"
+    cp example/config.yml "clusters/$1/config.yml"
+
+    logger debug "cluster $1: files successfully created."
+    logger info "next step 1: config 'clusters/$1/hosts'"
+    logger info "next step 2: config 'clusters/$1/config.yml'"
+}
+
+function setup() {
+    [[ -d "clusters/$1" ]] || { logger error "invalid config, run 'ezctl new $1' first"; return 1; }
+    [[ -f "bin/kube-apiserver" ]] || { logger error "no binaries found, run 'ezdown -D' first"; return 1; }
+
+    logger info "\n cluster:$1 setup begins in 5s, press any key to abort\n:"
+    ! (read -t5 -n1 ANS) || { logger warn "setup aborted"; return 1; }
+
+    ansible-playbook -i "clusters/$1/hosts" -e "@clusters/$1/config.yml" playbooks/90.setup.yml || return 1
+}
 
 function list() {
    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout <cluster_name>' first"; return 1; }
    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
    echo -e "\nlist of managed contexts (current: \033[33m$CLUSTER\033[0m)"
    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
    do
        echo -e "==> context $i:\t$c"
        let "i++"
    done
    echo -e "\nlist of installed clusters (current: \033[33m$CLUSTER\033[0m)"
    i=1; for c in $(ls $BASEPATH/.cluster/ |grep -Ev "backup|ssl|current_cluster|kubeconfig");
    do
        KUBECONF=$BASEPATH/.cluster/$c/config
        if [ -f "$KUBECONF" ]; then
            echo -e "==> cluster $i:\t$c"
            $BASEPATH/bin/kubectl --kubeconfig=$KUBECONF get node
        fi
        let "i++"
    done
 }
 
 
 ### in-cluster operation functions ##############################
 
 function add-node() {
@@ -256,34 +316,6 @@ function install_context() {
    fi
 }
 
-function checkout() {
-    # check directory '.cluster', initialize it if not existed
-    if [ ! -f "$BASEPATH/.cluster/current_cluster" ]; then
-        echo "[INFO] initialize directory $BASEPATH/.cluster"
-        mkdir -p $BASEPATH/.cluster/default
-        echo default > $BASEPATH/.cluster/current_cluster
-    fi
-
-    # check if $1 is already the current context
-    CLUSTER=$(cat $BASEPATH/.cluster/current_cluster)
-    [ "$1" != "$CLUSTER" ] || { echo "[WARN] $1 is already the current context"; return 0; }
-
-    echo "[INFO] save current context: $CLUSTER"
-    save_context
-    echo "[INFO] clean context: $CLUSTER"
-    rm -rf $BASEPATH/hosts /root/.kube/* $BASEPATH/.cluster/ssl $BASEPATH/.cluster/kube-proxy.kubeconfig
-
-    # check context $1, install it if existed, otherwise initialize it using default context
-    if [ ! -d "$BASEPATH/.cluster/$1" ];then
-        echo "[INFO] context $1 not existed, initialize it using default context"
-        cp -rp $BASEPATH/.cluster/default $BASEPATH/.cluster/$1
-        rm -f $BASEPATH/.cluster/$1/hosts $BASEPATH/.cluster/$1/config
-    fi
-    echo "[INFO] change current context to $1"
-    echo $1 > $BASEPATH/.cluster/current_cluster
-    install_context;
-}
-
-function setup() {
-    [ -f "$BASEPATH/.cluster/current_cluster" ] || { echo "[ERROR] invalid context, run 'easzctl checkout ' first"; return 1; }
-    [ -f "$BASEPATH/bin/kube-apiserver" ] || { echo "[ERROR] no binaries found, download then fist"; return 1; }
@@ -405,7 +437,7 @@ function basic-auth(){
 
 ### Main Lines ##################################################
 
-BASEPATH=/etc/ansible
+BASEPATH=/etc/kubeasz
 
 [ "$#" -gt 0 ] || { usage >&2; exit 2; }
diff --git a/tools/easzup b/ezdown
similarity index 65%
rename from tools/easzup
rename to ezdown
index 072f025..a9345c9 100755
--- a/tools/easzup
+++ b/ezdown
@@ -2,9 +2,9 @@
 #--------------------------------------------------
 # This script is for:
 # 1. to download the scripts/binaries/images needed for installing a k8s cluster with kubeasz
-# 2. to run kubeasz in a container (optional, not recommend)
+# 2. to run kubeasz in a container (optional)
 # @author: gjmzj
-# @usage: ./easzup
+# @usage: ./ezdown
 # @repo: https://github.com/easzlab/kubeasz
 # @ref: https://github.com/kubeasz/dockerfiles
 #--------------------------------------------------
@@ -12,7 +12,7 @@
 set -o nounset
 set -o errexit
 #set -o xtrace
 
-# default version, can be overridden by cmd line options, see ./easzup
+# default version, can be overridden by cmd line options, see usage
 DOCKER_VER=19.03.14
 KUBEASZ_VER=2.2.3
 K8S_BIN_VER=v1.20.1
@@ -28,38 +28,75 @@ flannelVer=v0.13.0-amd64
 metricsVer=v0.3.6
 pauseVer=3.2
 
+function usage() {
+  echo -e "\033[33mUsage:\033[0m ezdown [options] [args]"
+  cat <<EOF
+option:
+  -C         stop & clean all local containers
+  -D         download all into "$BASE"
+  -S         start kubeasz in a container
+  -d <ver>   set docker-ce version, default "$DOCKER_VER"
+  -e <ver>   set kubeasz-ext-bin version, default "$EXT_BIN_VER"
+  -k <ver>   set kubeasz-k8s-bin version, default "$K8S_BIN_VER"
+  -m <str>   set docker registry mirrors, default "CN"(used in Mainland,China)
+  -p <ver>   set kubeasz-sys-pkg version, default "$SYS_PKG_VER"
+  -z <ver>   set kubeasz version, default "$KUBEASZ_VER"
+
+see more at https://github.com/kubeasz/dockerfiles
+EOF
+}
+
+function logger() {
+  TIMESTAMP=$(date +'%Y-%m-%d %H:%M:%S')
+  case "$1" in
+    debug)
+      echo -e "$TIMESTAMP \033[36mDEBUG\033[0m $2"
+      ;;
+    info)
+      echo -e "$TIMESTAMP \033[32mINFO\033[0m $2"
+      ;;
+    warn)
+      echo -e "$TIMESTAMP \033[33mWARN\033[0m $2"
+      ;;
+    error)
+      echo -e "$TIMESTAMP \033[31mERROR\033[0m $2"
+      ;;
+    *)
+      ;;
+  esac
+}
+
 function download_docker() {
-  echo -e "[INFO] \033[33mdownloading docker binaries\033[0m $DOCKER_VER"
   if [[ "$REGISTRY_MIRROR" == CN ]];then
    DOCKER_URL="https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/static/stable/x86_64/docker-${DOCKER_VER}.tgz"
  else
    DOCKER_URL="https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VER}.tgz"
  fi
 
-  mkdir -p /opt/kube/bin /etc/ansible/down /etc/ansible/bin
-  if [[ -f "/etc/ansible/down/docker-${DOCKER_VER}.tgz" ]];then
-    echo "[INFO] docker binaries already existed"
+  if [[ -f "$BASE/down/docker-${DOCKER_VER}.tgz" ]];then
+    logger warn "docker binaries already exist"
  else
-    echo -e "[INFO] \033[33mdownloading docker binaries\033[0m $DOCKER_VER"
+    logger info "downloading docker binaries, version $DOCKER_VER"
    if [[ -e /usr/bin/curl ]];then
-      curl -C- -O --retry 3 "$DOCKER_URL" || { echo "[ERROR] downloading docker failed"; exit 1; }
+      curl -C- -O --retry 3 "$DOCKER_URL" || { logger error "downloading docker failed"; exit 1; }
    else
-      wget -c "$DOCKER_URL" || { echo "[ERROR] downloading docker failed"; exit 1; }
+      wget -c "$DOCKER_URL" || { logger error "downloading docker failed"; exit 1; }
    fi
-    /bin/mv -f ./docker-${DOCKER_VER}.tgz /etc/ansible/down
+    /bin/mv -f ./docker-${DOCKER_VER}.tgz $BASE/down
  fi
 
-  tar zxf /etc/ansible/down/docker-${DOCKER_VER}.tgz -C /etc/ansible/down && \
-  /bin/cp -f /etc/ansible/down/docker/* /etc/ansible/bin && \
-  /bin/mv -f /etc/ansible/down/docker/* /opt/kube/bin && \
+  tar zxf $BASE/down/docker-${DOCKER_VER}.tgz -C $BASE/down && \
+  /bin/cp -f $BASE/down/docker/* $BASE/bin && \
+  /bin/mv -f $BASE/down/docker/* /opt/kube/bin && \
  ln -sf /opt/kube/bin/docker /bin/docker
 }
 
 function install_docker() {
  # check if a container runtime is already installed
-  systemctl status docker|grep Active|grep -q running && { echo "[WARN] docker is already running."; return 0; }
+  systemctl status docker|grep Active|grep -q running && { logger warn "docker is already running."; return 0; }
 
-  echo "[INFO] generate docker service file"
+  logger debug "generate docker service file"
  cat > /etc/systemd/system/docker.service << EOF
 [Unit]
 Description=Docker Application Container Engine
@@ -82,9 +119,9 @@ EOF
 
  # configuration for dockerd
  mkdir -p /etc/docker
-  echo "[INFO] generate docker config file"
+  logger debug "generate docker config: /etc/docker/daemon.json"
  if [[ "$REGISTRY_MIRROR" == CN ]];then
-    echo "[INFO] prepare register mirror for $REGISTRY_MIRROR"
+    logger debug "prepare registry mirrors for $REGISTRY_MIRROR"
    cat > /etc/docker/daemon.json << EOF
 {
  "registry-mirrors": [
@@ -102,7 +139,7 @@
 }
 EOF
  else
-    echo "[INFO] standard config without registry mirrors"
+    logger debug "standard config without registry mirrors"
    cat > /etc/docker/daemon.json << EOF
 {
  "max-concurrent-downloads": 10,
@@ -118,87 +155,88 @@ EOF
  fi
 
  if [[ -e /etc/centos-release || -e /etc/redhat-release ]]; then
-    echo "[INFO] turn off selinux in CentOS/Redhat"
+    logger debug "turn off selinux in CentOS/Redhat"
    getenforce|grep Disabled || setenforce 0
    sed -i 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config
  fi
 
-  echo "[INFO] enable and start docker"
+  logger debug "enable and start docker"
  systemctl enable docker
  systemctl daemon-reload && systemctl restart docker && sleep 4
 }
 
 function get_kubeasz() {
  # check if kubeasz already exists
-  [[ -d "/etc/ansible/roles/kube-node" ]] && { echo "[WARN] kubeasz already existed"; return 0; }
+  [[ -d "$BASE/roles/kube-node" ]] && { logger warn "kubeasz already exists"; return 0; }
 
-  echo -e "[INFO] \033[33mdownloading kubeasz\033[0m $KUBEASZ_VER"
-  echo "[INFO] run a temporary container"
-  docker run -d --name temp_easz easzlab/kubeasz:${KUBEASZ_VER} || { echo "[ERROR] download failed."; exit 1; }
+  logger info "downloading kubeasz: $KUBEASZ_VER"
+  logger debug "run a temporary container"
+  docker run -d --name temp_easz easzlab/kubeasz:${KUBEASZ_VER} || { logger error "download failed."; exit 1; }
 
-  [[ -f "/etc/ansible/down/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "/etc/ansible/down/docker-${DOCKER_VER}.tgz" /tmp
-  [[ -d "/etc/ansible/bin" ]] && /bin/mv -f /etc/ansible/bin /tmp
+  [[ -f "$BASE/down/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "$BASE/down/docker-${DOCKER_VER}.tgz" /tmp
+  [[ -d "$BASE/bin" ]] && /bin/mv -f $BASE/bin /tmp
 
-  rm -rf /etc/ansible && \
-  echo "[INFO] cp kubeasz code from the temporary container" && \
-  docker cp temp_easz:/etc/ansible /etc/ansible && \
-  echo "[INFO] stop&remove temporary container" && \
+  rm -rf $BASE && \
+  logger debug "cp kubeasz code from the temporary container" && \
+  docker cp temp_easz:$BASE $BASE && \
+  logger debug "stop&remove temporary container" && \
  docker rm -f temp_easz
 
-  [[ -f "/tmp/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "/tmp/docker-${DOCKER_VER}.tgz" /etc/ansible/down
-  [[ -d "/tmp/bin" ]] && /bin/mv -f /tmp/bin/* /etc/ansible/bin
-  return 0
+  mkdir -p $BASE/bin
+  [[ -f "/tmp/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "/tmp/docker-${DOCKER_VER}.tgz" $BASE/down
+  [[ -d "/tmp/bin" ]] && /bin/mv -f /tmp/bin/* $BASE/bin
+  return 0
 }
 
 function get_k8s_bin() {
-  [[ -f "/etc/ansible/bin/kubelet" ]] && { echo "[WARN] kubernetes binaries existed"; return 0; }
+  [[ -f "$BASE/bin/kubelet" ]] && { logger warn "kubernetes binaries already exist"; return 0; }
 
-  echo -e "[INFO] \033[33mdownloading kubernetes\033[0m $K8S_BIN_VER binaries"
+  logger info "downloading kubernetes: $K8S_BIN_VER binaries"
  docker pull easzlab/kubeasz-k8s-bin:${K8S_BIN_VER} && \
-  echo "[INFO] run a temporary container" && \
+  logger debug "run a temporary container" && \
  docker run -d --name temp_k8s_bin easzlab/kubeasz-k8s-bin:${K8S_BIN_VER} && \
-  echo "[INFO] cp k8s binaries" && \
-  docker cp temp_k8s_bin:/k8s /etc/ansible/k8s_bin_tmp && \
-  /bin/mv -f /etc/ansible/k8s_bin_tmp/* /etc/ansible/bin && \
-  echo "[INFO] stop&remove temporary container" && \
+  logger debug "cp k8s binaries" && \
+  docker cp temp_k8s_bin:/k8s $BASE/k8s_bin_tmp && \
+  /bin/mv -f $BASE/k8s_bin_tmp/* $BASE/bin && \
+  logger debug "stop&remove temporary container" && \
  docker rm -f temp_k8s_bin && \
-  rm -rf /etc/ansible/k8s_bin_tmp
+  rm -rf $BASE/k8s_bin_tmp
 }
 
 function get_ext_bin() {
-  [[ -f "/etc/ansible/bin/etcdctl" ]] && { echo "[WARN] extral binaries existed"; return 0; }
+  [[ -f "$BASE/bin/etcdctl" ]] && { logger warn "extra binaries already exist"; return 0; }
 
-  echo -e "[INFO] \033[33mdownloading extral binaries\033[0m kubeasz-ext-bin:$EXT_BIN_VER"
+  logger info "downloading extra binaries kubeasz-ext-bin:$EXT_BIN_VER"
  docker pull easzlab/kubeasz-ext-bin:${EXT_BIN_VER} && \
-  echo "[INFO] run a temporary container" && \
+  logger debug "run a temporary container" && \
  docker run -d --name temp_ext_bin easzlab/kubeasz-ext-bin:${EXT_BIN_VER} && \
-  echo "[INFO] cp extral binaries" && \
-  docker cp temp_ext_bin:/extra /etc/ansible/extra_bin_tmp && \
-  /bin/mv -f /etc/ansible/extra_bin_tmp/* /etc/ansible/bin && \
-  echo "[INFO] stop&remove temporary container" && \
+  logger debug "cp extra binaries" && \
+  docker cp temp_ext_bin:/extra $BASE/extra_bin_tmp && \
+  /bin/mv -f $BASE/extra_bin_tmp/* $BASE/bin && \
+  logger debug "stop&remove temporary container" && \
  docker rm -f temp_ext_bin && \
-  rm -rf /etc/ansible/extra_bin_tmp
+  rm -rf $BASE/extra_bin_tmp
 }
 
 function get_sys_pkg() {
-  [[ -f "/etc/ansible/down/packages/chrony_xenial.tar.gz" ]] && { echo "[WARN] system packages existed"; return 0; }
+  [[ -f "$BASE/down/packages/chrony_xenial.tar.gz" ]] && { logger warn "system packages already exist"; return 0; }
 
-  echo -e "[INFO] \033[33mdownloading system packages\033[0m kubeasz-sys-pkg:$SYS_PKG_VER"
+  logger info "downloading system packages kubeasz-sys-pkg:$SYS_PKG_VER"
  docker pull easzlab/kubeasz-sys-pkg:${SYS_PKG_VER} && \
-  echo "[INFO] run a temporary container" && \
+  logger debug "run a temporary container" && \
  docker run -d --name temp_sys_pkg easzlab/kubeasz-sys-pkg:${SYS_PKG_VER} && \
-  echo "[INFO] cp system packages" && \
-  docker cp temp_sys_pkg:/packages /etc/ansible/down && \
-  echo "[INFO] stop&remove temporary container" && \
+  logger debug "cp system packages" && \
+  docker cp temp_sys_pkg:/packages $BASE/down && \
+  logger debug "stop&remove temporary container" && \
  docker rm -f temp_sys_pkg
 }
 
 function get_offline_image() {
-  imageDir=/etc/ansible/down
-  [[ -d "$imageDir" ]] || { echo "[ERROR] $imageDir not existed!"; exit 1; }
+  imageDir=$BASE/down
+  [[ -d "$imageDir" ]] || { logger error "$imageDir does not exist!"; exit 1; }
 
-  echo -e "[INFO] \033[33mdownloading offline images\033[0m"
+  logger info "downloading offline images"
 
  if [[ ! -f "$imageDir/calico_$calicoVer.tar" ]];then
    docker pull "calico/cni:${calicoVer}" && \
@@ -239,6 +277,7 @@
 }
 
 function download_all() {
+  mkdir -p /opt/kube/bin "$BASE/down" "$BASE/bin"
  download_docker && \
  install_docker && \
  get_kubeasz && \
@@ -248,16 +287,17 @@
 }
 
 function start_kubeasz_docker() {
-  [[ -d "/etc/ansible/roles/kube-node" ]] || { echo "[ERROR] not initialized. try 'easzup -D' first."; exit 1; }
+  [[ -d "$BASE/roles/kube-node" ]] || { logger error "not initialized. try 'ezdown -D' first."; exit 1; }
 
+  logger info "try to run kubeasz in a container"
  # get host's IP
  host_if=$(ip route|grep default|cut -d' ' -f5)
  host_ip=$(ip a|grep "$host_if$"|awk '{print $2}'|cut -d'/' -f1)
-  echo "[INFO] get host IP: $host_ip"
+  logger debug "get host IP: $host_ip"
 
  # allow ssh login using key locally
  if [[ ! -e /root/.ssh/id_rsa ]]; then
-    echo "[INFO] generate ssh key pair"
+    logger debug "generate ssh key pair"
    ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa > /dev/null
    cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
    ssh-keyscan -t ecdsa -H "$host_ip" >> /root/.ssh/known_hosts
  fi
 
  # create a link '/usr/bin/python' in Ubuntu1604
  if [[ ! -e /usr/bin/python && -e /etc/debian_version ]]; then
-    echo "[INFO] create a soft link '/usr/bin/python'"
+    logger debug "create a soft link '/usr/bin/python'"
    ln -s /usr/bin/python3 /usr/bin/python
  fi
 
  #
-  docker load -i /etc/ansible/down/kubeasz_${KUBEASZ_VER}.tar
+  docker load -i $BASE/down/kubeasz_${KUBEASZ_VER}.tar
 
  # run kubeasz docker container
-  echo "[INFO] run kubeasz in a container"
  docker run --detach \
      --name kubeasz \
      --restart always \
      --env HOST_IP="$host_ip" \
-      --volume /etc/ansible:/etc/ansible \
+      --volume $BASE:$BASE \
      --volume /root/.kube:/root/.kube \
      --volume /root/.ssh/id_rsa:/root/.ssh/id_rsa:ro \
      --volume /root/.ssh/id_rsa.pub:/root/.ssh/id_rsa.pub:ro \
@@ -287,35 +326,19 @@
 }
 
 function clean_container() {
-  echo "[INFO] clean all running containers"
+  logger info "clean all running containers"
  docker ps -a|awk 'NR>1{print $1}'|xargs docker rm -f
 }
 
-function usage() {
-  cat < set docker-ce version, default "$DOCKER_VER"
-  -e     set kubeasz-ext-bin version, default "$EXT_BIN_VER"
-  -k     set kubeasz-k8s-bin version, default "$K8S_BIN_VER"
-  -m     set docker registry mirrors, default "CN"(used in Mainland,China)
-  -p     set kubeasz-sys-pkg version, default "$SYS_PKG_VER"
-  -z     set kubeasz version, default "$KUBEASZ_VER"
-
-see more at https://github.com/kubeasz/dockerfiles
-EOF
-}
 
 ### Main Lines ##################################################
 
 function main() {
+  BASE="/etc/kubeasz"
+
  # check if use bash shell
-  readlink /proc/$$/exe|grep -q "dash" && { echo "[ERROR] you should use bash shell, not sh"; exit 1; }
+  readlink /proc/$$/exe|grep -q "dash" && { logger error "you should use bash shell, not sh"; exit 1; }
  # check if use with root
-  [[ "$EUID" -ne 0 ]] && { echo "[ERROR] you should run this script as root"; exit 1; }
+  [[ "$EUID" -ne 0 ]] && { logger error "you should run this script as root"; exit 1; }
 
  [[ "$#" -eq 0 ]] && { usage >&2; exit 1; }
 
@@ -360,12 +383,12 @@
    esac
  done
 
-  [[ "$ACTION" == "" ]] && { echo "[ERROR] illegal option"; usage; exit 1; }
+  [[ "$ACTION" == "" ]] && { logger error "illegal option"; usage; exit 1; }
 
  # execute cmd "$ACTION"
-  echo -e "[INFO] \033[33mAction begin\033[0m : $ACTION"
-  ${ACTION} || { echo -e "[ERROR] \033[31mAction failed\033[0m : $ACTION"; return 1; }
-  echo -e "[INFO] \033[32mAction successed\033[0m : $ACTION"
+  logger info "Action begin: $ACTION"
+  ${ACTION} || { logger error "Action failed: $ACTION"; return 1; }
+  logger info "Action succeeded: $ACTION"
 }
 
 main "$@"
diff --git a/01.prepare.yml b/playbooks/01.prepare.yml
similarity index 100%
rename from 01.prepare.yml
rename to playbooks/01.prepare.yml
diff --git a/02.etcd.yml b/playbooks/02.etcd.yml
similarity index 100%
rename from 02.etcd.yml
rename to playbooks/02.etcd.yml
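ezdown thus prepares everything under /etc/kubeasz and can optionally run kubeasz itself in a container. A sketch of the intended flow, assuming -D maps to download_all and -S to start_kubeasz_docker (as in the old easzup), and that the repo root containing ezctl is the directory mounted into the container at /etc/kubeasz:

    ./ezdown -D                       # download binaries/images; installs docker if missing
    ./ezdown -S                       # optional: start the 'kubeasz' container
    docker exec -it kubeasz /etc/kubeasz/ezctl new cluster1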
diff --git a/03.containerd.yml b/playbooks/03.containerd.yml
similarity index 100%
rename from 03.containerd.yml
rename to playbooks/03.containerd.yml
diff --git a/03.docker.yml b/playbooks/03.docker.yml
similarity index 100%
rename from 03.docker.yml
rename to playbooks/03.docker.yml
diff --git a/04.kube-master.yml b/playbooks/04.kube-master.yml
similarity index 100%
rename from 04.kube-master.yml
rename to playbooks/04.kube-master.yml
diff --git a/05.kube-node.yml b/playbooks/05.kube-node.yml
similarity index 100%
rename from 05.kube-node.yml
rename to playbooks/05.kube-node.yml
diff --git a/06.network.yml b/playbooks/06.network.yml
similarity index 100%
rename from 06.network.yml
rename to playbooks/06.network.yml
diff --git a/07.cluster-addon.yml b/playbooks/07.cluster-addon.yml
similarity index 100%
rename from 07.cluster-addon.yml
rename to playbooks/07.cluster-addon.yml
diff --git a/11.harbor.yml b/playbooks/11.harbor.yml
similarity index 100%
rename from 11.harbor.yml
rename to playbooks/11.harbor.yml
diff --git a/tools/01.addetcd.yml b/playbooks/21.addetcd.yml
similarity index 88%
rename from tools/01.addetcd.yml
rename to playbooks/21.addetcd.yml
index c56d049..26d004e 100644
--- a/tools/01.addetcd.yml
+++ b/playbooks/21.addetcd.yml
@@ -12,9 +12,9 @@
    shell: 'for ip in {{ NODE_IPS }};do \
              ETCDCTL_API=3 {{ base_dir }}/bin/etcdctl \
              --endpoints=https://"$ip":2379 \
-              --cacert={{ base_dir }}/.cluster/ssl/ca.pem \
-              --cert={{ base_dir }}/.cluster/ssl/admin.pem \
-              --key={{ base_dir }}/.cluster/ssl/admin-key.pem \
+              --cacert={{ cluster_dir }}/ssl/ca.pem \
+              --cert={{ cluster_dir }}/ssl/admin.pem \
+              --key={{ cluster_dir }}/ssl/admin-key.pem \
              endpoint health; \
           done'
    register: ETCD_CLUSTER_STATUS
diff --git a/tools/02.addnode.yml b/playbooks/22.addnode.yml
similarity index 100%
rename from tools/02.addnode.yml
rename to playbooks/22.addnode.yml
diff --git a/tools/03.addmaster.yml b/playbooks/23.addmaster.yml
similarity index 100%
rename from tools/03.addmaster.yml
rename to playbooks/23.addmaster.yml
diff --git a/tools/11.deletcd.yml b/playbooks/31.deletcd.yml
similarity index 94%
rename from tools/11.deletcd.yml
rename to playbooks/31.deletcd.yml
index e4b32e7..0ca3e99 100644
--- a/tools/11.deletcd.yml
+++ b/playbooks/31.deletcd.yml
@@ -24,9 +24,9 @@
    shell: 'for ip in {{ NODE_IPS }};do \
             ETCDCTL_API=3 {{ base_dir }}/bin/etcdctl \
             --endpoints=https://"$ip":2379 \
-             --cacert={{ base_dir }}/.cluster/ssl/ca.pem \
-             --cert={{ base_dir }}/.cluster/ssl/admin.pem \
-             --key={{ base_dir }}/.cluster/ssl/admin-key.pem \
+             --cacert={{ cluster_dir }}/ssl/ca.pem \
+             --cert={{ cluster_dir }}/ssl/admin.pem \
+             --key={{ cluster_dir }}/ssl/admin-key.pem \
             endpoint health; \
          done'
    register: ETCD_CLUSTER_STATUS
diff --git a/tools/12.delnode.yml b/playbooks/32.delnode.yml
similarity index 100%
rename from tools/12.delnode.yml
rename to playbooks/32.delnode.yml
diff --git a/tools/13.delmaster.yml b/playbooks/33.delmaster.yml
similarity index 100%
rename from tools/13.delmaster.yml
rename to playbooks/33.delmaster.yml
diff --git a/90.setup.yml b/playbooks/90.setup.yml
similarity index 100%
rename from 90.setup.yml
rename to playbooks/90.setup.yml
diff --git a/91.start.yml b/playbooks/91.start.yml
similarity index 100%
rename from 91.start.yml
rename to playbooks/91.start.yml
diff --git a/92.stop.yml b/playbooks/92.stop.yml
similarity index 100%
rename from 92.stop.yml
rename to playbooks/92.stop.yml
diff --git a/22.upgrade.yml b/playbooks/93.upgrade.yml
similarity index 78%
rename from 22.upgrade.yml
rename to playbooks/93.upgrade.yml
index 164f148..a525ee7 100644 --- a/22.upgrade.yml +++ b/playbooks/93.upgrade.yml @@ -1,7 +1,6 @@ # WARNING: Upgrade the k8s cluster can be risky. Make sure you know what you are doing. # Read the guide: 'op/upgrade.md' . -# Usage: `ansible-playbook /etc/ansible/22.upgrade.yml -t upgrade_k8s` -# or `easzctl upgrade` +# Usage: ezctl upgrade # update masters - hosts: diff --git a/23.backup.yml b/playbooks/94.backup.yml similarity index 81% rename from 23.backup.yml rename to playbooks/94.backup.yml index fa5d483..d2144d4 100644 --- a/23.backup.yml +++ b/playbooks/94.backup.yml @@ -12,9 +12,9 @@ shell: 'for ip in {{ NODE_IPS }};do \ ETCDCTL_API=3 {{ base_dir }}/bin/etcdctl \ --endpoints=https://"$ip":2379 \ - --cacert={{ base_dir }}/.cluster/ssl/ca.pem \ - --cert={{ base_dir }}/.cluster/ssl/admin.pem \ - --key={{ base_dir }}/.cluster/ssl/admin-key.pem \ + --cacert={{ cluster_dir }}/ssl/ca.pem \ + --cert={{ cluster_dir }}/ssl/admin.pem \ + --key={{ cluster_dir }}/ssl/admin-key.pem \ endpoint health; \ done' register: ETCD_CLUSTER_STATUS @@ -42,7 +42,7 @@ - name: fetch the backup data fetch: src: /etcd_backup/snapshot_{{ temp }}.db - dest: "{{ base_dir }}/.cluster/backup/" + dest: "{{ cluster_dir }}/backup/" flat: yes delegate_to: "{{ RUNNING_NODE.stdout }}" @@ -52,14 +52,14 @@ - name: Backing up ansible hosts-1 copy: src: "{{ base_dir }}/hosts" - dest: "{{ base_dir }}/.cluster/backup/hosts" + dest: "{{ cluster_dir }}/backup/hosts" register: p - name: Backing up ansible hosts-2 - shell: "cd {{ base_dir }}/.cluster/backup && \ + shell: "cd {{ cluster_dir }}/backup && \ cp -fp hosts hosts-$(date +'%Y%m%d%H%M')" when: 'p is changed' #- name: Backing up etcd snapshot with datetime - # shell: "cd {{ base_dir }}/.cluster/backup && \ + # shell: "cd {{ cluster_dir }}/backup && \ # cp -fp snapshot.db snapshot-$(date +'%Y%m%d%H%M').db" diff --git a/24.restore.yml b/playbooks/95.restore.yml similarity index 100% rename from 24.restore.yml rename to playbooks/95.restore.yml diff --git a/99.clean.yml b/playbooks/99.clean.yml similarity index 100% rename from 99.clean.yml rename to playbooks/99.clean.yml diff --git a/roles/calico/defaults/main.yml b/roles/calico/defaults/main.yml deleted file mode 100644 index 2cb65ec..0000000 --- a/roles/calico/defaults/main.yml +++ /dev/null @@ -1,28 +0,0 @@ -# 部分calico相关配置,更全配置可以去roles/calico/templates/calico.yaml.j2自定义 - -# etcd 集群服务地址列表, 根据etcd组成员自动生成 -TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}" -ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}" - -# 设置 CALICO_IPV4POOL_IPIP=“off”,可以提高网络性能,条件限制详见 docs/setup/calico.md -CALICO_IPV4POOL_IPIP: "Always" - -# 设置 Felix 日志级别 -FELIX_LOG_LVL: "warning" - -# 设置 calico-node使用的host IP,bgp邻居通过该地址建立,可手工指定也可以自动发现 -#IP_AUTODETECTION_METHOD: "interface=eth0" -IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube-master'][0] }}" - -# 设置calico 网络 backend: brid, vxlan, none -CALICO_NETWORKING_BACKEND: "brid" - -# 更新支持calico 版本: [v3.3.x] [v3.4.x] [v3.8.x] [v3.15.x] -calicoVer: "v3.15.3" -calico_ver: "{{ calicoVer }}" - -# calico 主版本 -calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}" - -# 离线镜像tar包 -calico_offline: "calico_{{ calico_ver }}.tar" diff --git a/roles/calico/templates/calico-v3.15.yaml.j2 b/roles/calico/templates/calico-v3.15.yaml.j2 index 5c81b17..a84647d 100644 --- a/roles/calico/templates/calico-v3.15.yaml.j2 +++ b/roles/calico/templates/calico-v3.15.yaml.j2 @@ -344,7 +344,7 @@ spec: value: "false" # Set Felix logging to "info" - name: FELIX_LOGSEVERITYSCREEN - 
value: "info" + value: "warning" - name: FELIX_HEALTHENABLED value: "true" # Set Kubernetes NodePorts: If services do use NodePorts outside Calico’s expected range, diff --git a/roles/calico/templates/calico-v3.3.yaml.j2 b/roles/calico/templates/calico-v3.3.yaml.j2 index ca2254e..68d6d29 100644 --- a/roles/calico/templates/calico-v3.3.yaml.j2 +++ b/roles/calico/templates/calico-v3.3.yaml.j2 @@ -180,7 +180,7 @@ spec: value: "false" # Set Felix logging - name: FELIX_LOGSEVERITYSCREEN - value: "{{ FELIX_LOG_LVL }}" + value: "warning" - name: FELIX_HEALTHENABLED value: "true" # Set Kubernetes NodePorts: If services do use NodePorts outside Calico’s expected range, diff --git a/roles/calico/templates/calico-v3.4.yaml.j2 b/roles/calico/templates/calico-v3.4.yaml.j2 index e23c730..8459edc 100644 --- a/roles/calico/templates/calico-v3.4.yaml.j2 +++ b/roles/calico/templates/calico-v3.4.yaml.j2 @@ -218,7 +218,7 @@ spec: value: "false" # Set Felix logging - name: FELIX_LOGSEVERITYSCREEN - value: "{{ FELIX_LOG_LVL }}" + value: "warning" - name: FELIX_HEALTHENABLED value: "true" # Set Kubernetes NodePorts: If services do use NodePorts outside Calico’s expected range, diff --git a/roles/calico/templates/calico-v3.8.yaml.j2 b/roles/calico/templates/calico-v3.8.yaml.j2 index 6f94262..e6d689f 100644 --- a/roles/calico/templates/calico-v3.8.yaml.j2 +++ b/roles/calico/templates/calico-v3.8.yaml.j2 @@ -318,7 +318,7 @@ spec: value: "false" # Set Felix logging to "info" - name: FELIX_LOGSEVERITYSCREEN - value: "{{ FELIX_LOG_LVL }}" + value: "warning" - name: FELIX_HEALTHENABLED value: "true" # Set Kubernetes NodePorts: If services do use NodePorts outside Calico’s expected range, diff --git a/roles/calico/vars/main.yml b/roles/calico/vars/main.yml new file mode 100644 index 0000000..da3d3b8 --- /dev/null +++ b/roles/calico/vars/main.yml @@ -0,0 +1,3 @@ +# etcd 集群服务地址列表, 根据etcd组成员自动生成 +TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}" +ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}" diff --git a/roles/chrony/defaults/main.yml b/roles/chrony/defaults/main.yml deleted file mode 100644 index 3f8d713..0000000 --- a/roles/chrony/defaults/main.yml +++ /dev/null @@ -1,13 +0,0 @@ -# 设置时间源服务器 -ntp_servers: - - "ntp1.aliyun.com" - - "ntp2.aliyun.com" - - "time1.cloud.tencent.com" - - "time2.cloud.tencent.com" - - "0.cn.pool.ntp.org" - -# 设置允许内部时间同步的网络段,比如"10.0.0.0/8",默认全部允许 -local_network: "0.0.0.0/0" - -# 离线安装 chrony (offline|online) -INSTALL_SOURCE: "online" diff --git a/roles/cilium/defaults/main.yml b/roles/cilium/defaults/main.yml deleted file mode 100644 index 4f38ba6..0000000 --- a/roles/cilium/defaults/main.yml +++ /dev/null @@ -1,14 +0,0 @@ -# 部分cilium相关配置, Note: cilium 需要Linux kernel >= 4.9.17 - -# 如果 node 节点有多块网卡,请设置 true -# 如果发现‘dial tcp 10.68.0.1:443: i/o timeout’的错误,请设置 true -NODE_WITH_MULTIPLE_NETWORKS: "true" - -# 镜像版本 -cilium_ver: "v1.4.1" - -# 离线镜像tar包 -cilium_offline: "cilium_{{ cilium_ver }}.tar" - -# CILIUM_ETCD_OPERATOR 创建的 etcd 集群节点数 1,3,5,7... 
-ETCD_CLUSTER_SIZE: 1 diff --git a/roles/cluster-addon/defaults/main.yml b/roles/cluster-addon/vars/main.yml similarity index 68% rename from roles/cluster-addon/defaults/main.yml rename to roles/cluster-addon/vars/main.yml index 8713046..3f3cc86 100644 --- a/roles/cluster-addon/defaults/main.yml +++ b/roles/cluster-addon/vars/main.yml @@ -1,10 +1,8 @@ -# dns 自动安装,'dns_backend'可选"coredns"和“kubedns” -dns_install: "yes" -dns_backend: "coredns" # 设置 dns svc ip (这里选用 SERVICE_CIDR 中第2个IP) CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(2) | ipaddr('address') }}" + +# kubednsVer: "1.14.13" -corednsVer: "1.7.1" kubedns_offline: "kubedns_{{ kubednsVer }}.tar" coredns_offline: "coredns_{{ corednsVer }}.tar" dns_offline: "{%- if dns_backend == 'coredns' -%} \ @@ -13,17 +11,9 @@ dns_offline: "{%- if dns_backend == 'coredns' -%} \ {{ kubedns_offline }} \ {%- endif -%}" -# metric server 自动安装 -metricsserver_install: "yes" -metricsVer: "v0.3.6" metricsserver_offline: "metrics-server_{{ metricsVer }}.tar" -# dashboard 自动安装 -# dashboard v2.x.x 不依赖于heapster -dashboard_install: "yes" -dashboardVer: "v2.0.4" dashboard_offline: "dashboard_{{ dashboardVer }}.tar" -dashboardMetricsScraperVer: "v1.0.4" metricsscraper_offline: "metrics-scraper_{{ dashboardMetricsScraperVer }}.tar" # ingress 自动安装,可选 "traefik" 和 "nginx-ingress" @@ -36,9 +26,3 @@ metricsscraper_offline: "metrics-scraper_{{ dashboardMetricsScraperVer }}.tar" #metallb_protocol: "layer2" #metallb_offline: "metallb_{{ metallbVer }}.tar" #metallb_vip_pool: "192.168.1.240/29" - -# efk 自动安装 -#efk_install: "no" - -# prometheus 自动安装 -#prometheus_install: "no" diff --git a/roles/cluster-restore/defaults/main.yml b/roles/cluster-restore/defaults/main.yml index 0398c0c..16a043d 100644 --- a/roles/cluster-restore/defaults/main.yml +++ b/roles/cluster-restore/defaults/main.yml @@ -1,5 +1,5 @@ # 指定需要恢复的 etcd 数据备份,默认使用最近的一次备份 -# 在ansible 控制端查看备份目录:/etc/ansible/.cluster/backup +# 在ansible 控制端查看备份目录:/etc/kubeasz/clusters/_cluster_name_/backup db_to_restore: "snapshot.db" # etcd 集群间通信的IP和端口, 根据etcd组成员自动生成 diff --git a/roles/cluster-restore/tasks/main.yml b/roles/cluster-restore/tasks/main.yml index 1d0616c..8d42011 100644 --- a/roles/cluster-restore/tasks/main.yml +++ b/roles/cluster-restore/tasks/main.yml @@ -9,7 +9,7 @@ - name: 准备指定的备份etcd 数据 copy: - src: "{{ base_dir }}/.cluster/backup/{{ db_to_restore }}" + src: "{{ cluster_dir }}/backup/{{ db_to_restore }}" dest: "/etcd_backup/snapshot.db" - name: 清理上次备份恢复数据 diff --git a/roles/containerd/defaults/main.yml b/roles/containerd/defaults/main.yml deleted file mode 100644 index 23c1b0f..0000000 --- a/roles/containerd/defaults/main.yml +++ /dev/null @@ -1,8 +0,0 @@ -# 容器持久化存储目录 -STORAGE_DIR: "/var/lib/containerd" - -# 基础容器镜像 -SANDBOX_IMAGE: "easzlab/pause-amd64:3.2" - -# 启用容器仓库镜像 -ENABLE_MIRROR_REGISTRY: true diff --git a/roles/containerd/templates/config.toml.j2 b/roles/containerd/templates/config.toml.j2 index dd9ceda..faf52c2 100644 --- a/roles/containerd/templates/config.toml.j2 +++ b/roles/containerd/templates/config.toml.j2 @@ -1,4 +1,4 @@ -root = "{{ STORAGE_DIR }}" +root = "{{ CONTAINERD_STORAGE_DIR }}" state = "/run/containerd" oom_score = -999 diff --git a/roles/deploy/defaults/main.yml b/roles/deploy/defaults/main.yml deleted file mode 100644 index d402c64..0000000 --- a/roles/deploy/defaults/main.yml +++ /dev/null @@ -1,13 +0,0 @@ -# CA 证书相关参数 -CA_EXPIRY: "876000h" -CERT_EXPIRY: "438000h" - -# apiserver 默认第一个master节点 -KUBE_APISERVER: "https://{{ groups['kube-master'][0] }}:6443" - -# kubeconfig 
配置参数,注意权限根据‘USER_NAME’设置: -# 'admin' 表示创建集群管理员(所有)权限的 kubeconfig -# 'read' 表示创建只读权限的 kubeconfig -CLUSTER_NAME: "cluster1" -USER_NAME: "admin" -CONTEXT_NAME: "context-{{ CLUSTER_NAME }}-{{ USER_NAME }}" diff --git a/roles/deploy/tasks/create-kube-controller-manager-kubeconfig.yml b/roles/deploy/tasks/create-kube-controller-manager-kubeconfig.yml index 64c8e6a..df42d03 100644 --- a/roles/deploy/tasks/create-kube-controller-manager-kubeconfig.yml +++ b/roles/deploy/tasks/create-kube-controller-manager-kubeconfig.yml @@ -1,8 +1,8 @@ - name: 准备kube-controller-manager 证书签名请求 - template: src=kube-controller-manager-csr.json.j2 dest={{ base_dir }}/.cluster/ssl/kube-controller-manager-csr.json + template: src=kube-controller-manager-csr.json.j2 dest={{ cluster_dir }}/ssl/kube-controller-manager-csr.json - name: 创建 kube-controller-manager证书与私钥 - shell: "cd {{ base_dir }}/.cluster/ssl && {{ base_dir }}/bin/cfssl gencert \ + shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \ -ca=ca.pem \ -ca-key=ca-key.pem \ -config=ca-config.json \ @@ -10,24 +10,24 @@ - name: 设置集群参数 shell: "{{ base_dir }}/bin/kubectl config set-cluster kubernetes \ - --certificate-authority={{ base_dir }}/.cluster/ssl/ca.pem \ + --certificate-authority={{ cluster_dir }}/ssl/ca.pem \ --embed-certs=true \ --server={{ KUBE_APISERVER }} \ - --kubeconfig={{ base_dir }}/.cluster/kube-controller-manager.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig" - name: 设置认证参数 shell: "{{ base_dir }}/bin/kubectl config set-credentials system:kube-controller-manager \ - --client-certificate={{ base_dir }}/.cluster/ssl/kube-controller-manager.pem \ - --client-key={{ base_dir }}/.cluster/ssl/kube-controller-manager-key.pem \ + --client-certificate={{ cluster_dir }}/ssl/kube-controller-manager.pem \ + --client-key={{ cluster_dir }}/ssl/kube-controller-manager-key.pem \ --embed-certs=true \ - --kubeconfig={{ base_dir }}/.cluster/kube-controller-manager.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig" - name: 设置上下文参数 shell: "{{ base_dir }}/bin/kubectl config set-context default \ --cluster=kubernetes \ --user=system:kube-controller-manager \ - --kubeconfig={{ base_dir }}/.cluster/kube-controller-manager.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig" - name: 选择默认上下文 shell: "{{ base_dir }}/bin/kubectl config use-context default \ - --kubeconfig={{ base_dir }}/.cluster/kube-controller-manager.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig" diff --git a/roles/deploy/tasks/create-kube-proxy-kubeconfig.yml b/roles/deploy/tasks/create-kube-proxy-kubeconfig.yml index 41547b1..27a3c40 100644 --- a/roles/deploy/tasks/create-kube-proxy-kubeconfig.yml +++ b/roles/deploy/tasks/create-kube-proxy-kubeconfig.yml @@ -1,8 +1,8 @@ - name: 准备kube-proxy 证书签名请求 - template: src=kube-proxy-csr.json.j2 dest={{ base_dir }}/.cluster/ssl/kube-proxy-csr.json + template: src=kube-proxy-csr.json.j2 dest={{ cluster_dir }}/ssl/kube-proxy-csr.json - name: 创建 kube-proxy证书与私钥 - shell: "cd {{ base_dir }}/.cluster/ssl && {{ base_dir }}/bin/cfssl gencert \ + shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \ -ca=ca.pem \ -ca-key=ca-key.pem \ -config=ca-config.json \ @@ -10,24 +10,24 @@ - name: 设置集群参数 shell: "{{ base_dir }}/bin/kubectl config set-cluster kubernetes \ - --certificate-authority={{ base_dir }}/.cluster/ssl/ca.pem \ + --certificate-authority={{ cluster_dir }}/ssl/ca.pem \ --embed-certs=true \ --server={{ 
KUBE_APISERVER }} \ - --kubeconfig={{ base_dir }}/.cluster/kube-proxy.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig" - name: 设置客户端认证参数 shell: "{{ base_dir }}/bin/kubectl config set-credentials kube-proxy \ - --client-certificate={{ base_dir }}/.cluster/ssl/kube-proxy.pem \ - --client-key={{ base_dir }}/.cluster/ssl/kube-proxy-key.pem \ + --client-certificate={{ cluster_dir }}/ssl/kube-proxy.pem \ + --client-key={{ cluster_dir }}/ssl/kube-proxy-key.pem \ --embed-certs=true \ - --kubeconfig={{ base_dir }}/.cluster/kube-proxy.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig" - name: 设置上下文参数 shell: "{{ base_dir }}/bin/kubectl config set-context default \ --cluster=kubernetes \ --user=kube-proxy \ - --kubeconfig={{ base_dir }}/.cluster/kube-proxy.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig" - name: 选择默认上下文 shell: "{{ base_dir }}/bin/kubectl config use-context default \ - --kubeconfig={{ base_dir }}/.cluster/kube-proxy.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig" diff --git a/roles/deploy/tasks/create-kube-scheduler-kubeconfig.yml b/roles/deploy/tasks/create-kube-scheduler-kubeconfig.yml index a3db92f..9d2869e 100644 --- a/roles/deploy/tasks/create-kube-scheduler-kubeconfig.yml +++ b/roles/deploy/tasks/create-kube-scheduler-kubeconfig.yml @@ -1,8 +1,8 @@ - name: 准备kube-scheduler 证书签名请求 - template: src=kube-scheduler-csr.json.j2 dest={{ base_dir }}/.cluster/ssl/kube-scheduler-csr.json + template: src=kube-scheduler-csr.json.j2 dest={{ cluster_dir }}/ssl/kube-scheduler-csr.json - name: 创建 kube-scheduler证书与私钥 - shell: "cd {{ base_dir }}/.cluster/ssl && {{ base_dir }}/bin/cfssl gencert \ + shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \ -ca=ca.pem \ -ca-key=ca-key.pem \ -config=ca-config.json \ @@ -10,24 +10,24 @@ - name: 设置集群参数 shell: "{{ base_dir }}/bin/kubectl config set-cluster kubernetes \ - --certificate-authority={{ base_dir }}/.cluster/ssl/ca.pem \ + --certificate-authority={{ cluster_dir }}/ssl/ca.pem \ --embed-certs=true \ --server={{ KUBE_APISERVER }} \ - --kubeconfig={{ base_dir }}/.cluster/kube-scheduler.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig" - name: 设置认证参数 shell: "{{ base_dir }}/bin/kubectl config set-credentials system:kube-scheduler \ - --client-certificate={{ base_dir }}/.cluster/ssl/kube-scheduler.pem \ - --client-key={{ base_dir }}/.cluster/ssl/kube-scheduler-key.pem \ + --client-certificate={{ cluster_dir }}/ssl/kube-scheduler.pem \ + --client-key={{ cluster_dir }}/ssl/kube-scheduler-key.pem \ --embed-certs=true \ - --kubeconfig={{ base_dir }}/.cluster/kube-scheduler.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig" - name: 设置上下文参数 shell: "{{ base_dir }}/bin/kubectl config set-context default \ --cluster=kubernetes \ --user=system:kube-scheduler \ - --kubeconfig={{ base_dir }}/.cluster/kube-scheduler.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig" - name: 选择默认上下文 shell: "{{ base_dir }}/bin/kubectl config use-context default \ - --kubeconfig={{ base_dir }}/.cluster/kube-scheduler.kubeconfig" + --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig" diff --git a/roles/deploy/tasks/create-kubectl-kubeconfig.yml b/roles/deploy/tasks/create-kubectl-kubeconfig.yml index 71b6b4d..2f9d75b 100644 --- a/roles/deploy/tasks/create-kubectl-kubeconfig.yml +++ b/roles/deploy/tasks/create-kubectl-kubeconfig.yml @@ -11,10 +11,10 @@ when: USER_NAME == "read" - name: 准备kubectl使用的{{ USER_NAME }}证书签名请求 - template: 
src={{ USER_NAME }}-csr.json.j2 dest={{ base_dir }}/.cluster/ssl/{{ USER_NAME }}-csr.json + template: src={{ USER_NAME }}-csr.json.j2 dest={{ cluster_dir }}/ssl/{{ USER_NAME }}-csr.json - name: 创建{{ USER_NAME }}证书与私钥 - shell: "cd {{ base_dir }}/.cluster/ssl && {{ base_dir }}/bin/cfssl gencert \ + shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \ -ca=ca.pem \ -ca-key=ca-key.pem \ -config=ca-config.json \ @@ -22,15 +22,15 @@ - name: 设置集群参数 shell: "{{ base_dir }}/bin/kubectl config set-cluster {{ CLUSTER_NAME }} \ - --certificate-authority={{ base_dir }}/.cluster/ssl/ca.pem \ + --certificate-authority={{ cluster_dir }}/ssl/ca.pem \ --embed-certs=true \ --server={{ KUBE_APISERVER }}" - name: 设置客户端认证参数 shell: "{{ base_dir }}/bin/kubectl config set-credentials {{ USER_NAME }} \ - --client-certificate={{ base_dir }}/.cluster/ssl/{{ USER_NAME }}.pem \ + --client-certificate={{ cluster_dir }}/ssl/{{ USER_NAME }}.pem \ --embed-certs=true \ - --client-key={{ base_dir }}/.cluster/ssl/{{ USER_NAME }}-key.pem" + --client-key={{ cluster_dir }}/ssl/{{ USER_NAME }}-key.pem" - name: 设置上下文参数 shell: "{{ base_dir }}/bin/kubectl config set-context {{ CONTEXT_NAME }} \ diff --git a/roles/deploy/tasks/main.yml b/roles/deploy/tasks/main.yml index f792a3d..66574e1 100644 --- a/roles/deploy/tasks/main.yml +++ b/roles/deploy/tasks/main.yml @@ -1,8 +1,8 @@ - name: prepare some dirs file: name={{ item }} state=directory with_items: - - "{{ base_dir }}/.cluster/ssl" - - "{{ base_dir }}/.cluster/backup" + - "{{ cluster_dir }}/ssl" + - "{{ cluster_dir }}/backup" - name: 本地设置 bin 目录权限 file: path={{ base_dir }}/bin state=directory mode=0755 recurse=yes @@ -10,11 +10,11 @@ # 注册变量p,根据p的stat信息判断是否已经生成过ca证书,如果没有,下一步生成证书 # 如果已经有ca证书,为了保证整个安装的幂等性,跳过证书生成的步骤 - name: 读取ca证书stat信息 - stat: path="{{ base_dir }}/.cluster/ssl/ca.pem" + stat: path="{{ cluster_dir }}/ssl/ca.pem" register: p - name: 准备CA配置文件和签名请求 - template: src={{ item }}.j2 dest={{ base_dir }}/.cluster/ssl/{{ item }} + template: src={{ item }}.j2 dest={{ cluster_dir }}/ssl/{{ item }} with_items: - "ca-config.json" - "ca-csr.json" @@ -22,7 +22,7 @@ - name: 生成 CA 证书和私钥 when: p.stat.isreg is not defined - shell: "cd {{ base_dir }}/.cluster/ssl && \ + shell: "cd {{ cluster_dir }}/ssl && \ {{ base_dir }}/bin/cfssl gencert -initca ca-csr.json | {{ base_dir }}/bin/cfssljson -bare ca" #----------- 创建配置文件: /root/.kube/config diff --git a/roles/deploy/vars/main.yml b/roles/deploy/vars/main.yml new file mode 100644 index 0000000..3e9ca34 --- /dev/null +++ b/roles/deploy/vars/main.yml @@ -0,0 +1,2 @@ +# apiserver 默认第一个master节点 +KUBE_APISERVER: "https://{{ groups['kube-master'][0] }}:6443" diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml deleted file mode 100644 index 77e4fa5..0000000 --- a/roles/docker/defaults/main.yml +++ /dev/null @@ -1,14 +0,0 @@ -# docker容器存储目录 -STORAGE_DIR: "/var/lib/docker" - -# 开启Restful API -ENABLE_REMOTE_API: false - -# 启用 docker 仓库镜像 -ENABLE_MIRROR_REGISTRY: true - -# 设置 docker 仓库镜像 -REG_MIRRORS: '["https://docker.mirrors.ustc.edu.cn", "http://hub-mirror.c.163.com"]' - -# 信任的HTTP仓库 -INSECURE_REG: '["127.0.0.1/8"]' diff --git a/roles/docker/templates/daemon.json.j2 b/roles/docker/templates/daemon.json.j2 index eeb5d62..19643c1 100644 --- a/roles/docker/templates/daemon.json.j2 +++ b/roles/docker/templates/daemon.json.j2 @@ -1,8 +1,11 @@ { - "data-root": "{{ STORAGE_DIR }}", + "data-root": "{{ DOCKER_STORAGE_DIR }}", "exec-opts": ["native.cgroupdriver=cgroupfs"], {% if ENABLE_MIRROR_REGISTRY %} - 
"registry-mirrors": {{ REG_MIRRORS }}, + "registry-mirrors": [ + "https://docker.mirrors.ustc.edu.cn", + "http://hub-mirror.c.163.com" + ], {% endif %} {% if ENABLE_REMOTE_API %} "hosts": ["tcp://0.0.0.0:2376", "unix:///var/run/docker.sock"], diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index d624d1f..2f88b9e 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -14,7 +14,7 @@ tags: upgrade_etcd - name: 分发证书相关 - copy: src={{ base_dir }}/.cluster/ssl/{{ item }} dest={{ ca_dir }}/{{ item }} + copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }} with_items: - ca.pem - ca-key.pem diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/vars/main.yml similarity index 100% rename from roles/etcd/defaults/main.yml rename to roles/etcd/vars/main.yml diff --git a/roles/flannel/defaults/main.yml b/roles/flannel/defaults/main.yml deleted file mode 100644 index af67153..0000000 --- a/roles/flannel/defaults/main.yml +++ /dev/null @@ -1,13 +0,0 @@ -# 部分flannel配置,参考 docs/setup/network-plugin/flannel.md - -# 设置flannel 后端 -#FLANNEL_BACKEND: "host-gw" -FLANNEL_BACKEND: "vxlan" -DIRECT_ROUTING: false - -#flanneld_image: "quay.io/coreos/flannel:v0.10.0-amd64" -flannelVer: "v0.13.0-amd64" -flanneld_image: "easzlab/flannel:{{ flannelVer }}" - -# 离线镜像tar包 -flannel_offline: "flannel_{{ flannelVer }}.tar" diff --git a/roles/harbor/tasks/main.yml b/roles/harbor/tasks/main.yml index 98b4be6..2c3a859 100644 --- a/roles/harbor/tasks/main.yml +++ b/roles/harbor/tasks/main.yml @@ -34,7 +34,7 @@ - block: - name: 生成自签名证书相关 - copy: src={{ base_dir }}/.cluster/ssl/{{ item }} dest={{ ca_dir }}/{{ item }} + copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }} with_items: - ca.pem - ca-key.pem diff --git a/roles/harbor/defaults/main.yml b/roles/harbor/vars/main.yml similarity index 55% rename from roles/harbor/defaults/main.yml rename to roles/harbor/vars/main.yml index fd1ced3..0cbff3e 100644 --- a/roles/harbor/defaults/main.yml +++ b/roles/harbor/vars/main.yml @@ -1,9 +1,6 @@ -# harbor version,完整版本号,目前支持 v1.5.x , v1.6.x, v1.7.x, v1.8.x, v1.9.x, v1.10.x -HARBOR_VER: "v1.9.4" - # harbor 主版本号,目前支持主版本号 v1.5/v1.6/v1.7/v1.8/v1.9/v.10 # 从完整版本号提取出主版本号 v1.5/v1.6/v1.7/v1.8/v1.9/v.10 HARBOR_VER_MAIN: "{{ HARBOR_VER.split('.')[0] }}.{{ HARBOR_VER.split('.')[1] }}" # HARBOR_HOSTNAME 值设置 -HARBOR_HOSTNAME: "{{ inventory_hostname if (HARBOR_DOMAIN == '') else HARBOR_DOMAIN }}" +HARBOR_HOSTNAME: "{% if HARBOR_DOMAIN != '' %}HARBOR_DOMAIN{% else %}inventory_hostname{% endif %}" diff --git a/roles/kube-master/defaults/main.yml b/roles/kube-master/defaults/main.yml deleted file mode 100644 index 4afa6a0..0000000 --- a/roles/kube-master/defaults/main.yml +++ /dev/null @@ -1,29 +0,0 @@ -# etcd 集群服务地址列表, 根据etcd组成员自动生成 -TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}" -ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}" -# 设置 dns svc ip (这里选用 SERVICE_CIDR 中第1个IP) -CLUSTER_KUBERNETES_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(1) | ipaddr('address') }}" -# k8s 集群 master 节点证书配置,可以添加多个ip和域名(比如增加公网ip和域名) -MASTER_CERT_HOSTS: - - "10.1.1.1" - - "k8s.test.io" - #- "61.182.11.41" - #- "www.test.com" - -# apiserver 基础认证(用户名/密码)配置,详见 master 节点文件‘/etc/kubernetes/ssl/basic-auth.csv’ -# the first three values can be anything; -# These tokens are arbitrary but should represent at least 128 bits of entropy derived from -# a secure random number generator, for example: -# head -c 16 /dev/urandom | od -An -t x | tr -d ' ' -# 例子: 
diff --git a/roles/kube-master/defaults/main.yml b/roles/kube-master/defaults/main.yml
deleted file mode 100644
index 4afa6a0..0000000
--- a/roles/kube-master/defaults/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-# etcd 集群服务地址列表, 根据etcd组成员自动生成
-TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}"
-ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}"
-# 设置 dns svc ip (这里选用 SERVICE_CIDR 中第1个IP)
-CLUSTER_KUBERNETES_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(1) | ipaddr('address') }}"
-# k8s 集群 master 节点证书配置,可以添加多个ip和域名(比如增加公网ip和域名)
-MASTER_CERT_HOSTS:
-  - "10.1.1.1"
-  - "k8s.test.io"
-  #- "61.182.11.41"
-  #- "www.test.com"
-
-# apiserver 基础认证(用户名/密码)配置,详见 master 节点文件‘/etc/kubernetes/ssl/basic-auth.csv’
-# the first three values can be anything;
-# These tokens are arbitrary but should represent at least 128 bits of entropy derived from
-# a secure random number generator, for example:
-# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
-# 例子: 02b50b05283e98dd0fd71db496ef01e8,kubelet-bootstrap,10001,"system:bootstrappers"
-# 是否启用基础认证 yes/no
-BASIC_AUTH_ENABLE: 'yes'
-# 用户名:
-BASIC_AUTH_USER: 'admin'
-# 密码:初次运行时会生成随机密码
-BASIC_AUTH_PASS: '92c068405aa491239b56140ea6b3b44b'
-
-# node 节点上 pod 网段掩码长度(决定每个节点最多能分配的pod ip地址)
-# 如果flannel 使用 --kube-subnet-mgr 参数,那么它将读取该设置为每个节点分配pod网段
-# https://github.com/coreos/flannel/issues/847
-NODE_CIDR_LEN: 24
diff --git a/roles/kube-master/tasks/main.yml b/roles/kube-master/tasks/main.yml
index c1a31f4..749beb7 100644
--- a/roles/kube-master/tasks/main.yml
+++ b/roles/kube-master/tasks/main.yml
@@ -30,25 +30,6 @@
         -config={{ ca_dir }}/ca-config.json \
         -profile=kubernetes aggregator-proxy-csr.json | {{ bin_dir }}/cfssljson -bare aggregator-proxy"
 
-- block:
-  - name: 生成 basic-auth 随机密码
-    shell: 'PWD=`head -c 16 /dev/urandom | od -An -t x | tr -d " "`; \
-          sed -i "s/_pwd_/$PWD/g" {{ base_dir }}/roles/kube-master/defaults/main.yml; \
-          echo $PWD;'
-    connection: local
-    register: TMP_PASS
-    run_once: true
-
-  - name: 设置 basic-auth 随机密码
-    set_fact: BASIC_AUTH_PASS={{ TMP_PASS.stdout }}
-  when: 'BASIC_AUTH_ENABLE == "yes" and BASIC_AUTH_PASS == "_pwd_"'
-  tags: restart_master
-
-- name: 创建 basic-auth.csv
-  template: src=basic-auth.csv.j2 dest={{ ca_dir }}/basic-auth.csv
-  when: 'BASIC_AUTH_ENABLE == "yes"'
-  tags: restart_master
-
 - name: 替换 kubeconfig 的 apiserver 地址
   lineinfile:
     dest: "{{ item }}"
@@ -84,14 +65,3 @@
     retries: 5
     delay: 6
   tags: upgrade_k8s, restart_master
-
-- name: 配置{{ BASIC_AUTH_USER }}用户rbac权限
-  template: src=basic-auth-rbac.yaml.j2 dest=/opt/kube/basic-auth-rbac.yaml
-  when: 'BASIC_AUTH_ENABLE == "yes"'
-  tags: restart_master
-
-- name: 创建{{ BASIC_AUTH_USER }}用户rbac权限
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/basic-auth-rbac.yaml"
-  when: 'BASIC_AUTH_ENABLE == "yes"'
-  run_once: true
-  tags: restart_master
diff --git a/roles/kube-master/templates/basic-auth-rbac.yaml.j2 b/roles/kube-master/templates/basic-auth-rbac.yaml.j2
deleted file mode 100644
index 4407c96..0000000
--- a/roles/kube-master/templates/basic-auth-rbac.yaml.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: basic-auth-binding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: cluster-admin
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: User
-  name: {{ BASIC_AUTH_USER }}
diff --git a/roles/kube-master/templates/basic-auth.csv.j2 b/roles/kube-master/templates/basic-auth.csv.j2
deleted file mode 100644
index 00d4e3e..0000000
--- a/roles/kube-master/templates/basic-auth.csv.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-{{ BASIC_AUTH_PASS }},{{ BASIC_AUTH_USER }},1
-{{ BASIC_AUTH_PASS | truncate(8, True, '') }},readonly,2
diff --git a/roles/kube-master/templates/kube-apiserver.service.j2 b/roles/kube-master/templates/kube-apiserver.service.j2
index c1ad2b4..e2aa117 100644
--- a/roles/kube-master/templates/kube-apiserver.service.j2
+++ b/roles/kube-master/templates/kube-apiserver.service.j2
@@ -10,9 +10,6 @@ ExecStart={{ bin_dir }}/kube-apiserver \
   --anonymous-auth=false \
   --api-audiences=api,istio-ca \
   --authorization-mode=Node,RBAC \
-{% if BASIC_AUTH_ENABLE == "yes" %}
-  --token-auth-file={{ ca_dir }}/basic-auth.csv \
-{% endif %}
   --bind-address={{ inventory_hostname }} \
   --client-ca-file={{ ca_dir }}/ca.pem \
   --endpoint-reconciler-type=lease \
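The deleted defaults above document how the static token was produced. The same one-liner (quoted from those comments) still yields a 128-bit random hex string should a --token-auth-file ever be reintroduced:

  head -c 16 /dev/urandom | od -An -t x | tr -d ' '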
diff --git a/roles/kube-master/templates/kube-controller-manager.service.j2 b/roles/kube-master/templates/kube-controller-manager.service.j2
index c1283d6..5d72ad0 100644
--- a/roles/kube-master/templates/kube-controller-manager.service.j2
+++ b/roles/kube-master/templates/kube-controller-manager.service.j2
@@ -10,7 +10,7 @@ ExecStart={{ bin_dir }}/kube-controller-manager \
   --cluster-name=kubernetes \
   --cluster-signing-cert-file={{ ca_dir }}/ca.pem \
   --cluster-signing-key-file={{ ca_dir }}/ca-key.pem \
-  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
+  --kubeconfig={{ cluster_dir }}/kube-controller-manager.kubeconfig \
   --leader-elect=true \
   --node-cidr-mask-size={{ NODE_CIDR_LEN }} \
   --root-ca-file={{ ca_dir }}/ca.pem \
diff --git a/roles/kube-master/templates/kube-scheduler.service.j2 b/roles/kube-master/templates/kube-scheduler.service.j2
index 77d80b4..9985457 100644
--- a/roles/kube-master/templates/kube-scheduler.service.j2
+++ b/roles/kube-master/templates/kube-scheduler.service.j2
@@ -5,7 +5,7 @@ Documentation=https://github.com/GoogleCloudPlatform/kubernetes
 [Service]
 ExecStart={{ bin_dir }}/kube-scheduler \
   --address=127.0.0.1 \
-  --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
+  --kubeconfig={{ cluster_dir }}/kube-scheduler.kubeconfig \
   --leader-elect=true \
   --v=2
 Restart=always
diff --git a/roles/kube-master/vars/main.yml b/roles/kube-master/vars/main.yml
new file mode 100644
index 0000000..a6029e2
--- /dev/null
+++ b/roles/kube-master/vars/main.yml
@@ -0,0 +1,6 @@
+# etcd 集群服务地址列表, 根据etcd组成员自动生成
+TMP_ENDPOINTS: "{% for h in groups['etcd'] %}https://{{ h }}:2379,{% endfor %}"
+ETCD_ENDPOINTS: "{{ TMP_ENDPOINTS.rstrip(',') }}"
+
+# 设置 dns svc ip (这里选用 SERVICE_CIDR 中第1个IP)
+CLUSTER_KUBERNETES_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(1) | ipaddr('address') }}"
diff --git a/roles/kube-node/defaults/main.yml b/roles/kube-node/defaults/main.yml
deleted file mode 100644
index 359121c..0000000
--- a/roles/kube-node/defaults/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-# 设置 dns svc ip (这里选用 SERVICE_CIDR 中第2个IP)
-CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(2) | ipaddr('address') }}"
-
-# 基础容器镜像
-SANDBOX_IMAGE: "easzlab/pause-amd64:3.2"
-
-# Kubelet 根目录
-KUBELET_ROOT_DIR: "/var/lib/kubelet"
-
-# node节点最大pod 数
-MAX_PODS: 110
-
-# 配置为kube组件(kubelet,kube-proxy,dockerd等)预留的资源量
-# 数值设置详见templates/kubelet-config.yaml.j2
-KUBE_RESERVED_ENABLED: "yes"
-
-# k8s 官方不建议草率开启 system-reserved, 除非你基于长期监控,了解系统的资源占用状况;
-# 并且随着系统运行时间,需要适当增加资源预留,数值设置详见templates/kubelet-config.yaml.j2
-# 系统预留设置基于 4c/8g 虚机,最小化安装系统服务,如果使用高性能物理机可以适当增加预留
-# 另外,集群安装时候apiserver等资源占用会短时较大,建议至少预留1g内存
-SYS_RESERVED_ENABLED: "no"
-
-# haproxy balance mode
-BALANCE_ALG: "roundrobin"
-
-# 设置 APISERVER 地址
-KUBE_APISERVER: "{%- if inventory_hostname in groups['kube-master'] -%} \
-                 https://{{ inventory_hostname }}:6443 \
-                 {%- else -%} \
-                 {%- if groups['kube-master']|length > 1 -%} \
-                 https://127.0.0.1:6443 \
-                 {%- else -%} \
-                 https://{{ groups['kube-master'][0] }}:6443 \
-                 {%- endif -%} \
-                 {%- endif -%}"
-
-# 增加/删除 master 节点时,node 节点需要重新配置 haproxy 等
-MASTER_CHG: "no"
-
-# 离线安装 haproxy (offline|online)
-INSTALL_SOURCE: "online"
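The KUBE_APISERVER expression deleted here (and re-added under kube-node/vars below) encodes a three-way choice. Restated as shell pseudologic for readability (in_group, NODE, MASTER_COUNT and FIRST_MASTER are hypothetical stand-ins for the inventory facts, not names from this repo):

  if in_group kube-master "$NODE"; then
      echo "https://$NODE:6443"           # masters talk to their own apiserver
  elif [ "$MASTER_COUNT" -gt 1 ]; then
      echo "https://127.0.0.1:6443"       # nodes go via the local haproxy
  else
      echo "https://$FIRST_MASTER:6443"   # single master: connect directly
  fi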
diff --git a/roles/kube-node/templates/kube-proxy.service.j2 b/roles/kube-node/templates/kube-proxy.service.j2
index 0b47527..c3c75d5 100644
--- a/roles/kube-node/templates/kube-proxy.service.j2
+++ b/roles/kube-node/templates/kube-proxy.service.j2
@@ -10,7 +10,7 @@ ExecStart={{ bin_dir }}/kube-proxy \
   --bind-address={{ inventory_hostname }} \
   --cluster-cidr={{ CLUSTER_CIDR }} \
   --hostname-override={{ inventory_hostname }} \
-  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
+  --kubeconfig={{ cluster_dir }}/kube-proxy.kubeconfig \
   --logtostderr=true \
   --proxy-mode={{ PROXY_MODE }}
 Restart=always
diff --git a/roles/kube-node/vars/main.yml b/roles/kube-node/vars/main.yml
new file mode 100644
index 0000000..ed88888
--- /dev/null
+++ b/roles/kube-node/vars/main.yml
@@ -0,0 +1,13 @@
+# 设置 dns svc ip (这里选用 SERVICE_CIDR 中第2个IP)
+CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(2) | ipaddr('address') }}"
+
+# 设置 APISERVER 地址
+KUBE_APISERVER: "{%- if inventory_hostname in groups['kube-master'] -%} \
+                 https://{{ inventory_hostname }}:6443 \
+                 {%- else -%} \
+                 {%- if groups['kube-master']|length > 1 -%} \
+                 https://127.0.0.1:6443 \
+                 {%- else -%} \
+                 https://{{ groups['kube-master'][0] }}:6443 \
+                 {%- endif -%} \
+                 {%- endif -%}"
diff --git a/roles/kube-ovn/defaults/main.yml b/roles/kube-ovn/vars/main.yml
similarity index 52%
rename from roles/kube-ovn/defaults/main.yml
rename to roles/kube-ovn/vars/main.yml
index c2d5333..453cf76 100644
--- a/roles/kube-ovn/defaults/main.yml
+++ b/roles/kube-ovn/vars/main.yml
@@ -1,10 +1,4 @@
-# 选择 OVN DB and OVN Control Plane 节点,默认为第一个master节点
-OVN_DB_NODE: "{{ groups['kube-master'][0] }}"
-
 kube_ovn_default_cidr: "{{ CLUSTER_CIDR }}"
 kube_ovn_default_gateway: "{{ CLUSTER_CIDR | ipaddr('net') | ipaddr(1) | ipaddr('address') }}"
 kube_ovn_node_switch_cidr: 100.64.0.0/16
 kube_ovn_enable_mirror: true
-
-# 离线镜像tar包
-kube_ovn_offline: "kube_ovn_0.9.1.tar"
diff --git a/roles/kube-router/defaults/main.yml b/roles/kube-router/defaults/main.yml
deleted file mode 100644
index c68a0a9..0000000
--- a/roles/kube-router/defaults/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# 更多设置,参考https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
-
-# 因目前 kube-proxy 已提供 ipvs 模式,这里不使用 kube-router 的 service_proxy
-#SERVICE_PROXY: "false"
-
-# 公有云上存在限制,一般需要始终开启 ipinip;自有环境可以设置为 "subnet"
-OVERLAY_TYPE: "full"
-
-# NetworkPolicy 支持开关
-FIREWALL_ENABLE: "true"
-
-# kube-router 镜像版本
-kube_router_ver: "v0.3.1"
-busybox_ver: "1.28.4"
-
-# kube-router 离线镜像tar包
-kuberouter_offline: "kube-router_{{ kube_router_ver }}.tar"
-busybox_offline: "busybox_{{ busybox_ver }}.tar"
diff --git a/roles/prepare/defaults/main.yml b/roles/prepare/defaults/main.yml
deleted file mode 100644
index 3edb940..0000000
--- a/roles/prepare/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-# 离线安装系统软件包 (offline|online)
-INSTALL_SOURCE: "online"
-
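The two service IPs derived from SERVICE_CIDR in the vars files above (first IP for the apiserver ClusterIP, second for cluster DNS) can be cross-checked on a running cluster; this assumes the DNS addon exposes the conventional kube-dns service name:

  kubectl get svc kubernetes -n default -o jsonpath='{.spec.clusterIP}'; echo
  kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}'; echo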
diff --git a/roles/prepare/tasks/main.yml b/roles/prepare/tasks/main.yml
index 45f3aa3..f4b2bdb 100644
--- a/roles/prepare/tasks/main.yml
+++ b/roles/prepare/tasks/main.yml
@@ -38,7 +38,7 @@
 
 - block:
   - name: 分发证书相关
-    copy: src={{ base_dir }}/.cluster/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
+    copy: src={{ cluster_dir }}/ssl/{{ item }} dest={{ ca_dir }}/{{ item }}
     with_items:
     - admin.pem
     - admin-key.pem
@@ -57,13 +57,13 @@
     copy: src=/root/.kube/config dest=/root/.kube/config
 
   - name: 分发 kube-proxy.kubeconfig配置文件
-    copy: src={{ base_dir }}/.cluster/kube-proxy.kubeconfig dest=/etc/kubernetes/kube-proxy.kubeconfig
+    copy: src={{ cluster_dir }}/kube-proxy.kubeconfig dest={{ cluster_dir }}/kube-proxy.kubeconfig
 
   - name: 分发 kube-controller-manager.kubeconfig配置文件
-    copy: src={{ base_dir }}/.cluster/kube-controller-manager.kubeconfig dest=/etc/kubernetes/kube-controller-manager.kubeconfig
+    copy: src={{ cluster_dir }}/kube-controller-manager.kubeconfig dest={{ cluster_dir }}/kube-controller-manager.kubeconfig
     when: "inventory_hostname in groups['kube-master']"
 
   - name: 分发 kube-scheduler.kubeconfig配置文件
-    copy: src={{ base_dir }}/.cluster/kube-scheduler.kubeconfig dest=/etc/kubernetes/kube-scheduler.kubeconfig
+    copy: src={{ cluster_dir }}/kube-scheduler.kubeconfig dest={{ cluster_dir }}/kube-scheduler.kubeconfig
     when: "inventory_hostname in groups['kube-master']"
   when: "inventory_hostname in groups['kube-master'] or inventory_hostname in groups['kube-node']"
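Whichever destination path the distribution tasks above use, a copied kubeconfig can be sanity-checked without contacting the cluster, since kubectl config view only parses the file (the path shown is illustrative):

  kubectl config view --kubeconfig=/etc/kubeasz/clusters/demo/kube-proxy.kubeconfig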
diff --git a/tools/basic-env-setup.sh b/tools/basic-env-setup.sh
deleted file mode 100644
index 04dd26f..0000000
--- a/tools/basic-env-setup.sh
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/bin/bash
-
-# 本脚本提供如下功能,作者:Joey Yang, https://github.com/Code2Life
-# 1. 在Ubuntu/CentOS/Fedora/ArchLinux中自动化的安装python+ansible;
-# 2. clone kubeasz项目代码, 并将需要的二进制文件下载解压到/etc/ansible/bin中;
-# 另外, 相关的k8s二进制文件, 我同步到了个人在七牛上的CDN存储中(速度更快), 方便大家下载: filecdn.code2life.top;
-#
-# 使用方法:
-# 1. 支持带参数的运行, 如: ./basic-env-setup.sh k8s.193.tar.gz 指定不同的kubernetes二进制文件, 无参数时默认最新的k8s.1100.tar.gz (k8s 1.10.0 + etcd 3.3.2).
-# 2. 也可以在任何一台支持的linux设备运行:curl http://filecdn.code2life.top/kubeasz-basic-env-setup.sh | sh -s
-# 已经亲测centos7/ubuntu16.04/debian9/fedora27都是可以的, 二进制包下载速度贼快.脚本运行完毕之后, 只需到/etc/ansible目录下配置好hosts, 复制完ssh的公钥即可通过ansible-playbook迅速搭建集群了.
-
-set -e
-
-# curl http://filecdn.code2life.top/kubeasz-basic-env-setup.sh | sh -s
-
-# 默认1.10.0 版本的 Kubernetes
-bin_resource_url='http://filecdn.code2life.top/k8s.1100.tar.gz'
-
-# 如果参数指定k8s相关的bin以指定的为准, 例如: k8s.193.tar.gz
-if [ "$1" ];then
-    bin_resource_url="http://filecdn.code2life.top/"$1
-fi
-
-# 各Linux版本安装python/pip
-# ---------------------------
-
-# debian 默认的apt源在国内访问很慢, 可手动修改/etc/apt/sources.list修改为其他源
-# 以 debian 9 为例, source.list可修改为如下内容, ubuntu修改方法类似, 找到相应系统和版本的镜像源替换即可
-# deb http://mirrors.163.com/debian/ stretch main non-free contrib
-# deb http://mirrors.163.com/debian/ stretch-updates main non-free contrib
-# deb http://mirrors.163.com/debian/ stretch-backports main non-free contrib
-# deb http://mirrors.163.com/debian-security/ stretch/updates main non-free contrib
-basic_ubuntu_debian() {
-    echo "Setup Basic Environment for Ubuntu/Debian."
-    apt-get update && apt-get upgrade -y && apt-get dist-upgrade -y
-    apt-get install python2.7 git python-pip curl -y
-
-    if [ ! -f /usr/bin/python ];then
-        ln -s /usr/bin/python2.7 /usr/bin/python
-    fi
-}
-
-# 红帽系Liunx可修改yum源加快下载速度, 修改/etc/yum.repos.d内文件即可
-basic_centos() {
-    echo "Setup Basic Environment for CentOS."
-    yum install epel-release -y
-    yum update -y
-    yum erase firewalld firewalld-filesystem python-firewall -y
-    yum install git python python-pip curl -y
-}
-
-basic_fedora() {
-    echo "Setup Basic Environment for Fedora."
-    yum update -y
-    yum install git python python-pip curl -y
-}
-
-# archlinux 使用pacman进行包管理
-basic_arch() {
-    pacman -Syu --noconfirm
-    pacman -S python git python-pip curl --noconfirm
-}
-
-# 使用pip安装ansible, 并下载k8s相关bin文件
-setup_ansible_k8s() {
-    echo "Download Ansible and Kubernetes binaries."
-    pip install pip --upgrade -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
-    pip install --no-cache-dir ansible -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
-
-    git clone --depth=1 https://github.com/easzlab/kubeasz.git
-    mv kubeasz /etc/ansible
-
-    # Download from CDN & Move bin files
-    curl -o k8s_download.tar.gz "$bin_resource_url"
-    tar xvf k8s_download.tar.gz
-    mv -f bin/* /etc/ansible/bin
-    rm -rf bin
-    echo "Finish setup. Please config your hosts and run 'ansible-playbook' command at /etc/ansible."
-}
-# ---------------------------
-
-# 判断Linux发行版, 执行不同基础环境设置方法
-# ---------------------------
-lsb_dist=''
-command_exists() {
-    command -v "$@" > /dev/null 2>&1
-}
-if command_exists lsb_release; then
-    lsb_dist="$(lsb_release -si)"
-    lsb_version="$(lsb_release -rs)"
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
-    lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
-    lsb_version="$(. /etc/lsb-release && echo "$DISTRIB_RELEASE")"
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
-    lsb_dist='debian'
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
-    lsb_dist='fedora'
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
-    lsb_dist="$(. /etc/os-release && echo "$ID")"
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then
-    lsb_dist="$(cat /etc/*-release | head -n1 | cut -d " " -f1)"
-fi
-if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then
-    lsb_dist="$(cat /etc/*-release | head -n1 | cut -d " " -f1)"
-fi
-lsb_dist="$(echo $lsb_dist | cut -d " " -f1)"
-lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
-# ---------------------------
-
-# ---------------------------
-setup_env(){
-    case "$lsb_dist" in
-        centos)
-            basic_centos
-            setup_ansible_k8s
-            exit 0
-            ;;
-        fedora)
-            basic_fedora
-            setup_ansible_k8s
-            exit 0
-            ;;
-        ubuntu)
-            basic_ubuntu_debian
-            setup_ansible_k8s
-            exit 0
-            ;;
-        debian)
-            basic_ubuntu_debian
-            setup_ansible_k8s
-            exit 0
-            ;;
-        arch)
-            basic_arch
-            setup_ansible_k8s
-            exit 0
-            ;;
-        suse)
-            echo 'Not implementation yet.'
-            exit 1
-    esac
-    echo "Error: Unsupported OS, please set ansible environment manually."
-    exit 1
-}
-setup_env
-# ---------------------------
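The bootstrap duties of the deleted script move to the ezdown script renamed at the top of this commit. A minimal sketch of the replacement flow, assuming ezdown is published under the GitHub release assets and that its -D flag downloads the default binaries and images (both taken here as assumptions, not confirmed by this patch):

  RELEASE=3.0.0   # illustrative tag
  curl -fsSL -o ./ezdown "https://github.com/easzlab/kubeasz/releases/download/$RELEASE/ezdown"
  chmod +x ./ezdown
  ./ezdown -D     # fetch binaries/images for offline installs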
diff --git a/tools/change_ip_aio.yml b/tools/change_ip_aio.yml
deleted file mode 100644
index 5ad227c..0000000
--- a/tools/change_ip_aio.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-# 敬告:本脚本仅作测试交流使用,详细操作说明请参阅docs/op/change_ip_allinone.md
-# 此脚本仅用于allinone部署情况下,需要修改host ip地址使用(比如,虚机装完allinone部署,克隆或者复制分享给别人测试使用)
-#
-# ------- 前提 :一个运行正常的allinone部署在虚机,关机后复制给别人使用,新虚机开机后如果需要修改IP,请执行如下步骤
-# 1. 修改ansible hosts文件:sed -i 's/$OLD_IP/$NEW_IP/g' /etc/ansible/hosts
-# 2. 配置ssh免密码登录:ssh-copy-id $NEW_IP 按提示完成
-# 3. 检查下修改是否成功,并且能够成功执行 ansible all -m ping
-# 4. 运行本脚本 ansible-playbook /etc/ansible/tools/change_ip_aio.yml
-
-- hosts: kube-master        # hosts 角色无所谓,反正allinone所有角色都是同个ip
-  tasks:
-  - name: 删除一些证书和配置,后面会以新IP重新生成
-    file: name={{ item }} state=absent
-    with_items:
-    - "/etc/etcd/ssl/etcd.pem"                  # 删除etcd证书
-    - "/etc/kubernetes/ssl/kubernetes.pem"      # 删除旧master证书
-    - "/etc/kubernetes/kubelet.kubeconfig"      # 删除旧kubelet配置文件
-
-- hosts: kube-master
-  roles:
-  - deploy
-  - etcd
-  - kube-master
-  - kube-node
-
-- hosts: kube-master
-  tasks:
-  - name: 删除老IP地址的node
-    shell: "{{ bin_dir }}/kubectl get node |grep -v '{{ inventory_hostname }}'|awk '{print $1}' |xargs {{ bin_dir }}/kubectl delete node"
-    ignore_errors: true
-
-  - name: 删除原network插件部署
-    shell: "{{ bin_dir }}/kubectl delete -f /opt/kube/kube-system/{{ CLUSTER_NETWORK }}.yaml || \
-           {{ bin_dir }}/kubectl delete -f /opt/kube/kube-ovn/"
-    ignore_errors: true
-
-- hosts: kube-master
-  roles:
-  - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
-  - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
-  - { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
-  - { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
-  - { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
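The four prerequisite steps documented in the deleted playbook's header, collected as a runnable sequence (OLD_IP and NEW_IP are placeholders; the last command refers to the playbook as it existed before this commit removed it):

  OLD_IP=192.168.1.1
  NEW_IP=192.168.1.2
  sed -i "s/$OLD_IP/$NEW_IP/g" /etc/ansible/hosts
  ssh-copy-id "$NEW_IP"
  ansible all -m ping
  ansible-playbook /etc/ansible/tools/change_ip_aio.yml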
diff --git a/tools/change_k8s_network.yml b/tools/change_k8s_network.yml
deleted file mode 100644
index 83bd8d0..0000000
--- a/tools/change_k8s_network.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-# 重置k8s pod网络脚本,使用请仔细阅读 docs/op/change_k8s_network.md
-- hosts:
-  - kube-master
-  - kube-node
-  tasks:
-  - name: 获取所有已经创建的POD信息
-    command: "{{ bin_dir }}/kubectl get daemonset -n kube-system"
-    register: pod_info
-    run_once: true
-
-  - name: 删除原network插件部署
-    shell: "{{ bin_dir }}/kubectl delete -f /opt/kube/kube-system/{{ item }}.yaml"
-    with_items:
-    - calico
-    - cilium
-    - flannel
-    - kube-router
-    when: 'item in pod_info.stdout'
-    run_once: true
-    ignore_errors: true
-
-  - name: 清理kube-router相关
-    shell: "{{ bin_dir }}/docker run --privileged --net=host cloudnativelabs/kube-router --cleanup-config"
-    ignore_errors: true
-    when: '"kube-router" in pod_info.stdout'
-
-  - name: 停止 kube-node 相关服务
-    service: name={{ item }} state=stopped
-    with_items:
-    - kubelet
-    - kube-proxy
-    ignore_errors: true
-
-  - name: 清理calico残留路由
-    shell: "for rt in `ip route|grep bird|sed 's/blackhole//'|awk '{print $1}'`;do ip route del $rt;done;"
-    when: '"calico" in pod_info.stdout'
-    ignore_errors: true
-
-  - name: 清理 kube-proxy产生的iptables规则
-    shell: "{{ bin_dir }}/kube-proxy --cleanup"
-    ignore_errors: true
-
-  - name: 清理目录和文件
-    file: name={{ item }} state=absent
-    with_items:
-    - "/etc/cni/"
-    - "/run/flannel/"
-    - "/etc/calico/"
-    - "/var/run/calico/"
-    - "/var/lib/calico/"
-    - "/var/log/calico/"
-    - "/etc/cilium/"
-    - "/var/run/cilium/"
-    - "/sys/fs/bpf/tc/"
-    - "/var/lib/cni/"
-    - "/var/lib/kube-router/"
-    - "/opt/kube/kube-system/"
-
-  - name: 清理网络
-    shell: "ip link del tunl0; \
-           ip link del flannel.1; \
-           ip link del cni0; \
-           ip link del mynet0; \
-           ip link del kube-bridge; \
-           ip link del dummy0; \
-           ip link del kube-ipvs0; \
-           ip link del cilium_net; \
-           ip link del cilium_vxlan; \
-           systemctl restart networking; \
-           systemctl restart network"
-    ignore_errors: true
-
-  - name: 开启 kube-node 相关服务
-    service: name={{ item }} state=started enabled=yes
-    with_items:
-    - kubelet
-    - kube-proxy
-    ignore_errors: true
-
-- hosts:
-  - kube-master
-  - kube-node
-  # 安装新的网络插件
-  roles:
-  - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
-  - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
-  - { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
-  - { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
-
-- hosts: kube-node
-  tasks:
-  # 删除所有运行pod,由controller自动重建
-  - name: 重启所有pod
-    shell: "for NS in $({{ bin_dir }}/kubectl get ns|awk 'NR>1{print $1}'); \
-           do {{ bin_dir }}/kubectl delete pod --all -n $NS; done;"
-    ignore_errors: true
-    run_once: true
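For reference, the pod-recycling loop at the end of the deleted playbook works equally well as a standalone shell snippet (destructive: it deletes every pod in every namespace and relies on controllers to recreate them):

  for NS in $(kubectl get ns | awk 'NR>1{print $1}'); do
      kubectl delete pod --all -n "$NS"
  done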