From 63a7e6d7eeef3609cf61f256cf0e0160d87c839f Mon Sep 17 00:00:00 2001 From: gjmzj Date: Tue, 19 Jan 2021 17:41:00 +0800 Subject: [PATCH] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dansible=20group=E5=91=BD?= =?UTF-8?q?=E5=90=8D=E4=B8=8D=E8=A7=84=E8=8C=83=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ansible.cfg | 2 +- docs/guide/harbor.md | 2 +- docs/op/ChangeVIP.md | 107 ------------------ docs/op/ch_apiserver_cert.md | 4 +- docs/op/loadballance_ingress_nodeport.md | 8 +- docs/op/op-index.md | 1 - docs/op/op-master.md | 14 +-- docs/op/op-node.md | 14 +-- docs/release-notes/kubeasz-0.5.1.md | 2 +- docs/release-notes/kubeasz-0.6.0.md | 2 +- docs/release-notes/kubeasz-1.0.0.md | 2 +- docs/release-notes/kubeasz-2.0.0.md | 8 +- docs/release-notes/kubeasz-2.0.2.md | 2 +- docs/release-notes/kubeasz-2.2.0.md | 2 +- docs/setup/00-planning_and_overall_intro.md | 1 - docs/setup/04-install_kube_master.md | 14 +-- docs/setup/05-install_kube_node.md | 6 +- docs/setup/07-install_cluster_addon.md | 2 +- docs/setup/ex-lb.md | 33 ++---- docs/setup/ezctl.md | 3 +- docs/setup/network-plugin/calico.md | 4 +- docs/setup/network-plugin/flannel.md | 4 +- example/config.yml | 4 +- example/hosts.allinone | 6 +- example/hosts.multi-node | 6 +- ezctl | 18 +-- playbooks/01.prepare.yml | 10 +- playbooks/03.runtime.yml | 4 +- playbooks/04.kube-master.yml | 6 +- playbooks/05.kube-node.yml | 6 +- playbooks/06.network.yml | 4 +- playbooks/07.cluster-addon.yml | 2 +- playbooks/11.harbor.yml | 4 +- playbooks/23.addmaster.yml | 2 +- playbooks/31.deletcd.yml | 2 +- playbooks/32.delnode.yml | 6 +- playbooks/33.delmaster.yml | 8 +- playbooks/90.setup.yml | 32 +++--- playbooks/91.start.yml | 18 +-- playbooks/92.stop.yml | 18 +-- playbooks/93.upgrade.yml | 8 +- playbooks/99.clean.yml | 6 +- roles/chrony/chrony.yml | 6 +- roles/cilium/cilium.yml | 4 +- roles/clean/defaults/main.yml | 4 +- roles/clean/tasks/clean_lb.yml | 2 +- roles/clean/tasks/clean_master.yml | 10 +- roles/clean/tasks/clean_node.yml | 8 +- .../templates/prometheus/values.yaml.j2 | 10 +- roles/deploy/tasks/main.yml | 7 +- roles/deploy/vars/main.yml | 2 +- roles/ex-lb/clean-ex-lb.yml | 2 +- roles/ex-lb/ex-lb.yml | 2 +- roles/ex-lb/tasks/main.yml | 6 +- roles/ex-lb/templates/haproxy.cfg.j2 | 24 ++-- .../ex-lb/templates/keepalived-backup.conf.j2 | 4 +- .../ex-lb/templates/keepalived-master.conf.j2 | 4 +- roles/kube-master/tasks/main.yml | 2 +- .../templates/kubernetes-csr.json.j2 | 6 +- roles/kube-node/tasks/main.yml | 4 +- roles/kube-node/tasks/node_lb.yml | 12 +- roles/kube-node/templates/haproxy.cfg.j2 | 4 +- roles/kube-node/vars/main.yml | 6 +- roles/kube-router/kube-router.yml | 4 +- roles/os-harden/os-harden.yml | 6 +- roles/prepare/tasks/main.yml | 4 +- 66 files changed, 214 insertions(+), 336 deletions(-) delete mode 100644 docs/op/ChangeVIP.md diff --git a/ansible.cfg b/ansible.cfg index e85aeed..12bbdd0 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -112,7 +112,7 @@ display_skipped_hosts = False # by default (as of 1.4), Ansible may display deprecation warnings for language # features that should no longer be used and will be removed in future versions. 
# to disable these warnings, set the following value to False: -#deprecation_warnings = True +deprecation_warnings = False # (as of 1.8), Ansible can optionally warn when usage of the shell and # command module appear to be simplified by using a default Ansible module diff --git a/docs/guide/harbor.md b/docs/guide/harbor.md index d1288c1..f3cf6df 100644 --- a/docs/guide/harbor.md +++ b/docs/guide/harbor.md @@ -41,7 +41,7 @@ Habor是由VMWare中国团队开源的容器镜像仓库。事实上,Habor是 - role `prepare` 基础系统环境准备 - role `docker` 安装docker - role `harbor` 安装harbor -- 注意:`kube-node`节点在harbor部署完之后,需要配置harbor的证书(详见下节配置docker/containerd信任harbor证书),并可以在hosts里面添加harbor的域名解析,如果你的环境中有dns服务器,可以跳过hosts文件设置 +- 注意:`kube_node`节点在harbor部署完之后,需要配置harbor的证书(详见下节配置docker/containerd信任harbor证书),并可以在hosts里面添加harbor的域名解析,如果你的环境中有dns服务器,可以跳过hosts文件设置 请在另外窗口打开 [roles/harbor/tasks/main.yml](../../roles/harbor/tasks/main.yml),对照以下讲解 diff --git a/docs/op/ChangeVIP.md b/docs/op/ChangeVIP.md deleted file mode 100644 index 6df41f7..0000000 --- a/docs/op/ChangeVIP.md +++ /dev/null @@ -1,107 +0,0 @@ -# 更改高可用 `Master IP` - -**WARNING:** 更改集群的 `Master VIP`操作有风险,不建议在生产环境直接操作,此文档实践一个修改的操作流程,帮助理解整个集群运行架构和 `kubeasz`的部署逻辑,请在测试环境操作练手。 -**BUG:** 目前该操作只适用于集群网络选用`calico`,如果使用`flannel`操作变更后会出现POD地址分配错误的BUG。 - -首先分析大概操作思路: - -- 修改`/etc/ansible/hosts`里面的配置项`MASTER_IP` `KUBE_APISERVER` -- 修改LB节点的keepalive的配置,重启keepalived服务 -- 修改kubectl/kube-proxy的配置文件,使用新VIP地址更新api-server地址 -- 重新生成master证书,hosts字段包含新VIP地址 -- 修改kubelet的配置文件(kubelet的配置文件和证书是由bootstrap机制自动生成的) - - 删除kubelet.kubeconfig - - 删除集群所有node 节点 - - 所有节点重新bootstrap - -## 变更前状态验证 - -``` bash -$ kubectl get cs,node,pod -o wide -NAME STATUS MESSAGE ERROR -controller-manager Healthy ok -scheduler Healthy ok -etcd-2 Healthy {"health":"true"} -etcd-0 Healthy {"health":"true"} -etcd-1 Healthy {"health":"true"} - -NAME STATUS ROLES AGE VERSION EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME -192.168.1.41 Ready,SchedulingDisabled 2h v1.10.0 Ubuntu 16.04.3 LTS 4.4.0-97-generic docker://18.3.0 -192.168.1.42 Ready,SchedulingDisabled 2h v1.10.0 Ubuntu 16.04.3 LTS 4.4.0-97-generic docker://18.3.0 -192.168.1.43 Ready 2h v1.10.0 Ubuntu 16.04.3 LTS 4.4.0-97-generic docker://18.3.0 -192.168.1.44 Ready 2h v1.10.0 Ubuntu 16.04.3 LTS 4.4.0-98-generic docker://18.3.0 -192.168.1.45 Ready 2h v1.10.0 Ubuntu 16.04.3 LTS 4.4.0-98-generic docker://18.3.0 - -NAME READY STATUS RESTARTS AGE IP NODE -busy-5d6b6b5d4b-8wxkp 1/1 Running 0 17h 172.20.135.133 192.168.1.41 -busy-5d6b6b5d4b-fcmkp 1/1 Running 0 17h 172.20.135.128 192.168.1.41 -busy-5d6b6b5d4b-ptvd7 1/1 Running 0 17h 172.20.135.136 192.168.1.41 -nginx-768979984b-ncqbp 1/1 Running 0 17h 172.20.135.137 192.168.1.41 - -# 查看待变更集群 Master VIP -$ kubectl cluster-info -Kubernetes master is running at https://192.168.1.39:8443 -``` - -## 变更操作 - -- `ansible playbook`可以使用tags来控制只允许部分任务执行,这里为简化操作没有细化,在ansible控制端具体操作如下: - -``` bash -# 1.修改/etc/ansible/hosts 配置项MASTER_IP,KUBE_APISERVER - -# 2.删除集群所有node节点,等待重新bootstrap -$ kubectl get node |grep Ready|awk '{print $1}' |xargs kubectl delete node - -# 3.重置keepalived 和修改kubectl/kube-proxy/bootstrap配置 -$ ansible-playbook 01.prepare.yml - -# 4.删除旧master证书 -$ ansible kube-master -m file -a 'path=/etc/kubernetes/ssl/kubernetes.pem state=absent' - -# 5.删除旧kubelet配置文件 -$ ansible all -m file -a 'path=/etc/kubernetes/kubelet.kubeconfig state=absent' - -# 6.重新配置启动master节点 -$ ansible-playbook 04.kube-master.yml - -# 7.重新配置启动node节点 -$ ansible-playbook 05.kube-node.yml -``` - -## 变更后验证 - -``` bash -$ kubectl get cs,node,pod -o wide -NAME STATUS MESSAGE 
ERROR -scheduler Healthy ok -controller-manager Healthy ok -etcd-2 Healthy {"health":"true"} -etcd-1 Healthy {"health":"true"} -etcd-0 Healthy {"health":"true"} - -NAME STATUS ROLES AGE VERSION EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME -192.168.1.41 Ready,SchedulingDisabled 4m v1.10.0 Ubuntu 16.04.3 LTS 4.4.0-97-generic docker://18.3.0 -192.168.1.42 Ready,SchedulingDisabled 4m v1.10.0 Ubuntu 16.04.3 LTS 4.4.0-97-generic docker://18.3.0 -192.168.1.43 Ready 3m v1.10.0 Ubuntu 16.04.3 LTS 4.4.0-97-generic docker://18.3.0 -192.168.1.44 Ready 3m v1.10.0 Ubuntu 16.04.3 LTS 4.4.0-98-generic docker://18.3.0 -192.168.1.45 Ready 3m v1.10.0 Ubuntu 16.04.3 LTS 4.4.0-98-generic docker://18.3.0 - -NAME READY STATUS RESTARTS AGE IP NODE -busy-5d6b6b5d4b-25hfr 1/1 Running 0 5m 172.20.237.64 192.168.1.43 -busy-5d6b6b5d4b-cdzb5 1/1 Running 0 5m 172.20.145.192 192.168.1.44 -busy-5d6b6b5d4b-m2rf7 1/1 Running 0 5m 172.20.26.131 192.168.1.45 -nginx-768979984b-2ngww 1/1 Running 0 5m 172.20.145.193 192.168.1.44 - -# 查看集群master VIP已经变更 -$ kubectl cluster-info -Kubernetes master is running at https://192.168.1.40:8443 -``` - -## 小结 - -本示例操作演示了多主多节点k8s集群变更`Master VIP`的操作,有助于理解整个集群组件架构和`kubeasz`的安装逻辑,小结如下: - -- 变更操作不影响集群已运行的业务POD,但是操作过程中业务会中断 -- 已运行POD会重新调度到各node节点,如果业务POD量很大,短时间内会对集群造成压力 -- 不建议在生成环境直接操作,本示例演示说明为主 diff --git a/docs/op/ch_apiserver_cert.md b/docs/op/ch_apiserver_cert.md index 3fbd804..d0baf75 100644 --- a/docs/op/ch_apiserver_cert.md +++ b/docs/op/ch_apiserver_cert.md @@ -5,8 +5,8 @@ ``` "hosts": [ "127.0.0.1", -{% if groups['ex-lb']|length > 0 %} - "{{ hostvars[groups['ex-lb'][0]]['EX_APISERVER_VIP'] }}", +{% if groups['ex_lb']|length > 0 %} + "{{ hostvars[groups['ex_lb'][0]]['EX_APISERVER_VIP'] }}", {% endif %} "{{ inventory_hostname }}", "{{ CLUSTER_KUBERNETES_SVC_IP }}", diff --git a/docs/op/loadballance_ingress_nodeport.md b/docs/op/loadballance_ingress_nodeport.md index 2985ea4..8e79192 100644 --- a/docs/op/loadballance_ingress_nodeport.md +++ b/docs/op/loadballance_ingress_nodeport.md @@ -6,9 +6,9 @@ - 2.部署ingress-controller时使用`LoadBalancer`类型服务,需要集群支持`LoadBalancer` - 3.部署ingress-controller时使用`nodePort`类型服务,然后在集群外使用 haproxy/f5 等配置 virtual server 集群 -本文档讲解使用 haproxy 配置 ingress的 VS 集群,前提是配置了自建`ex-lb`节点 +本文档讲解使用 haproxy 配置 ingress的 VS 集群,前提是配置了自建`ex_lb`节点 -## 1.配置 ex-lb 参数开启转发 ingress nodeport +## 1.配置 ex_lb 参数开启转发 ingress nodeport ``` bash # 编辑 roles/ex-lb/defaults/main.yml,配置如下变量 @@ -22,11 +22,11 @@ INGRESS_TLS_NODEPORT_LB: "yes" $ ansible-playbook /etc/ansible/roles/ex-lb/ex-lb.yml ``` -## 3.验证 ex-lb 节点的 haproxy 服务配置 `/etc/haproxy/haproxy.cfg` 包含如下配置 +## 3.验证 ex_lb 节点的 haproxy 服务配置 `/etc/haproxy/haproxy.cfg` 包含如下配置 ``` bash ... 
前文省略 -listen kube-master +listen kube_master bind 0.0.0.0:8443 mode tcp option tcplog diff --git a/docs/op/op-index.md b/docs/op/op-index.md index c722dba..123cd2a 100644 --- a/docs/op/op-index.md +++ b/docs/op/op-index.md @@ -4,7 +4,6 @@ - [管理 MASTER 节点](op-master.md) - [管理 ETCD 节点](op-etcd.md) - [升级 K8S 版本](upgrade.md) -- [修改多主集群VIP地址](ChangeVIP.md) - [修改AIO部署的系统IP](change_ip_allinone.md) - [替换集群使用的网络插件](change_k8s_network.md) - [集群备份与恢复](cluster_restore.md) diff --git a/docs/op/op-master.md b/docs/op/op-master.md index dc7780e..2823190 100644 --- a/docs/op/op-master.md +++ b/docs/op/op-master.md @@ -1,13 +1,13 @@ -# 管理 kube-master 节点 +# 管理 kube_master 节点 -## 1.增加 kube-master 节点 +## 1.增加 kube_master 节点 -新增`kube-master`节点大致流程为:tools/03.addmaster.yml +新增`kube_master`节点大致流程为:tools/03.addmaster.yml - [可选]新节点安装 chrony 时间同步 - 新节点预处理 prepare - 新节点安装 docker 服务 -- 新节点安装 kube-master 服务 -- 新节点安装 kube-node 服务 +- 新节点安装 kube_master 服务 +- 新节点安装 kube_node 服务 - 新节点安装网络插件相关 - 禁止业务 pod调度到新master节点 - 更新 node 节点 haproxy 负载均衡并重启 @@ -41,10 +41,10 @@ NAME STATUS ROLES AGE VERSION 192.168.1.11 Ready,SchedulingDisabled 2h v1.9.3 # 新增 master节点 ``` -## 2.删除 kube-master 节点 +## 2.删除 kube_master 节点 -删除`kube-master`节点大致流程为:tools/13.delmaster.yml +删除`kube_master`节点大致流程为:tools/13.delmaster.yml - 检测是否可以删除 - 迁移节点 pod - 删除 master 相关服务及文件 diff --git a/docs/op/op-node.md b/docs/op/op-node.md index 39da882..2fcd598 100644 --- a/docs/op/op-node.md +++ b/docs/op/op-node.md @@ -1,17 +1,17 @@ # 管理 node 节点 目录 -- 1.增加 kube-node 节点 +- 1.增加 kube_node 节点 - 2.增加非标准ssh端口节点 -- 3.删除 kube-node 节点 +- 3.删除 kube_node 节点 -## 1.增加 kube-node 节点 +## 1.增加 kube_node 节点 -新增`kube-node`节点大致流程为:tools/02.addnode.yml +新增`kube_node`节点大致流程为:tools/02.addnode.yml - [可选]新节点安装 chrony 时间同步 - 新节点预处理 prepare - 新节点安装 docker 服务 -- 新节点安装 kube-node 服务 +- 新节点安装 kube_node 服务 - 新节点安装网络插件相关 ### 操作步骤 @@ -39,13 +39,13 @@ $ kubectl get pod -n kube-system 目前 ezctl 暂不支持自动添加非标准 ssh 端口的节点,可以手动操作如下: - 假设待添加节点192.168.2.1,ssh 端口 10022;配置免密登录 ssh-copy-id -p 10022 192.168.2.1,按提示输入密码 -- 在 /etc/ansible/hosts文件 [kube-node] 组下添加一行: +- 在 /etc/ansible/hosts文件 [kube_node] 组下添加一行: ``` 192.168.2.1 ansible_ssh_port=10022 ``` - 最后执行 `ansible-playbook /etc/ansible/tools/02.addnode.yml -e NODE_TO_ADD=192.168.2.1` -## 3.删除 kube-node 节点 +## 3.删除 kube_node 节点 删除 node 节点流程:tools/12.delnode.yml - 检测是否可以删除 diff --git a/docs/release-notes/kubeasz-0.5.1.md b/docs/release-notes/kubeasz-0.5.1.md index 1643c15..86d0062 100644 --- a/docs/release-notes/kubeasz-0.5.1.md +++ b/docs/release-notes/kubeasz-0.5.1.md @@ -8,7 +8,7 @@ CHANGELOG: - 集群安装: - 更新 calico 3.3.2,并保留3.2.4可选 - 修复特定环境下lb节点变量LB_IF自动设置错误 - - 移除 kube-node csr 请求批准部分(PR #399) + - 移除 kube_node csr 请求批准部分(PR #399) - 添加支持 RedHat (PR #431) - 修改 docker 存储的目录设置(PR #436) - 更新 kube-schedule 监听参数 (PR #440) diff --git a/docs/release-notes/kubeasz-0.6.0.md b/docs/release-notes/kubeasz-0.6.0.md index 717f333..31609f0 100644 --- a/docs/release-notes/kubeasz-0.6.0.md +++ b/docs/release-notes/kubeasz-0.6.0.md @@ -13,7 +13,7 @@ CHANGELOG: - helm/tiller: v2.12.3 - 集群安装: - **增加添加/删除 etcd 节点**脚本和[文档](https://github.com/easzlab/kubeasz/blob/master/docs/op/op-etcd.md) - - **增加可选配置附加负载均衡节点(ex-lb)**,可用于负载均衡 NodePort 方式暴露的服务 + - **增加可选配置附加负载均衡节点(ex_lb)**,可用于负载均衡 NodePort 方式暴露的服务 - 更新删除节点脚本和[文档](https://github.com/easzlab/kubeasz/blob/master/docs/op/del_one_node.md) - 优化增加 node 和增加 master 节点流程 - 更新 harbor 安装流程和文档 diff --git a/docs/release-notes/kubeasz-1.0.0.md b/docs/release-notes/kubeasz-1.0.0.md index 3e2c7cd..c19e7be 100644 --- a/docs/release-notes/kubeasz-1.0.0.md +++ 
b/docs/release-notes/kubeasz-1.0.0.md @@ -15,7 +15,7 @@ CHANGELOG: (0.6.x 版本以后) - 优化 ansible hosts 配置,更加精简、易用 - 废弃 new-node/new-master/new-etcd 主机组,对应功能已集成在 easzctl 命令行 - 废弃变量 K8S_VER,改为自动识别,避免手工配置错误 - - 迁移 basic_auth 相关配置至 roles:kube-master,增强初始安全性,且默认关闭apiserver的用户名/密码认证,详见 roles/kube-master/defaults/main.yml + - 迁移 basic_auth 相关配置至 roles:kube_master,增强初始安全性,且默认关闭apiserver的用户名/密码认证,详见 roles/kube-master/defaults/main.yml - easzctl 提供以下集群层面操作 - 切换/创建集群 context - 删除当前集群 diff --git a/docs/release-notes/kubeasz-2.0.0.md b/docs/release-notes/kubeasz-2.0.0.md index 3a15d1f..5796de2 100644 --- a/docs/release-notes/kubeasz-2.0.0.md +++ b/docs/release-notes/kubeasz-2.0.0.md @@ -10,11 +10,11 @@ CHANGELOG: - 集群安装: - 废弃 ansible hosts 中 deploy 角色,精简保留2个预定义节点规划例子(example/hosts.xx) - 重构 prepare 安装流程(删除 deploy 角色,移除 lb 节点创建) - - 调整 kube-master 安装流程 - - 调整 kube-node 安装流程(node 节点新增 haproxy 服务) + - 调整 kube_master 安装流程 + - 调整 kube_node 安装流程(node 节点新增 haproxy 服务) - 调整 network 等其他安装流程 - 精简 example hosts 配置文件及配置项 - - 调整 ex-lb 安装流程【可选】 + - 调整 ex_lb 安装流程【可选】 - 添加 docker/containerd 安装时互斥判断 - 新增 role: clean,重写清理脚本 99.clean.yml - 废弃 tools/clean_one_node.yml @@ -38,7 +38,7 @@ CHANGELOG: - node 节点安装文档 - ... - 集群操作管理文档更新(docs/op/op-index.md) - - 新增可选外部负载均衡文档(docs/setup/ex-lb.md) + - 新增可选外部负载均衡文档(docs/setup/ex_lb.md) - 新增容器化系统服务 haproxy/chrony 文档(docs/practice/dockerize_system_service.md) - 其他: - fix: 对已有集群进行安全加固时禁用 ip_forward 问题 diff --git a/docs/release-notes/kubeasz-2.0.2.md b/docs/release-notes/kubeasz-2.0.2.md index 62ecbbd..7fb36d3 100644 --- a/docs/release-notes/kubeasz-2.0.2.md +++ b/docs/release-notes/kubeasz-2.0.2.md @@ -24,7 +24,7 @@ CHANGELOG: - new logo - fix: 执行roles/cluster-storage/cluster-storage.yml 报错不存在`deploy` - fix: 部分os启用kube-reserved出错(提示/sys/fs/cgroup只读) - - fix: ex-lb 组少量 keepalived 相关配置 + - fix: ex_lb 组少量 keepalived 相关配置 - fix: 偶然出现docker安装时提示找不到变量`docker_ver` - fix: Ubuntu1804 pod内dns解析不到外网 - fix: k8s 相关服务在接收SIGPIPE信号停止后不重启问题 #631 thx to gj19910723 diff --git a/docs/release-notes/kubeasz-2.2.0.md b/docs/release-notes/kubeasz-2.2.0.md index d44ed57..5203d50 100644 --- a/docs/release-notes/kubeasz-2.2.0.md +++ b/docs/release-notes/kubeasz-2.2.0.md @@ -23,7 +23,7 @@ CHANGELOG: - easzup: 修复安装 docker 逻辑 aa76da0f2ee2b01d47c28667feed36b6be778b17 - 其他 - fix: dashboard生成cluster-service #739 - - fix: ubuntu1804安装ex-lb失败问题 + - fix: ubuntu1804安装ex_lb失败问题 - fix: calico的BGP RR模式下的bgppeer的nodeSelector错误 #741 - fix: ectd集群有不正常节点时增/删etcd节点失败 #743 - fix: kube-router 安装报错 #783 diff --git a/docs/setup/00-planning_and_overall_intro.md b/docs/setup/00-planning_and_overall_intro.md index 09e8640..1f1afa6 100644 --- a/docs/setup/00-planning_and_overall_intro.md +++ b/docs/setup/00-planning_and_overall_intro.md @@ -98,7 +98,6 @@ chmod +x ./ezdown ezctl new k8s-01 2021-01-19 10:48:23 DEBUG generate custom cluster files in /etc/kubeasz/clusters/k8s-01 2021-01-19 10:48:23 DEBUG set version of common plugins -2021-01-19 10:48:23 DEBUG disable registry mirrors 2021-01-19 10:48:23 DEBUG cluster k8s-01: files successfully created. 
2021-01-19 10:48:23 INFO next steps 1: to config '/etc/kubeasz/clusters/k8s-01/hosts' 2021-01-19 10:48:23 INFO next steps 2: to config '/etc/kubeasz/clusters/k8s-01/config.yml' diff --git a/docs/setup/04-install_kube_master.md b/docs/setup/04-install_kube_master.md index d92f38a..16fc6e8 100644 --- a/docs/setup/04-install_kube_master.md +++ b/docs/setup/04-install_kube_master.md @@ -1,4 +1,4 @@ -## 04-安装kube-master节点 +## 04-安装kube_master节点 部署master节点主要包含三个组件`apiserver` `scheduler` `controller-manager`,其中: @@ -39,8 +39,8 @@ roles/kube-master/ "CN": "kubernetes", "hosts": [ "127.0.0.1", -{% if groups['ex-lb']|length > 0 %} - "{{ hostvars[groups['ex-lb'][0]]['EX_APISERVER_VIP'] }}", +{% if groups['ex_lb']|length > 0 %} + "{{ hostvars[groups['ex_lb'][0]]['EX_APISERVER_VIP'] }}", {% endif %} "{{ inventory_hostname }}", "{{ CLUSTER_KUBERNETES_SVC_IP }}", @@ -69,7 +69,7 @@ roles/kube-master/ } ``` - kubernetes 证书既是服务器证书,同时apiserver又作为客户端证书去访问etcd 集群;作为服务器证书需要设置hosts 指定使用该证书的IP 或域名列表,需要注意的是: - - 如果配置 ex-lb,需要把 EX_APISERVER_VIP 也配置进去 + - 如果配置 ex_lb,需要把 EX_APISERVER_VIP 也配置进去 - 如果需要外部访问 apiserver,需要在 defaults/main.yml 配置 MASTER_CERT_HOSTS - `kubectl get svc` 将看到集群中由api-server 创建的默认服务 `kubernetes`,因此也要把 `kubernetes` 服务名和各个服务域名也添加进去 @@ -207,10 +207,10 @@ WantedBy=multi-user.target ``` bash # vi 04.kube-master.yml -- hosts: kube-master +- hosts: kube_master roles: - - kube-master - - kube-node + - kube_master + - kube_node # 禁止业务 pod调度到 master节点 tasks: - name: 禁止业务 pod调度到 master节点 diff --git a/docs/setup/05-install_kube_node.md b/docs/setup/05-install_kube_node.md index 353635a..d721ed3 100644 --- a/docs/setup/05-install_kube_node.md +++ b/docs/setup/05-install_kube_node.md @@ -1,9 +1,9 @@ -## 05-安装kube-node节点 +## 05-安装kube_node节点 -`kube-node` 是集群中运行工作负载的节点,前置条件需要先部署好`kube-master`节点,它需要部署如下组件: +`kube_node` 是集群中运行工作负载的节点,前置条件需要先部署好`kube_master`节点,它需要部署如下组件: + docker:运行容器 -+ kubelet: kube-node上最主要的组件 ++ kubelet: kube_node上最主要的组件 + kube-proxy: 发布应用服务与负载均衡 + haproxy:用于请求转发到多个 apiserver,详见[HA-2x 架构](00-planning_and_overall_intro.md#ha-architecture) + calico: 配置容器网络 (或者其他网络组件) diff --git a/docs/setup/07-install_cluster_addon.md b/docs/setup/07-install_cluster_addon.md index 539356c..f8d1d95 100644 --- a/docs/setup/07-install_cluster_addon.md +++ b/docs/setup/07-install_cluster_addon.md @@ -13,5 +13,5 @@ ## 下一步 -- [创建ex-lb节点组](ex-lb.md), 向集群外提供高可用apiserver +- [创建ex_lb节点组](ex_lb.md), 向集群外提供高可用apiserver - [创建集群持久化存储](08-cluster-storage.md) diff --git a/docs/setup/ex-lb.md b/docs/setup/ex-lb.md index c0fd052..352e3a2 100644 --- a/docs/setup/ex-lb.md +++ b/docs/setup/ex-lb.md @@ -1,32 +1,17 @@ ## EX-LB 负载均衡部署 -根据[HA 2x架构](00-planning_and_overall_intro.md),k8s集群自身高可用已经不依赖于外部 lb 服务;但是有时我们要从外部访问 apiserver(比如 CI 流程),就需要 ex-lb 来请求多个 apiserver; +根据[HA 2x架构](00-planning_and_overall_intro.md),k8s集群自身高可用已经不依赖于外部 lb 服务;但是有时我们要从外部访问 apiserver(比如 CI 流程),就需要 ex_lb 来请求多个 apiserver; -还有一种情况是需要[负载转发到ingress服务](../op/loadballance_ingress_nodeport.md),也需要部署ex-lb; +还有一种情况是需要[负载转发到ingress服务](../op/loadballance_ingress_nodeport.md),也需要部署ex_lb; -**注意:当遇到公有云环境无法自建 ex-lb 服务时,可以配置对应的云负载均衡服务** +**注意:当遇到公有云环境无法自建 ex_lb 服务时,可以配置对应的云负载均衡服务** -### ex-lb 服务组件 +### ex_lb 服务组件 -ex-lb 服务由 keepalived 和 haproxy 组成: +ex_lb 服务由 keepalived 和 haproxy 组成: - haproxy:高效代理(四层模式)转发到多个 apiserver - keepalived:利用主备节点vrrp协议通信和虚拟地址,消除haproxy的单点故障 -``` bash -roles/ex-lb/ -├── clean-ex-lb.yml -├── defaults -│   └── main.yml -├── ex-lb.yml -├── tasks -│   └── main.yml -└── templates - ├── haproxy.cfg.j2 - ├── haproxy.service.j2 - ├── keepalived-backup.conf.j2 - └── 
keepalived-master.conf.j2 -``` - Haproxy支持四层和七层负载,稳定性好,根据官方文档,HAProxy可以跑满10Gbps-New benchmark of HAProxy at 10 Gbps using Myricom's 10GbE NICs (Myri-10G PCI-Express);另外,openstack高可用也有用haproxy的。 keepalived观其名可知,保持存活,它是基于VRRP协议保证所谓的高可用或热备的,这里用来预防haproxy的单点故障。 @@ -44,12 +29,12 @@ keepalived与haproxy配合,实现master的高可用过程如下: #### 配置haproxy (roles/ex-lb/templates/haproxy.cfg.j2) 配置由全局配置和三个listen配置组成: -- listen kube-master 用于转发至多个apiserver +- listen kube_master 用于转发至多个apiserver - listen ingress-node 用于转发至node节点的ingress http服务,[参阅](../op/loadballance_ingress_nodeport.md) - listen ingress-node-tls 用于转发至node节点的ingress https服务 如果用apt安装的话,可以在/usr/share/doc/haproxy目录下找到配置指南configuration.txt.gz,全局和默认配置这里不展开,关注`listen` 代理设置模块,各项配置说明: -+ 名称 kube-master ++ 名称 kube_master + bind 监听客户端请求的地址/端口,保证监听master的VIP地址和端口 + mode 选择四层负载模式 (当然你也可以选择七层负载,请查阅指南,适当调整) + balance 选择负载算法 (负载算法也有很多供选择) @@ -71,12 +56,12 @@ vrrp_script check-haproxy { weight -60 } -vrrp_instance VI-kube-master { +vrrp_instance VI-kube_master { state MASTER priority 120 unicast_src_ip {{ inventory_hostname }} unicast_peer { -{% for h in groups['ex-lb'] %}{% if h != inventory_hostname %} +{% for h in groups['ex_lb'] %}{% if h != inventory_hostname %} {{ h }} {% endif %}{% endfor %} } diff --git a/docs/setup/ezctl.md b/docs/setup/ezctl.md index 1107fdc..6807884 100644 --- a/docs/setup/ezctl.md +++ b/docs/setup/ezctl.md @@ -68,12 +68,11 @@ Use "ezctl help " for more information about a given command. ~# ezctl new k8s-01 2021-01-19 10:48:23 DEBUG generate custom cluster files in /etc/kubeasz/clusters/k8s-01 2021-01-19 10:48:23 DEBUG set version of common plugins -2021-01-19 10:48:23 DEBUG disable registry mirrors 2021-01-19 10:48:23 DEBUG cluster k8s-01: files successfully created. 2021-01-19 10:48:23 INFO next steps 1: to config '/etc/kubeasz/clusters/k8s-01/hosts' 2021-01-19 10:48:23 INFO next steps 2: to config '/etc/kubeasz/clusters/k8s-01/config.yml' ``` -然后根据提示配置'/etc/kubeasz/clusters/k8s-01/hosts' 和 '/etc/kubeasz/clusters/k8s-01/config.yml';为方便测试我们在hosts里面设置单节点集群(etcd/kube-master/kube-node配置同一个节点,注意节点需先设置ssh免密码登陆), config.yml 使用默认配置即可。 +然后根据提示配置'/etc/kubeasz/clusters/k8s-01/hosts' 和 '/etc/kubeasz/clusters/k8s-01/config.yml';为方便测试我们在hosts里面设置单节点集群(etcd/kube_master/kube_node配置同一个节点,注意节点需先设置ssh免密码登陆), config.yml 使用默认配置即可。 - 2.然后开始安装集群 diff --git a/docs/setup/network-plugin/calico.md b/docs/setup/network-plugin/calico.md index 42e9cb4..70d91ae 100644 --- a/docs/setup/network-plugin/calico.md +++ b/docs/setup/network-plugin/calico.md @@ -60,9 +60,9 @@ roles/calico/ + 安装前检查主机名不能有大写字母,只能由`小写字母` `-` `.` 组成 (name must consist of lower case alphanumeric characters, '-' or '.' 
(regex: [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*))(calico-node v3.0.6以上已经解决主机大写字母问题) + **安装前必须确保各节点主机名不重复** ,calico node name 由节点主机名决定,如果重复,那么重复节点在etcd中只存储一份配置,BGP 邻居也不会建立。 -+ 安装之前必须确保`kube-master`和`kube-node`节点已经成功部署 ++ 安装之前必须确保`kube_master`和`kube_node`节点已经成功部署 + 只需要在任意装有kubectl客户端的节点运行 `kubectl apply -f`安装即可 -+ 等待15s后(视网络拉取calico相关镜像速度),calico 网络插件安装完成,删除之前kube-node安装时默认cni网络配置 ++ 等待15s后(视网络拉取calico相关镜像速度),calico 网络插件安装完成,删除之前kube_node安装时默认cni网络配置 ### [可选]配置calicoctl工具 [calicoctl.cfg.j2](roles/calico/templates/calicoctl.cfg.j2) diff --git a/docs/setup/network-plugin/flannel.md b/docs/setup/network-plugin/flannel.md index 4d7cae4..7fc1eb7 100644 --- a/docs/setup/network-plugin/flannel.md +++ b/docs/setup/network-plugin/flannel.md @@ -86,9 +86,9 @@ FLANNEL_IPMASQ=true ``` ### 安装 flannel网络 -+ 安装之前必须确保kube-master和kube-node节点已经成功部署 ++ 安装之前必须确保kube_master和kube_node节点已经成功部署 + 只需要在任意装有kubectl客户端的节点运行 kubectl create安装即可 -+ 等待15s后(视网络拉取相关镜像速度),flannel 网络插件安装完成,删除之前kube-node安装时默认cni网络配置 ++ 等待15s后(视网络拉取相关镜像速度),flannel 网络插件安装完成,删除之前kube_node安装时默认cni网络配置 ### 验证flannel网络 diff --git a/example/config.yml b/example/config.yml index 3219863..ac08234 100644 --- a/example/config.yml +++ b/example/config.yml @@ -113,7 +113,7 @@ flannel_offline: "flannel_{{ flannelVer }}.tar" CALICO_IPV4POOL_IPIP: "Always" # [calico]设置 calico-node使用的host IP,bgp邻居通过该地址建立,可手工指定也可以自动发现 -IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube-master'][0] }}" +IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube_master'][0] }}" # [calico]设置calico 网络 backend: brid, vxlan, none CALICO_NETWORKING_BACKEND: "brid" @@ -139,7 +139,7 @@ cilium_offline: "cilium_{{ cilium_ver }}.tar" # ------------------------------------------- kube-ovn # [kube-ovn]选择 OVN DB and OVN Control Plane 节点,默认为第一个master节点 -OVN_DB_NODE: "{{ groups['kube-master'][0] }}" +OVN_DB_NODE: "{{ groups['kube_master'][0] }}" # [kube-ovn]离线镜像tar包 kube_ovn_ver: "__kube_ovn__" diff --git a/example/hosts.allinone b/example/hosts.allinone index e99361b..e310395 100644 --- a/example/hosts.allinone +++ b/example/hosts.allinone @@ -3,11 +3,11 @@ 192.168.1.1 # master node(s) -[kube-master] +[kube_master] 192.168.1.1 # work node(s) -[kube-node] +[kube_node] 192.168.1.1 # [optional] harbor server, a private docker registry @@ -17,7 +17,7 @@ #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no SELF_SIGNED_CERT=yes # [optional] loadbalance for accessing k8s from outside -[ex-lb] +[ex_lb] #192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443 #192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443 diff --git a/example/hosts.multi-node b/example/hosts.multi-node index 0c538fa..c4977d3 100644 --- a/example/hosts.multi-node +++ b/example/hosts.multi-node @@ -5,12 +5,12 @@ 192.168.1.3 # master node(s) -[kube-master] +[kube_master] 192.168.1.1 192.168.1.2 # work node(s) -[kube-node] +[kube_node] 192.168.1.3 192.168.1.4 @@ -21,7 +21,7 @@ #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no SELF_SIGNED_CERT=yes # [optional] loadbalance for accessing k8s from outside -[ex-lb] +[ex_lb] #192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443 #192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443 diff --git a/ezctl b/ezctl index e5ccc3c..02731a3 100755 --- a/ezctl +++ b/ezctl @@ -294,10 +294,10 @@ function add-node() { [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error 
"Invalid ip add:$2"; return 1; } # check if the new node already exsited - sed -n '/^\[kube-master/,/^\[harbor/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "node $2 already existed in $BASE/clusters/$1/hosts"; return 2; } + sed -n '/^\[kube_master/,/^\[harbor/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "node $2 already existed in $BASE/clusters/$1/hosts"; return 2; } - logger info "add $2 into 'kube-node' group" - sed -i "/\[kube-node/a $2 NEW_NODE=yes ${@:3}" "$BASE/clusters/$1/hosts" + logger info "add $2 into 'kube_node' group" + sed -i "/\[kube_node/a $2 NEW_NODE=yes ${@:3}" "$BASE/clusters/$1/hosts" logger info "start to add a work node:$2 into cluster:$1" ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/22.addnode.yml" -e "NODE_TO_ADD=$2" -e "@clusters/$1/config.yml" @@ -308,15 +308,15 @@ function add-master() { [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error "Invalid ip add:$2"; return 1; } # check if the new master already exsited - sed -n '/^\[kube-master/,/^\[kube-node/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "master $2 already existed!"; return 2; } + sed -n '/^\[kube_master/,/^\[kube_node/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "master $2 already existed!"; return 2; } - logger info "add $2 into 'kube-master' group" - sed -i "/\[kube-master/a $2 NEW_MASTER=yes ${@:3}" "$BASE/clusters/$1/hosts" + logger info "add $2 into 'kube_master' group" + sed -i "/\[kube_master/a $2 NEW_MASTER=yes ${@:3}" "$BASE/clusters/$1/hosts" logger info "start to add a master node:$2 into cluster:$1" ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/23.addmaster.yml" -e "NODE_TO_ADD=$2" -e "@clusters/$1/config.yml" - logger info "reconfigure and restart the haproxy service on 'kube-node' nodes" + logger info "reconfigure and restart the haproxy service on 'kube_node' nodes" ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/05.kube-node.yml" -t restart_lb -e MASTER_CHG=yes -e "@clusters/$1/config.yml" } @@ -325,7 +325,7 @@ function add-etcd() { [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error "Invalid ip add:$2"; return 1; } # check if the new node already exsited - sed -n '/^\[etcd/,/^\[kube-master/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "etcd $2 already existed!"; return 2; } + sed -n '/^\[etcd/,/^\[kube_master/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "etcd $2 already existed!"; return 2; } logger info "add $2 into 'etcd' group" sed -i "/\[etcd/a $2 NEW_ETCD=yes ${@:3}" "$BASE/clusters/$1/hosts" @@ -372,7 +372,7 @@ function del-master() { logger info "reconfig kubeconfig in ansible manage node" ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/roles/deploy/deploy.yml" -t create_kctl_cfg -e "@clusters/$1/config.yml" - logger info "reconfigure and restart the haproxy service on 'kube-node' nodes" + logger info "reconfigure and restart the haproxy service on 'kube_node' nodes" ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/05.kube-node.yml" -t restart_lb -e MASTER_CHG=yes -e "@clusters/$1/config.yml" } diff --git a/playbooks/01.prepare.yml b/playbooks/01.prepare.yml index 2295065..66c3b43 100644 --- a/playbooks/01.prepare.yml +++ b/playbooks/01.prepare.yml @@ -1,9 +1,9 @@ # [optional] to synchronize system time of nodes with 'chrony' - hosts: 
- - kube-master - - kube-node + - kube_master + - kube_node - etcd - - ex-lb + - ex_lb - chrony roles: - { role: chrony, when: "groups['chrony']|length > 0" } @@ -15,8 +15,8 @@ # prepare tasks for all nodes - hosts: - - kube-master - - kube-node + - kube_master + - kube_node - etcd roles: - prepare diff --git a/playbooks/03.runtime.yml b/playbooks/03.runtime.yml index d0eaa55..2d9a8cd 100644 --- a/playbooks/03.runtime.yml +++ b/playbooks/03.runtime.yml @@ -1,7 +1,7 @@ # to install a container runtime - hosts: - - kube-master - - kube-node + - kube_master + - kube_node roles: - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" } - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" } diff --git a/playbooks/04.kube-master.yml b/playbooks/04.kube-master.yml index 5de1714..e8812ce 100644 --- a/playbooks/04.kube-master.yml +++ b/playbooks/04.kube-master.yml @@ -1,12 +1,12 @@ -# to set up 'kube-master' nodes -- hosts: kube-master +# to set up 'kube_master' nodes +- hosts: kube_master roles: - kube-master - kube-node tasks: - name: Making master nodes SchedulingDisabled shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} " - when: "inventory_hostname not in groups['kube-node']" + when: "inventory_hostname not in groups['kube_node']" ignore_errors: true - name: Setting master role name diff --git a/playbooks/05.kube-node.yml b/playbooks/05.kube-node.yml index caf2a95..933385f 100644 --- a/playbooks/05.kube-node.yml +++ b/playbooks/05.kube-node.yml @@ -1,4 +1,4 @@ -# to set up 'kube-node' nodes -- hosts: kube-node +# to set up 'kube_node' nodes +- hosts: kube_node roles: - - { role: kube-node, when: "inventory_hostname not in groups['kube-master']" } + - { role: kube-node, when: "inventory_hostname not in groups['kube_master']" } diff --git a/playbooks/06.network.yml b/playbooks/06.network.yml index c6e5e27..339dc99 100644 --- a/playbooks/06.network.yml +++ b/playbooks/06.network.yml @@ -1,7 +1,7 @@ # to install network plugin, only one can be choosen - hosts: - - kube-master - - kube-node + - kube_master + - kube_node roles: - { role: calico, when: "CLUSTER_NETWORK == 'calico'" } - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" } diff --git a/playbooks/07.cluster-addon.yml b/playbooks/07.cluster-addon.yml index efe756f..6f29173 100644 --- a/playbooks/07.cluster-addon.yml +++ b/playbooks/07.cluster-addon.yml @@ -1,5 +1,5 @@ # to install clust-addons - hosts: - - kube-node + - kube_node roles: - cluster-addon diff --git a/playbooks/11.harbor.yml b/playbooks/11.harbor.yml index 76bf37e..f86af13 100644 --- a/playbooks/11.harbor.yml +++ b/playbooks/11.harbor.yml @@ -16,8 +16,8 @@ when: hostvars[groups.harbor[0]]['SELF_SIGNED_CERT'] == 'yes' - hosts: - - kube-master - - kube-node + - kube_master + - kube_node tasks: - name: Define 'harbor_hostname', a domain name set_fact: harbor_hostname={{ hostvars[groups.harbor[0]]['HARBOR_DOMAIN'] }} diff --git a/playbooks/23.addmaster.yml b/playbooks/23.addmaster.yml index dfc77aa..454b340 100644 --- a/playbooks/23.addmaster.yml +++ b/playbooks/23.addmaster.yml @@ -17,7 +17,7 @@ tasks: - name: Making master nodes SchedulingDisabled shell: "{{ bin_dir }}/kubectl cordon {{ NODE_TO_ADD }} " - when: "inventory_hostname not in groups['kube-node']" + when: "inventory_hostname not in groups['kube_node']" ignore_errors: true - name: Setting master role name diff --git a/playbooks/31.deletcd.yml b/playbooks/31.deletcd.yml index bc00ffd..3a86bc9 100644 --- a/playbooks/31.deletcd.yml +++ b/playbooks/31.deletcd.yml @@ -73,7 +73,7 @@ # lineinfile is 
inadequate to delete lines between some specific line range - name: remove the etcd's node entry in hosts - shell: 'sed -i "/^\[etcd/,/^\[kube-master/ {/^{{ ETCD_TO_DEL }}[^0-9]/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts' + shell: 'sed -i "/^\[etcd/,/^\[kube_master/ {/^{{ ETCD_TO_DEL }}[^0-9]/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts' args: warn: false when: "groups['etcd']|length > 1 and ETCD_TO_DEL in groups['etcd']" diff --git a/playbooks/32.delnode.yml b/playbooks/32.delnode.yml index 8b3ca35..231b9fb 100644 --- a/playbooks/32.delnode.yml +++ b/playbooks/32.delnode.yml @@ -2,8 +2,8 @@ - hosts: localhost tasks: - - fail: msg="you CAN NOT delete the last member of kube-master!" - when: "groups['kube-master']|length < 2 and NODE_TO_DEL in groups['kube-master']" + - fail: msg="you CAN NOT delete the last member of kube_master!" + when: "groups['kube_master']|length < 2 and NODE_TO_DEL in groups['kube_master']" - name: run kubectl drain @{{ NODE_TO_DEL }} shell: "{{ base_dir }}/bin/kubectl drain {{ NODE_TO_DEL }} --ignore-daemonsets --delete-emptydir-data --force" @@ -26,6 +26,6 @@ # lineinfile is inadequate to delete lines between some specific line range - name: remove the node's entry in hosts - shell: 'sed -i "/^\[kube-node/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts' + shell: 'sed -i "/^\[kube_node/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts' args: warn: false diff --git a/playbooks/33.delmaster.yml b/playbooks/33.delmaster.yml index a6c17c6..49611e4 100644 --- a/playbooks/33.delmaster.yml +++ b/playbooks/33.delmaster.yml @@ -1,9 +1,9 @@ -# WARNNING: this playbook will clean the kube-master node {{ NODE_TO_DEL }} +# WARNNING: this playbook will clean the kube_master node {{ NODE_TO_DEL }} - hosts: localhost tasks: - - fail: msg="you CAN NOT delete the last member of kube-master!" - when: "groups['kube-master']|length < 2 and NODE_TO_DEL in groups['kube-master']" + - fail: msg="you CAN NOT delete the last member of kube_master!" 
+ when: "groups['kube_master']|length < 2 and NODE_TO_DEL in groups['kube_master']" - name: run kubectl drain @{{ NODE_TO_DEL }} shell: "{{ base_dir }}/bin/kubectl drain {{ NODE_TO_DEL }} --ignore-daemonsets --delete-emptydir-data --force" @@ -27,6 +27,6 @@ # lineinfile is inadequate to delete lines between some specific line range - name: remove the master's entry in hosts - shell: 'sed -i "/^\[kube-master/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts' + shell: 'sed -i "/^\[kube_master/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts' args: warn: false diff --git a/playbooks/90.setup.yml b/playbooks/90.setup.yml index 12edc16..d27c5f5 100644 --- a/playbooks/90.setup.yml +++ b/playbooks/90.setup.yml @@ -1,9 +1,9 @@ # [optional] to synchronize time of nodes with 'chrony' - hosts: - - kube-master - - kube-node + - kube_master + - kube_node - etcd - - ex-lb + - ex_lb - chrony roles: - { role: chrony, when: "groups['chrony']|length > 0" } @@ -15,8 +15,8 @@ # prepare tasks for all nodes - hosts: - - kube-master - - kube-node + - kube_master + - kube_node - etcd roles: - prepare @@ -28,36 +28,36 @@ # to install container runtime - hosts: - - kube-master - - kube-node + - kube_master + - kube_node roles: - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" } - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" } -# to set up 'kube-master' nodes -- hosts: kube-master +# to set up 'kube_master' nodes +- hosts: kube_master roles: - kube-master - kube-node tasks: - name: Making master nodes SchedulingDisabled shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} " - when: "inventory_hostname not in groups['kube-node']" + when: "inventory_hostname not in groups['kube_node']" ignore_errors: true - name: Setting master role name shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite" ignore_errors: true -# to set up 'kube-node' nodes -- hosts: kube-node +# to set up 'kube_node' nodes +- hosts: kube_node roles: - - { role: kube-node, when: "inventory_hostname not in groups['kube-master']" } + - { role: kube-node, when: "inventory_hostname not in groups['kube_master']" } # to install network plugin, only one can be choosen - hosts: - - kube-master - - kube-node + - kube_master + - kube_node roles: - { role: calico, when: "CLUSTER_NETWORK == 'calico'" } - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" } @@ -67,6 +67,6 @@ # to install cluster-addons - hosts: - - kube-node + - kube_node roles: - cluster-addon diff --git a/playbooks/91.start.yml b/playbooks/91.start.yml index 2df0787..defaf9a 100644 --- a/playbooks/91.start.yml +++ b/playbooks/91.start.yml @@ -3,9 +3,9 @@ - name: starting etcd cluster service: name=etcd state=started enabled=yes -- hosts: kube-master +- hosts: kube_master tasks: - - name: starting kube-master services + - name: starting kube_master services service: name={{ item }} state=started enabled=yes with_items: - kube-apiserver @@ -13,8 +13,8 @@ - kube-scheduler - hosts: - - kube-master - - kube-node + - kube_master + - kube_node tasks: - name: starting docker service: name=docker state=started enabled=yes @@ -24,19 +24,19 @@ service: name=containerd state=started enabled=yes when: "CONTAINER_RUNTIME == 'containerd'" - - name: starting haproxy on kube-node + - name: starting haproxy on kube_node service: name=haproxy state=started enabled=yes when: - - "inventory_hostname not in groups['kube-master']" - - 
"groups['kube-master']|length > 1" + - "inventory_hostname not in groups['kube_master']" + - "groups['kube_master']|length > 1" - - name: starting kube-node services + - name: starting kube_node services service: name={{ item }} state=started enabled=yes with_items: - kubelet - kube-proxy -- hosts: ex-lb +- hosts: ex_lb tasks: - name: starting external loadbalance service: name={{ item }} state=started enabled=yes diff --git a/playbooks/92.stop.yml b/playbooks/92.stop.yml index 8cb7ab9..b60ce78 100644 --- a/playbooks/92.stop.yml +++ b/playbooks/92.stop.yml @@ -1,6 +1,6 @@ -- hosts: kube-master +- hosts: kube_master tasks: - - name: stopping kube-master services + - name: stopping kube_master services service: name={{ item }} state=stopped enabled=no with_items: - kube-apiserver @@ -12,7 +12,7 @@ - name: stopping etcd cluster service: name=etcd state=stopped enabled=no -- hosts: ex-lb +- hosts: ex_lb tasks: - name: stopping external loadbalance service: name={{ item }} state=stopped enabled=no @@ -21,16 +21,16 @@ - keepalived - hosts: - - kube-master - - kube-node + - kube_master + - kube_node tasks: - - name: stopping haproxy on kube-node + - name: stopping haproxy on kube_node service: name=haproxy state=stopped enabled=no when: - - "inventory_hostname not in groups['kube-master']" - - "groups['kube-master']|length > 1" + - "inventory_hostname not in groups['kube_master']" + - "groups['kube_master']|length > 1" - - name: stopping kube-node services + - name: stopping kube_node services service: name={{ item }} state=stopped enabled=no with_items: - kubelet diff --git a/playbooks/93.upgrade.yml b/playbooks/93.upgrade.yml index 79b65f4..919310d 100644 --- a/playbooks/93.upgrade.yml +++ b/playbooks/93.upgrade.yml @@ -3,7 +3,7 @@ # Usage: ezctl upgrade # check k8s version -- hosts: kube-master +- hosts: kube_master tasks: - name: get running k8s version shell: "{{ bin_dir }}/kube-apiserver --version" @@ -30,13 +30,13 @@ # update masters - hosts: - - kube-master + - kube_master roles: - kube-master - kube-node # update nodes - hosts: - - kube-node + - kube_node roles: - - { role: kube-node, when: "inventory_hostname not in groups['kube-master']" } + - { role: kube-node, when: "inventory_hostname not in groups['kube_master']" } diff --git a/playbooks/99.clean.yml b/playbooks/99.clean.yml index 24ea2c9..22063c7 100644 --- a/playbooks/99.clean.yml +++ b/playbooks/99.clean.yml @@ -2,9 +2,9 @@ # Make sure you know what you are doing. 
- hosts: - - kube-master - - kube-node - - ex-lb + - kube_master + - kube_node + - ex_lb - etcd vars: DEL_MASTER: "yes" diff --git a/roles/chrony/chrony.yml b/roles/chrony/chrony.yml index 0887931..8868379 100644 --- a/roles/chrony/chrony.yml +++ b/roles/chrony/chrony.yml @@ -1,8 +1,8 @@ - hosts: - - kube-master - - kube-node + - kube_master + - kube_node - etcd - - ex-lb + - ex_lb - chrony roles: - { role: chrony, when: "groups['chrony']|length > 0" } diff --git a/roles/cilium/cilium.yml b/roles/cilium/cilium.yml index 5e84dbb..1a8529c 100644 --- a/roles/cilium/cilium.yml +++ b/roles/cilium/cilium.yml @@ -1,5 +1,5 @@ - hosts: - - kube-master - - kube-node + - kube_master + - kube_node roles: - cilium diff --git a/roles/clean/defaults/main.yml b/roles/clean/defaults/main.yml index d92f3d3..8207962 100644 --- a/roles/clean/defaults/main.yml +++ b/roles/clean/defaults/main.yml @@ -1,7 +1,7 @@ -# 是否删除 kube-master 相关服务 +# 是否删除 kube_master 相关服务 DEL_MASTER: "no" -# 是否删除 kube-node 相关服务 +# 是否删除 kube_node 相关服务 DEL_NODE: "no" # 是否删除 etc 相关服务 diff --git a/roles/clean/tasks/clean_lb.yml b/roles/clean/tasks/clean_lb.yml index 5acca40..ba6fe15 100644 --- a/roles/clean/tasks/clean_lb.yml +++ b/roles/clean/tasks/clean_lb.yml @@ -12,4 +12,4 @@ with_items: - "/etc/haproxy" - "/etc/keepalived" - when: "inventory_hostname in groups['kube-node'] or inventory_hostname in groups['ex-lb']" + when: "inventory_hostname in groups['kube_node'] or inventory_hostname in groups['ex_lb']" diff --git a/roles/clean/tasks/clean_master.yml b/roles/clean/tasks/clean_master.yml index 682b1bf..0fb90ab 100644 --- a/roles/clean/tasks/clean_master.yml +++ b/roles/clean/tasks/clean_master.yml @@ -1,18 +1,18 @@ -# to clean 'kube-master' nodes -- name: stop and disable kube-master service +# to clean 'kube_master' nodes +- name: stop and disable kube_master service service: name={{ item }} state=stopped enabled=no with_items: - kube-apiserver - kube-controller-manager - kube-scheduler ignore_errors: true - when: "inventory_hostname in groups['kube-master']" + when: "inventory_hostname in groups['kube_master']" -- name: remove files and dirs of 'kube-master' nodes +- name: remove files and dirs of 'kube_master' nodes file: name={{ item }} state=absent with_items: - "/var/run/kubernetes" - "/etc/systemd/system/kube-apiserver.service" - "/etc/systemd/system/kube-controller-manager.service" - "/etc/systemd/system/kube-scheduler.service" - when: "inventory_hostname in groups['kube-master']" + when: "inventory_hostname in groups['kube_master']" diff --git a/roles/clean/tasks/clean_node.yml b/roles/clean/tasks/clean_node.yml index 85f06c2..e22f98d 100644 --- a/roles/clean/tasks/clean_node.yml +++ b/roles/clean/tasks/clean_node.yml @@ -1,6 +1,6 @@ -# to clean 'kube-node' nodes +# to clean 'kube_node' nodes - block: - - name: stop and disable kube-node service + - name: stop and disable kube_node service service: name={{ item }} state=stopped enabled=no with_items: - kubelet @@ -13,7 +13,7 @@ warn: false ignore_errors: true - - name: remove files and dirs of 'kube-node' nodes + - name: remove files and dirs of 'kube_node' nodes file: name={{ item }} state=absent with_items: - "/var/lib/kubelet/" @@ -160,4 +160,4 @@ # && iptables -F -t raw && iptables -X -t raw \ # && iptables -F -t mangle && iptables -X -t mangle" - when: "inventory_hostname in groups['kube-master'] or inventory_hostname in groups['kube-node']" + when: "inventory_hostname in groups['kube_master'] or inventory_hostname in groups['kube_node']" diff --git 
a/roles/cluster-addon/templates/prometheus/values.yaml.j2 b/roles/cluster-addon/templates/prometheus/values.yaml.j2 index eb13c78..afd3f6b 100644 --- a/roles/cluster-addon/templates/prometheus/values.yaml.j2 +++ b/roles/cluster-addon/templates/prometheus/values.yaml.j2 @@ -58,7 +58,7 @@ kubelet: kubeControllerManager: enabled: true endpoints: -{% for h in groups['kube-master'] %} +{% for h in groups['kube_master'] %} - {{ h }} {% endfor %} @@ -89,7 +89,7 @@ kubeEtcd: kubeScheduler: enabled: true endpoints: -{% for h in groups['kube-master'] %} +{% for h in groups['kube_master'] %} - {{ h }} {% endfor %} @@ -97,11 +97,11 @@ kubeScheduler: kubeProxy: enabled: true endpoints: -{% for h in groups['kube-master'] %} +{% for h in groups['kube_master'] %} - {{ h }} {% endfor %} -{% for h in groups['kube-node'] %} -{% if h not in groups['kube-master'] %} +{% for h in groups['kube_node'] %} +{% if h not in groups['kube_master'] %} - {{ h }} {% endif %} {% endfor %} diff --git a/roles/deploy/tasks/main.yml b/roles/deploy/tasks/main.yml index 30c55a1..056905d 100644 --- a/roles/deploy/tasks/main.yml +++ b/roles/deploy/tasks/main.yml @@ -46,8 +46,11 @@ - import_tasks: create-kube-scheduler-kubeconfig.yml # ansible 控制端一些易用性配置 -- name: 本地创建 ezctl 工具的软连接 - file: src={{ base_dir }}/ezctl dest=/usr/bin/ezctl state=link +- name: 本地创建 ezdown/ezctl 工具的软连接 + file: src={{ base_dir }}/{{ item }} dest=/usr/bin/{{ item }} state=link + with_items: + - ezdown + - ezctl - name: ansible 控制端创建 kubectl 软链接 file: src={{ base_dir }}/bin/kubectl dest=/usr/bin/kubectl state=link diff --git a/roles/deploy/vars/main.yml b/roles/deploy/vars/main.yml index d1e0eaa..9a8b00a 100644 --- a/roles/deploy/vars/main.yml +++ b/roles/deploy/vars/main.yml @@ -1,5 +1,5 @@ # apiserver 默认第一个master节点 -KUBE_APISERVER: "https://{{ groups['kube-master'][0] }}:6443" +KUBE_APISERVER: "https://{{ groups['kube_master'][0] }}:6443" # ADD_KCFG: false diff --git a/roles/ex-lb/clean-ex-lb.yml b/roles/ex-lb/clean-ex-lb.yml index b24972c..804354d 100644 --- a/roles/ex-lb/clean-ex-lb.yml +++ b/roles/ex-lb/clean-ex-lb.yml @@ -1,5 +1,5 @@ - hosts: - - ex-lb + - ex_lb tasks: - block: - name: stop and disable chrony in Ubuntu diff --git a/roles/ex-lb/ex-lb.yml b/roles/ex-lb/ex-lb.yml index e351ace..7b9b256 100644 --- a/roles/ex-lb/ex-lb.yml +++ b/roles/ex-lb/ex-lb.yml @@ -1,3 +1,3 @@ -- hosts: ex-lb +- hosts: ex_lb roles: - ex-lb diff --git a/roles/ex-lb/tasks/main.yml b/roles/ex-lb/tasks/main.yml index 9418ae4..2401130 100644 --- a/roles/ex-lb/tasks/main.yml +++ b/roles/ex-lb/tasks/main.yml @@ -1,7 +1,7 @@ -# ex-lb 节点成员不能同时是 kube-node 节点,因为它们都需要安装 haproxy +# ex_lb 节点成员不能同时是 kube_node 节点,因为它们都需要安装 haproxy - name: fail info1 - fail: msg="an 'ex-lb' node CAN NOT be a 'kube-node' node at the same time" - when: "inventory_hostname in groups['kube-node']" + fail: msg="an 'ex_lb' node CAN NOT be a 'kube_node' node at the same time" + when: "inventory_hostname in groups['kube_node']" # 自动设置LB节点变量'LB_IF' - name: 注册变量 LB_IF_TMP diff --git a/roles/ex-lb/templates/haproxy.cfg.j2 b/roles/ex-lb/templates/haproxy.cfg.j2 index 732b1ab..b8df0a6 100644 --- a/roles/ex-lb/templates/haproxy.cfg.j2 +++ b/roles/ex-lb/templates/haproxy.cfg.j2 @@ -13,14 +13,14 @@ defaults timeout client 10m timeout server 10m -listen kube-master +listen kube_master bind 0.0.0.0:{{ EX_APISERVER_PORT }} mode tcp option tcplog option dontlognull option dontlog-normal balance {{ BALANCE_ALG }} -{% for host in groups['kube-master'] %} +{% for host in groups['kube_master'] %} server {{ host }} {{ host 
}}:6443 check inter 5s fall 2 rise 2 weight 1 {% endfor %} @@ -32,12 +32,12 @@ listen ingress-node option dontlognull option dontlog-normal balance {{ BALANCE_ALG }} -{% if groups['kube-node']|length > 3 %} - server {{ groups['kube-node'][0] }} {{ groups['kube-node'][0] }}:23456 check inter 5s fall 2 rise 2 weight 1 - server {{ groups['kube-node'][1] }} {{ groups['kube-node'][1] }}:23456 check inter 5s fall 2 rise 2 weight 1 - server {{ groups['kube-node'][2] }} {{ groups['kube-node'][2] }}:23456 check inter 5s fall 2 rise 2 weight 1 +{% if groups['kube_node']|length > 3 %} + server {{ groups['kube_node'][0] }} {{ groups['kube_node'][0] }}:23456 check inter 5s fall 2 rise 2 weight 1 + server {{ groups['kube_node'][1] }} {{ groups['kube_node'][1] }}:23456 check inter 5s fall 2 rise 2 weight 1 + server {{ groups['kube_node'][2] }} {{ groups['kube_node'][2] }}:23456 check inter 5s fall 2 rise 2 weight 1 {% else %} -{% for host in groups['kube-node'] %} +{% for host in groups['kube_node'] %} server {{ host }} {{ host }}:23456 check inter 5s fall 2 rise 2 weight 1 {% endfor %} {% endif %} @@ -51,12 +51,12 @@ listen ingress-node-tls option dontlognull option dontlog-normal balance {{ BALANCE_ALG }} -{% if groups['kube-node']|length > 3 %} - server {{ groups['kube-node'][0] }} {{ groups['kube-node'][0] }}:23457 check inter 5s fall 2 rise 2 weight 1 - server {{ groups['kube-node'][1] }} {{ groups['kube-node'][1] }}:23457 check inter 5s fall 2 rise 2 weight 1 - server {{ groups['kube-node'][2] }} {{ groups['kube-node'][2] }}:23457 check inter 5s fall 2 rise 2 weight 1 +{% if groups['kube_node']|length > 3 %} + server {{ groups['kube_node'][0] }} {{ groups['kube_node'][0] }}:23457 check inter 5s fall 2 rise 2 weight 1 + server {{ groups['kube_node'][1] }} {{ groups['kube_node'][1] }}:23457 check inter 5s fall 2 rise 2 weight 1 + server {{ groups['kube_node'][2] }} {{ groups['kube_node'][2] }}:23457 check inter 5s fall 2 rise 2 weight 1 {% else %} -{% for host in groups['kube-node'] %} +{% for host in groups['kube_node'] %} server {{ host }} {{ host }}:23457 check inter 5s fall 2 rise 2 weight 1 {% endfor %} {% endif %} diff --git a/roles/ex-lb/templates/keepalived-backup.conf.j2 b/roles/ex-lb/templates/keepalived-backup.conf.j2 index 3c90e11..738a92a 100644 --- a/roles/ex-lb/templates/keepalived-backup.conf.j2 +++ b/roles/ex-lb/templates/keepalived-backup.conf.j2 @@ -9,12 +9,12 @@ vrrp_script check-haproxy { weight -60 } -vrrp_instance VI-kube-master { +vrrp_instance VI-kube_master { state BACKUP priority {{ 119 | random(61, 1) }} unicast_src_ip {{ inventory_hostname }} unicast_peer { -{% for h in groups['ex-lb'] %}{% if h != inventory_hostname %} +{% for h in groups['ex_lb'] %}{% if h != inventory_hostname %} {{ h }} {% endif %}{% endfor %} } diff --git a/roles/ex-lb/templates/keepalived-master.conf.j2 b/roles/ex-lb/templates/keepalived-master.conf.j2 index c41417f..93b3b03 100644 --- a/roles/ex-lb/templates/keepalived-master.conf.j2 +++ b/roles/ex-lb/templates/keepalived-master.conf.j2 @@ -9,12 +9,12 @@ vrrp_script check-haproxy { weight -60 } -vrrp_instance VI-kube-master { +vrrp_instance VI-kube_master { state MASTER priority 120 unicast_src_ip {{ inventory_hostname }} unicast_peer { -{% for h in groups['ex-lb'] %}{% if h != inventory_hostname %} +{% for h in groups['ex_lb'] %}{% if h != inventory_hostname %} {{ h }} {% endif %}{% endfor %} } diff --git a/roles/kube-master/tasks/main.yml b/roles/kube-master/tasks/main.yml index bad85e3..a55086a 100644 --- a/roles/kube-master/tasks/main.yml +++ 
b/roles/kube-master/tasks/main.yml @@ -1,4 +1,4 @@ -- name: 下载 kube-master 二进制 +- name: 下载 kube_master 二进制 copy: src={{ base_dir }}/bin/{{ item }} dest={{ bin_dir }}/{{ item }} mode=0755 with_items: - kube-apiserver diff --git a/roles/kube-master/templates/kubernetes-csr.json.j2 b/roles/kube-master/templates/kubernetes-csr.json.j2 index 45052a6..6aca552 100644 --- a/roles/kube-master/templates/kubernetes-csr.json.j2 +++ b/roles/kube-master/templates/kubernetes-csr.json.j2 @@ -2,10 +2,10 @@ "CN": "kubernetes", "hosts": [ "127.0.0.1", -{% if groups['ex-lb']|length > 0 %} - "{{ hostvars[groups['ex-lb'][0]]['EX_APISERVER_VIP'] }}", +{% if groups['ex_lb']|length > 0 %} + "{{ hostvars[groups['ex_lb'][0]]['EX_APISERVER_VIP'] }}", {% endif %} -{% for host in groups['kube-master'] %} +{% for host in groups['kube_master'] %} "{{ host }}", {% endfor %} "{{ CLUSTER_KUBERNETES_SVC_IP }}", diff --git a/roles/kube-node/tasks/main.yml b/roles/kube-node/tasks/main.yml index efc8103..437e099 100644 --- a/roles/kube-node/tasks/main.yml +++ b/roles/kube-node/tasks/main.yml @@ -1,4 +1,4 @@ -- name: 创建kube-node 相关目录 +- name: 创建kube_node 相关目录 file: name={{ item }} state=directory with_items: - /var/lib/kubelet @@ -18,7 +18,7 @@ # 每个 node 节点运行 haproxy 连接到多个 apiserver - import_tasks: node_lb.yml - when: "inventory_hostname not in groups['kube-master']" + when: "inventory_hostname not in groups['kube_master']" - name: 替换 kubeconfig 的 apiserver 地址 lineinfile: diff --git a/roles/kube-node/tasks/node_lb.yml b/roles/kube-node/tasks/node_lb.yml index 88bb449..b70a52f 100644 --- a/roles/kube-node/tasks/node_lb.yml +++ b/roles/kube-node/tasks/node_lb.yml @@ -1,7 +1,7 @@ -# kube-node 节点成员不能同时是 ex-lb 节点,因为它们都需要安装 haproxy +# kube_node 节点成员不能同时是 ex_lb 节点,因为它们都需要安装 haproxy - name: fail info1 - fail: msg="an 'kube-node' node CAN NOT be a 'ex-lb' node at the same time" - when: "inventory_hostname in groups['ex-lb']" + fail: msg="an 'kube_node' node CAN NOT be a 'ex_lb' node at the same time" + when: "inventory_hostname in groups['ex_lb']" tags: restart_lb - name: 安装 haproxy @@ -40,7 +40,7 @@ # 仅 master 节点数大于1时才启动haproxy - name: 开启haproxy服务 shell: systemctl start haproxy - when: "groups['kube-master']|length > 1" + when: "groups['kube_master']|length > 1" tags: restart_lb # master 节点从1个增加到2个时候,需要修改如下配置 @@ -56,11 +56,11 @@ - "/etc/kubernetes/kubelet.kubeconfig" - "/etc/kubernetes/kube-proxy.kubeconfig" - - name: restart kube-node service + - name: restart kube_node service service: name={{ item }} state=restarted with_items: - kubelet - kube-proxy ignore_errors: true - when: "MASTER_CHG == 'yes' and groups['kube-master']|length < 3" + when: "MASTER_CHG == 'yes' and groups['kube_master']|length < 3" tags: restart_lb diff --git a/roles/kube-node/templates/haproxy.cfg.j2 b/roles/kube-node/templates/haproxy.cfg.j2 index 34b0715..76cb5d5 100644 --- a/roles/kube-node/templates/haproxy.cfg.j2 +++ b/roles/kube-node/templates/haproxy.cfg.j2 @@ -12,13 +12,13 @@ defaults timeout client 10m timeout server 10m -listen kube-master +listen kube_master bind 127.0.0.1:6443 mode tcp option tcplog option dontlognull option dontlog-normal balance {{ BALANCE_ALG }} -{% for host in groups['kube-master'] %} +{% for host in groups['kube_master'] %} server {{ host }} {{ host }}:6443 check inter 10s fall 2 rise 2 weight 1 {% endfor %} diff --git a/roles/kube-node/vars/main.yml b/roles/kube-node/vars/main.yml index 1728703..c8933ac 100644 --- a/roles/kube-node/vars/main.yml +++ b/roles/kube-node/vars/main.yml @@ -2,13 +2,13 @@ CLUSTER_DNS_SVC_IP: "{{ 
SERVICE_CIDR | ipaddr('net') | ipaddr(2) | ipaddr('address') }}" # 设置 APISERVER 地址 -KUBE_APISERVER: "{%- if inventory_hostname in groups['kube-master'] -%} \ +KUBE_APISERVER: "{%- if inventory_hostname in groups['kube_master'] -%} \ https://{{ inventory_hostname }}:6443 \ {%- else -%} \ - {%- if groups['kube-master']|length > 1 -%} \ + {%- if groups['kube_master']|length > 1 -%} \ https://127.0.0.1:6443 \ {%- else -%} \ - https://{{ groups['kube-master'][0] }}:6443 \ + https://{{ groups['kube_master'][0] }}:6443 \ {%- endif -%} \ {%- endif -%}" diff --git a/roles/kube-router/kube-router.yml b/roles/kube-router/kube-router.yml index 5d2fae6..1c86b19 100644 --- a/roles/kube-router/kube-router.yml +++ b/roles/kube-router/kube-router.yml @@ -1,5 +1,5 @@ - hosts: - - kube-master - - kube-node + - kube_master + - kube_node roles: - kube-router diff --git a/roles/os-harden/os-harden.yml b/roles/os-harden/os-harden.yml index 796f220..546bcf2 100644 --- a/roles/os-harden/os-harden.yml +++ b/roles/os-harden/os-harden.yml @@ -1,9 +1,9 @@ # [可选]操作系统安全加固 https://github.com/dev-sec/ansible-os-hardening - hosts: - - kube-master - - kube-node + - kube_master + - kube_node - etcd - - ex-lb + - ex_lb - chrony vars: os_security_users_allow: change_user diff --git a/roles/prepare/tasks/main.yml b/roles/prepare/tasks/main.yml index 85ac396..65f0768 100644 --- a/roles/prepare/tasks/main.yml +++ b/roles/prepare/tasks/main.yml @@ -49,5 +49,5 @@ with_items: - kube-controller-manager.kubeconfig - kube-scheduler.kubeconfig - when: "inventory_hostname in groups['kube-master']" - when: "inventory_hostname in groups['kube-master'] or inventory_hostname in groups['kube-node']" + when: "inventory_hostname in groups['kube_master']" + when: "inventory_hostname in groups['kube_master'] or inventory_hostname in groups['kube_node']"
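
Note on migrating existing clusters: this patch renames the inventory groups from the hyphenated names (`kube-master`, `kube-node`, `ex-lb`) to underscore names (`kube_master`, `kube_node`, `ex_lb`), since newer Ansible releases treat `-` in group names as invalid and emit deprecation warnings. Inventories generated before this change still carry the old group headers, so they need the same rename before the updated playbooks are run against them. Below is a minimal sketch, assuming the default layout `/etc/kubeasz/clusters/<cluster>/hosts` referenced by ezctl above; the cluster name `k8s-01` and the backup suffix are placeholders, adjust them to your environment.

``` bash
# Illustrative sketch only: rename the group headers of an existing cluster
# inventory to the underscore style introduced by this patch.
# Assumes the default kubeasz layout /etc/kubeasz/clusters/<name>/hosts.
CLUSTER=k8s-01                                   # placeholder cluster name
HOSTS="/etc/kubeasz/clusters/${CLUSTER}/hosts"
cp "${HOSTS}" "${HOSTS}.bak"                     # keep a backup before editing
sed -i 's/^\[kube-master\]/[kube_master]/; s/^\[kube-node\]/[kube_node]/; s/^\[ex-lb\]/[ex_lb]/' "${HOSTS}"

# quick check that the renamed groups resolve as expected
ansible -i "${HOSTS}" kube_master,kube_node,ex_lb --list-hosts
```

The example templates `hosts.allinone` and `hosts.multi-node` in this patch already use the new names, so only inventories created from the older examples need this edit.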