mirror of https://github.com/easzlab/kubeasz.git

Fix non-standard Ansible group names (修复ansible group命名不规范问题)
parent f8cfbe2b09
commit 63a7e6d7ee
@@ -112,7 +112,7 @@ display_skipped_hosts = False
 # by default (as of 1.4), Ansible may display deprecation warnings for language
 # features that should no longer be used and will be removed in future versions.
 # to disable these warnings, set the following value to False:
-#deprecation_warnings = True
+deprecation_warnings = False

 # (as of 1.8), Ansible can optionally warn when usage of the shell and
 # command module appear to be simplified by using a default Ansible module
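To confirm the new setting is actually picked up on the ansible control host after this change, a quick check might look like the sketch below (assuming a reasonably recent ansible that ships `ansible-config`):

``` bash
# should report DEPRECATION_WARNINGS = False once this ansible.cfg is in effect
ansible-config dump | grep -i DEPRECATION_WARNINGS
```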
@@ -41,7 +41,7 @@ Habor是由VMWare中国团队开源的容器镜像仓库。事实上,Habor是
 - role `prepare` 基础系统环境准备
 - role `docker` 安装docker
 - role `harbor` 安装harbor
-- 注意:`kube-node`节点在harbor部署完之后,需要配置harbor的证书(详见下节配置docker/containerd信任harbor证书),并可以在hosts里面添加harbor的域名解析,如果你的环境中有dns服务器,可以跳过hosts文件设置
+- 注意:`kube_node`节点在harbor部署完之后,需要配置harbor的证书(详见下节配置docker/containerd信任harbor证书),并可以在hosts里面添加harbor的域名解析,如果你的环境中有dns服务器,可以跳过hosts文件设置

 请在另外窗口打开 [roles/harbor/tasks/main.yml](../../roles/harbor/tasks/main.yml),对照以下讲解
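As a concrete illustration of that note, trusting the harbor certificate and adding a hosts entry on a `kube_node` host could look roughly like this; the domain, IP and certificate file name are placeholders taken from the example inventory later in this commit, not fixed values:

``` bash
# placeholder address/domain -- use the harbor host and HARBOR_DOMAIN from your own hosts file
echo "192.168.1.8 harbor.yourdomain.com" >> /etc/hosts

# let docker trust the self-signed harbor CA (certificate path/name is illustrative)
mkdir -p /etc/docker/certs.d/harbor.yourdomain.com
cp harbor-ca.crt /etc/docker/certs.d/harbor.yourdomain.com/ca.crt
systemctl restart docker
```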
@@ -1,107 +0,0 @@
-# 更改高可用 `Master IP`
-
-**WARNING:** 更改集群的 `Master VIP`操作有风险,不建议在生产环境直接操作,此文档实践一个修改的操作流程,帮助理解整个集群运行架构和 `kubeasz`的部署逻辑,请在测试环境操作练手。
-**BUG:** 目前该操作只适用于集群网络选用`calico`,如果使用`flannel`操作变更后会出现POD地址分配错误的BUG。
-
-首先分析大概操作思路:
-
-- 修改`/etc/ansible/hosts`里面的配置项`MASTER_IP` `KUBE_APISERVER`
-- 修改LB节点的keepalive的配置,重启keepalived服务
-- 修改kubectl/kube-proxy的配置文件,使用新VIP地址更新api-server地址
-- 重新生成master证书,hosts字段包含新VIP地址
-- 修改kubelet的配置文件(kubelet的配置文件和证书是由bootstrap机制自动生成的)
-- 删除kubelet.kubeconfig
-- 删除集群所有node 节点
-- 所有节点重新bootstrap
-
-## 变更前状态验证
-
-``` bash
-$ kubectl get cs,node,pod -o wide
-NAME                 STATUS    MESSAGE              ERROR
-controller-manager   Healthy   ok
-scheduler            Healthy   ok
-etcd-2               Healthy   {"health":"true"}
-etcd-0               Healthy   {"health":"true"}
-etcd-1               Healthy   {"health":"true"}
-
-NAME           STATUS                     ROLES    AGE   VERSION   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
-192.168.1.41   Ready,SchedulingDisabled   <none>   2h    v1.10.0   <none>        Ubuntu 16.04.3 LTS   4.4.0-97-generic   docker://18.3.0
-192.168.1.42   Ready,SchedulingDisabled   <none>   2h    v1.10.0   <none>        Ubuntu 16.04.3 LTS   4.4.0-97-generic   docker://18.3.0
-192.168.1.43   Ready                      <none>   2h    v1.10.0   <none>        Ubuntu 16.04.3 LTS   4.4.0-97-generic   docker://18.3.0
-192.168.1.44   Ready                      <none>   2h    v1.10.0   <none>        Ubuntu 16.04.3 LTS   4.4.0-98-generic   docker://18.3.0
-192.168.1.45   Ready                      <none>   2h    v1.10.0   <none>        Ubuntu 16.04.3 LTS   4.4.0-98-generic   docker://18.3.0
-
-NAME                     READY     STATUS    RESTARTS   AGE   IP               NODE
-busy-5d6b6b5d4b-8wxkp    1/1       Running   0          17h   172.20.135.133   192.168.1.41
-busy-5d6b6b5d4b-fcmkp    1/1       Running   0          17h   172.20.135.128   192.168.1.41
-busy-5d6b6b5d4b-ptvd7    1/1       Running   0          17h   172.20.135.136   192.168.1.41
-nginx-768979984b-ncqbp   1/1       Running   0          17h   172.20.135.137   192.168.1.41
-
-# 查看待变更集群 Master VIP
-$ kubectl cluster-info
-Kubernetes master is running at https://192.168.1.39:8443
-```
-
-## 变更操作
-
-- `ansible playbook`可以使用tags来控制只允许部分任务执行,这里为简化操作没有细化,在ansible控制端具体操作如下:
-
-``` bash
-# 1.修改/etc/ansible/hosts 配置项MASTER_IP,KUBE_APISERVER
-
-# 2.删除集群所有node节点,等待重新bootstrap
-$ kubectl get node |grep Ready|awk '{print $1}' |xargs kubectl delete node
-
-# 3.重置keepalived 和修改kubectl/kube-proxy/bootstrap配置
-$ ansible-playbook 01.prepare.yml
-
-# 4.删除旧master证书
-$ ansible kube-master -m file -a 'path=/etc/kubernetes/ssl/kubernetes.pem state=absent'
-
-# 5.删除旧kubelet配置文件
-$ ansible all -m file -a 'path=/etc/kubernetes/kubelet.kubeconfig state=absent'
-
-# 6.重新配置启动master节点
-$ ansible-playbook 04.kube-master.yml
-
-# 7.重新配置启动node节点
-$ ansible-playbook 05.kube-node.yml
-```
-
-## 变更后验证
-
-``` bash
-$ kubectl get cs,node,pod -o wide
-NAME                 STATUS    MESSAGE              ERROR
-scheduler            Healthy   ok
-controller-manager   Healthy   ok
-etcd-2               Healthy   {"health":"true"}
-etcd-1               Healthy   {"health":"true"}
-etcd-0               Healthy   {"health":"true"}
-
-NAME           STATUS                     ROLES    AGE   VERSION   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
-192.168.1.41   Ready,SchedulingDisabled   <none>   4m    v1.10.0   <none>        Ubuntu 16.04.3 LTS   4.4.0-97-generic   docker://18.3.0
-192.168.1.42   Ready,SchedulingDisabled   <none>   4m    v1.10.0   <none>        Ubuntu 16.04.3 LTS   4.4.0-97-generic   docker://18.3.0
-192.168.1.43   Ready                      <none>   3m    v1.10.0   <none>        Ubuntu 16.04.3 LTS   4.4.0-97-generic   docker://18.3.0
-192.168.1.44   Ready                      <none>   3m    v1.10.0   <none>        Ubuntu 16.04.3 LTS   4.4.0-98-generic   docker://18.3.0
-192.168.1.45   Ready                      <none>   3m    v1.10.0   <none>        Ubuntu 16.04.3 LTS   4.4.0-98-generic   docker://18.3.0
-
-NAME                     READY     STATUS    RESTARTS   AGE   IP               NODE
-busy-5d6b6b5d4b-25hfr    1/1       Running   0          5m    172.20.237.64    192.168.1.43
-busy-5d6b6b5d4b-cdzb5    1/1       Running   0          5m    172.20.145.192   192.168.1.44
-busy-5d6b6b5d4b-m2rf7    1/1       Running   0          5m    172.20.26.131    192.168.1.45
-nginx-768979984b-2ngww   1/1       Running   0          5m    172.20.145.193   192.168.1.44
-
-# 查看集群master VIP已经变更
-$ kubectl cluster-info
-Kubernetes master is running at https://192.168.1.40:8443
-```
-
-## 小结
-
-本示例操作演示了多主多节点k8s集群变更`Master VIP`的操作,有助于理解整个集群组件架构和`kubeasz`的安装逻辑,小结如下:
-
-- 变更操作不影响集群已运行的业务POD,但是操作过程中业务会中断
-- 已运行POD会重新调度到各node节点,如果业务POD量很大,短时间内会对集群造成压力
-- 不建议在生成环境直接操作,本示例演示说明为主
@@ -5,8 +5,8 @@
 ```
 "hosts": [
 "127.0.0.1",
-{% if groups['ex-lb']|length > 0 %}
-"{{ hostvars[groups['ex-lb'][0]]['EX_APISERVER_VIP'] }}",
+{% if groups['ex_lb']|length > 0 %}
+"{{ hostvars[groups['ex_lb'][0]]['EX_APISERVER_VIP'] }}",
 {% endif %}
 "{{ inventory_hostname }}",
 "{{ CLUSTER_KUBERNETES_SVC_IP }}",
@@ -6,9 +6,9 @@
 - 2.部署ingress-controller时使用`LoadBalancer`类型服务,需要集群支持`LoadBalancer`
 - 3.部署ingress-controller时使用`nodePort`类型服务,然后在集群外使用 haproxy/f5 等配置 virtual server 集群

-本文档讲解使用 haproxy 配置 ingress的 VS 集群,前提是配置了自建`ex-lb`节点
+本文档讲解使用 haproxy 配置 ingress的 VS 集群,前提是配置了自建`ex_lb`节点

-## 1.配置 ex-lb 参数开启转发 ingress nodeport
+## 1.配置 ex_lb 参数开启转发 ingress nodeport

 ``` bash
 # 编辑 roles/ex-lb/defaults/main.yml,配置如下变量
@@ -22,11 +22,11 @@ INGRESS_TLS_NODEPORT_LB: "yes"
 $ ansible-playbook /etc/ansible/roles/ex-lb/ex-lb.yml
 ```

-## 3.验证 ex-lb 节点的 haproxy 服务配置 `/etc/haproxy/haproxy.cfg` 包含如下配置
+## 3.验证 ex_lb 节点的 haproxy 服务配置 `/etc/haproxy/haproxy.cfg` 包含如下配置

 ``` bash
 ... 前文省略
-listen kube-master
+listen kube_master
 bind 0.0.0.0:8443
 mode tcp
 option tcplog
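To make that verification step concrete, the ingress-related block that should appear alongside `listen kube_master` might look roughly like the sketch below; backend node IPs and the NodePort value are placeholders, and the authoritative rendering comes from roles/ex-lb/templates/haproxy.cfg.j2 with the variables set in step 1:

``` bash
# illustrative only -- real backends/ports are rendered from the ex-lb role template
listen ingress-node
bind 0.0.0.0:80
mode tcp
option tcplog
balance roundrobin
server ingress1 192.168.1.3:23456 check inter 2000 fall 2 rise 2 weight 1
server ingress2 192.168.1.4:23456 check inter 2000 fall 2 rise 2 weight 1
```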
@@ -4,7 +4,6 @@
 - [管理 MASTER 节点](op-master.md)
 - [管理 ETCD 节点](op-etcd.md)
 - [升级 K8S 版本](upgrade.md)
-- [修改多主集群VIP地址](ChangeVIP.md)
 - [修改AIO部署的系统IP](change_ip_allinone.md)
 - [替换集群使用的网络插件](change_k8s_network.md)
 - [集群备份与恢复](cluster_restore.md)
@@ -1,13 +1,13 @@
-# 管理 kube-master 节点
+# 管理 kube_master 节点

-## 1.增加 kube-master 节点
+## 1.增加 kube_master 节点

-新增`kube-master`节点大致流程为:tools/03.addmaster.yml
+新增`kube_master`节点大致流程为:tools/03.addmaster.yml
 - [可选]新节点安装 chrony 时间同步
 - 新节点预处理 prepare
 - 新节点安装 docker 服务
-- 新节点安装 kube-master 服务
-- 新节点安装 kube-node 服务
+- 新节点安装 kube_master 服务
+- 新节点安装 kube_node 服务
 - 新节点安装网络插件相关
 - 禁止业务 pod调度到新master节点
 - 更新 node 节点 haproxy 负载均衡并重启
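A hedged sketch of kicking that flow off from the ansible control node, mirroring the `02.addnode.yml` invocation shown in the node-management doc later in this commit (the playbook path and IP are assumptions to verify against your checkout):

``` bash
# assumed invocation pattern; 192.168.1.11 stands in for the new master node
ansible-playbook /etc/ansible/tools/03.addmaster.yml -e NODE_TO_ADD=192.168.1.11
```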
@@ -41,10 +41,10 @@ NAME           STATUS                     ROLES     AGE       VERSION
 192.168.1.11   Ready,SchedulingDisabled   <none>    2h        v1.9.3    # 新增 master节点
 ```

-## 2.删除 kube-master 节点
+## 2.删除 kube_master 节点


-删除`kube-master`节点大致流程为:tools/13.delmaster.yml
+删除`kube_master`节点大致流程为:tools/13.delmaster.yml
 - 检测是否可以删除
 - 迁移节点 pod
 - 删除 master 相关服务及文件
@@ -1,17 +1,17 @@
 # 管理 node 节点

 目录
-- 1.增加 kube-node 节点
+- 1.增加 kube_node 节点
 - 2.增加非标准ssh端口节点
-- 3.删除 kube-node 节点
+- 3.删除 kube_node 节点

-## 1.增加 kube-node 节点
+## 1.增加 kube_node 节点

-新增`kube-node`节点大致流程为:tools/02.addnode.yml
+新增`kube_node`节点大致流程为:tools/02.addnode.yml
 - [可选]新节点安装 chrony 时间同步
 - 新节点预处理 prepare
 - 新节点安装 docker 服务
-- 新节点安装 kube-node 服务
+- 新节点安装 kube_node 服务
 - 新节点安装网络插件相关

 ### 操作步骤
@@ -39,13 +39,13 @@ $ kubectl get pod -n kube-system
 目前 ezctl 暂不支持自动添加非标准 ssh 端口的节点,可以手动操作如下:

 - 假设待添加节点192.168.2.1,ssh 端口 10022;配置免密登录 ssh-copy-id -p 10022 192.168.2.1,按提示输入密码
-- 在 /etc/ansible/hosts文件 [kube-node] 组下添加一行:
+- 在 /etc/ansible/hosts文件 [kube_node] 组下添加一行:
 ```
 192.168.2.1 ansible_ssh_port=10022
 ```
 - 最后执行 `ansible-playbook /etc/ansible/tools/02.addnode.yml -e NODE_TO_ADD=192.168.2.1`

-## 3.删除 kube-node 节点
+## 3.删除 kube_node 节点

 删除 node 节点流程:tools/12.delnode.yml
 - 检测是否可以删除
@@ -8,7 +8,7 @@ CHANGELOG:
 - 集群安装:
 - 更新 calico 3.3.2,并保留3.2.4可选
 - 修复特定环境下lb节点变量LB_IF自动设置错误
-- 移除 kube-node csr 请求批准部分(PR #399)
+- 移除 kube_node csr 请求批准部分(PR #399)
 - 添加支持 RedHat (PR #431)
 - 修改 docker 存储的目录设置(PR #436)
 - 更新 kube-schedule 监听参数 (PR #440)
@@ -13,7 +13,7 @@ CHANGELOG:
 - helm/tiller: v2.12.3
 - 集群安装:
 - **增加添加/删除 etcd 节点**脚本和[文档](https://github.com/easzlab/kubeasz/blob/master/docs/op/op-etcd.md)
-- **增加可选配置附加负载均衡节点(ex-lb)**,可用于负载均衡 NodePort 方式暴露的服务
+- **增加可选配置附加负载均衡节点(ex_lb)**,可用于负载均衡 NodePort 方式暴露的服务
 - 更新删除节点脚本和[文档](https://github.com/easzlab/kubeasz/blob/master/docs/op/del_one_node.md)
 - 优化增加 node 和增加 master 节点流程
 - 更新 harbor 安装流程和文档
@@ -15,7 +15,7 @@ CHANGELOG: (0.6.x 版本以后)
 - 优化 ansible hosts 配置,更加精简、易用
 - 废弃 new-node/new-master/new-etcd 主机组,对应功能已集成在 easzctl 命令行
 - 废弃变量 K8S_VER,改为自动识别,避免手工配置错误
-- 迁移 basic_auth 相关配置至 roles:kube-master,增强初始安全性,且默认关闭apiserver的用户名/密码认证,详见 roles/kube-master/defaults/main.yml
+- 迁移 basic_auth 相关配置至 roles:kube_master,增强初始安全性,且默认关闭apiserver的用户名/密码认证,详见 roles/kube-master/defaults/main.yml
 - easzctl 提供以下集群层面操作
 - 切换/创建集群 context
 - 删除当前集群
@@ -10,11 +10,11 @@
 - 集群安装:
 - 废弃 ansible hosts 中 deploy 角色,精简保留2个预定义节点规划例子(example/hosts.xx)
 - 重构 prepare 安装流程(删除 deploy 角色,移除 lb 节点创建)
-- 调整 kube-master 安装流程
-- 调整 kube-node 安装流程(node 节点新增 haproxy 服务)
+- 调整 kube_master 安装流程
+- 调整 kube_node 安装流程(node 节点新增 haproxy 服务)
 - 调整 network 等其他安装流程
 - 精简 example hosts 配置文件及配置项
-- 调整 ex-lb 安装流程【可选】
+- 调整 ex_lb 安装流程【可选】
 - 添加 docker/containerd 安装时互斥判断
 - 新增 role: clean,重写清理脚本 99.clean.yml
 - 废弃 tools/clean_one_node.yml
@@ -38,7 +38,7 @@ CHANGELOG:
 - node 节点安装文档
 - ...
 - 集群操作管理文档更新(docs/op/op-index.md)
-- 新增可选外部负载均衡文档(docs/setup/ex-lb.md)
+- 新增可选外部负载均衡文档(docs/setup/ex_lb.md)
 - 新增容器化系统服务 haproxy/chrony 文档(docs/practice/dockerize_system_service.md)
 - 其他:
 - fix: 对已有集群进行安全加固时禁用 ip_forward 问题
@@ -24,7 +24,7 @@ CHANGELOG:
 - new logo
 - fix: 执行roles/cluster-storage/cluster-storage.yml 报错不存在`deploy`
 - fix: 部分os启用kube-reserved出错(提示/sys/fs/cgroup只读)
-- fix: ex-lb 组少量 keepalived 相关配置
+- fix: ex_lb 组少量 keepalived 相关配置
 - fix: 偶然出现docker安装时提示找不到变量`docker_ver`
 - fix: Ubuntu1804 pod内dns解析不到外网
 - fix: k8s 相关服务在接收SIGPIPE信号停止后不重启问题 #631 thx to gj19910723
@@ -23,7 +23,7 @@ CHANGELOG:
 - easzup: 修复安装 docker 逻辑 aa76da0f2ee2b01d47c28667feed36b6be778b17
 - 其他
 - fix: dashboard生成cluster-service #739
-- fix: ubuntu1804安装ex-lb失败问题
+- fix: ubuntu1804安装ex_lb失败问题
 - fix: calico的BGP RR模式下的bgppeer的nodeSelector错误 #741
 - fix: ectd集群有不正常节点时增/删etcd节点失败 #743
 - fix: kube-router 安装报错 #783
@@ -98,7 +98,6 @@ chmod +x ./ezdown
 ezctl new k8s-01
 2021-01-19 10:48:23 DEBUG generate custom cluster files in /etc/kubeasz/clusters/k8s-01
 2021-01-19 10:48:23 DEBUG set version of common plugins
-2021-01-19 10:48:23 DEBUG disable registry mirrors
 2021-01-19 10:48:23 DEBUG cluster k8s-01: files successfully created.
 2021-01-19 10:48:23 INFO next steps 1: to config '/etc/kubeasz/clusters/k8s-01/hosts'
 2021-01-19 10:48:23 INFO next steps 2: to config '/etc/kubeasz/clusters/k8s-01/config.yml'
@@ -1,4 +1,4 @@
-## 04-安装kube-master节点
+## 04-安装kube_master节点

 部署master节点主要包含三个组件`apiserver` `scheduler` `controller-manager`,其中:

@@ -39,8 +39,8 @@ roles/kube-master/
 "CN": "kubernetes",
 "hosts": [
 "127.0.0.1",
-{% if groups['ex-lb']|length > 0 %}
-"{{ hostvars[groups['ex-lb'][0]]['EX_APISERVER_VIP'] }}",
+{% if groups['ex_lb']|length > 0 %}
+"{{ hostvars[groups['ex_lb'][0]]['EX_APISERVER_VIP'] }}",
 {% endif %}
 "{{ inventory_hostname }}",
 "{{ CLUSTER_KUBERNETES_SVC_IP }}",
@@ -69,7 +69,7 @@ roles/kube-master/
 }
 ```
 - kubernetes 证书既是服务器证书,同时apiserver又作为客户端证书去访问etcd 集群;作为服务器证书需要设置hosts 指定使用该证书的IP 或域名列表,需要注意的是:
-- 如果配置 ex-lb,需要把 EX_APISERVER_VIP 也配置进去
+- 如果配置 ex_lb,需要把 EX_APISERVER_VIP 也配置进去
 - 如果需要外部访问 apiserver,需要在 defaults/main.yml 配置 MASTER_CERT_HOSTS
 - `kubectl get svc` 将看到集群中由api-server 创建的默认服务 `kubernetes`,因此也要把 `kubernetes` 服务名和各个服务域名也添加进去

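If you want to confirm the hosts list above actually ended up in the issued certificate, a quick check on a master node could look like this (certificate path as used elsewhere in these docs):

``` bash
# list the Subject Alternative Names baked into the apiserver certificate
openssl x509 -in /etc/kubernetes/ssl/kubernetes.pem -noout -text | grep -A1 "Subject Alternative Name"
```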
@@ -207,10 +207,10 @@ WantedBy=multi-user.target

 ``` bash
 # vi 04.kube-master.yml
-- hosts: kube-master
+- hosts: kube_master
   roles:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   # 禁止业务 pod调度到 master节点
   tasks:
   - name: 禁止业务 pod调度到 master节点
@@ -1,9 +1,9 @@
-## 05-安装kube-node节点
+## 05-安装kube_node节点

-`kube-node` 是集群中运行工作负载的节点,前置条件需要先部署好`kube-master`节点,它需要部署如下组件:
+`kube_node` 是集群中运行工作负载的节点,前置条件需要先部署好`kube_master`节点,它需要部署如下组件:

 + docker:运行容器
-+ kubelet: kube-node上最主要的组件
++ kubelet: kube_node上最主要的组件
 + kube-proxy: 发布应用服务与负载均衡
 + haproxy:用于请求转发到多个 apiserver,详见[HA-2x 架构](00-planning_and_overall_intro.md#ha-architecture)
 + calico: 配置容器网络 (或者其他网络组件)
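After this playbook finishes, a quick sanity check on a worker might look like the sketch below (service names as listed above; note haproxy is only installed on nodes that are not also masters in a multi-master setup):

``` bash
# components installed on a kube_node host should be active
systemctl is-active kubelet kube-proxy docker haproxy

# the node should register itself shortly afterwards (run where kubectl is configured)
kubectl get node -o wide
```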
@@ -13,5 +13,5 @@

 ## 下一步

-- [创建ex-lb节点组](ex-lb.md), 向集群外提供高可用apiserver
+- [创建ex_lb节点组](ex_lb.md), 向集群外提供高可用apiserver
 - [创建集群持久化存储](08-cluster-storage.md)
@@ -1,32 +1,17 @@
 ## EX-LB 负载均衡部署

-根据[HA 2x架构](00-planning_and_overall_intro.md),k8s集群自身高可用已经不依赖于外部 lb 服务;但是有时我们要从外部访问 apiserver(比如 CI 流程),就需要 ex-lb 来请求多个 apiserver;
+根据[HA 2x架构](00-planning_and_overall_intro.md),k8s集群自身高可用已经不依赖于外部 lb 服务;但是有时我们要从外部访问 apiserver(比如 CI 流程),就需要 ex_lb 来请求多个 apiserver;

-还有一种情况是需要[负载转发到ingress服务](../op/loadballance_ingress_nodeport.md),也需要部署ex-lb;
+还有一种情况是需要[负载转发到ingress服务](../op/loadballance_ingress_nodeport.md),也需要部署ex_lb;

-**注意:当遇到公有云环境无法自建 ex-lb 服务时,可以配置对应的云负载均衡服务**
+**注意:当遇到公有云环境无法自建 ex_lb 服务时,可以配置对应的云负载均衡服务**

-### ex-lb 服务组件
+### ex_lb 服务组件

-ex-lb 服务由 keepalived 和 haproxy 组成:
+ex_lb 服务由 keepalived 和 haproxy 组成:
 - haproxy:高效代理(四层模式)转发到多个 apiserver
 - keepalived:利用主备节点vrrp协议通信和虚拟地址,消除haproxy的单点故障

-``` bash
-roles/ex-lb/
-├── clean-ex-lb.yml
-├── defaults
-│   └── main.yml
-├── ex-lb.yml
-├── tasks
-│   └── main.yml
-└── templates
-├── haproxy.cfg.j2
-├── haproxy.service.j2
-├── keepalived-backup.conf.j2
-└── keepalived-master.conf.j2
-```
-
 Haproxy支持四层和七层负载,稳定性好,根据官方文档,HAProxy可以跑满10Gbps-New benchmark of HAProxy at 10 Gbps using Myricom's 10GbE NICs (Myri-10G PCI-Express);另外,openstack高可用也有用haproxy的。

 keepalived观其名可知,保持存活,它是基于VRRP协议保证所谓的高可用或热备的,这里用来预防haproxy的单点故障。
@@ -44,12 +29,12 @@ keepalived与haproxy配合,实现master的高可用过程如下:
 #### 配置haproxy (roles/ex-lb/templates/haproxy.cfg.j2)

 配置由全局配置和三个listen配置组成:
-- listen kube-master 用于转发至多个apiserver
+- listen kube_master 用于转发至多个apiserver
 - listen ingress-node 用于转发至node节点的ingress http服务,[参阅](../op/loadballance_ingress_nodeport.md)
 - listen ingress-node-tls 用于转发至node节点的ingress https服务

 如果用apt安装的话,可以在/usr/share/doc/haproxy目录下找到配置指南configuration.txt.gz,全局和默认配置这里不展开,关注`listen` 代理设置模块,各项配置说明:
-+ 名称 kube-master
++ 名称 kube_master
 + bind 监听客户端请求的地址/端口,保证监听master的VIP地址和端口
 + mode 选择四层负载模式 (当然你也可以选择七层负载,请查阅指南,适当调整)
 + balance 选择负载算法 (负载算法也有很多供选择)
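Putting those options together, the rendered `listen kube_master` block typically ends up looking something like the sketch below; the VIP, port and backend addresses are placeholders taken from the example inventories in this commit, and the exact options are whatever haproxy.cfg.j2 renders in your checkout:

``` bash
# illustrative rendering only -- roles/ex-lb/templates/haproxy.cfg.j2 is authoritative
listen kube_master
bind 0.0.0.0:8443
mode tcp
option tcplog
balance source
server master1 192.168.1.1:6443 check inter 10000 fall 2 rise 2 weight 1
server master2 192.168.1.2:6443 check inter 10000 fall 2 rise 2 weight 1
```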
@@ -71,12 +56,12 @@ vrrp_script check-haproxy {
 weight -60
 }

-vrrp_instance VI-kube-master {
+vrrp_instance VI-kube_master {
 state MASTER
 priority 120
 unicast_src_ip {{ inventory_hostname }}
 unicast_peer {
-{% for h in groups['ex-lb'] %}{% if h != inventory_hostname %}
+{% for h in groups['ex_lb'] %}{% if h != inventory_hostname %}
 {{ h }}
 {% endif %}{% endfor %}
 }
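A hedged way to verify the resulting keepalived setup on the lb nodes: the virtual address (EX_APISERVER_VIP in the hosts file, 192.168.1.250 in the sample inventory) should be held by exactly one node at a time:

``` bash
# check the service and look for the virtual address on the MASTER lb node
systemctl status keepalived --no-pager
ip addr | grep 192.168.1.250
```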
@@ -68,12 +68,11 @@ Use "ezctl help <command>" for more information about a given command.
 ~# ezctl new k8s-01
 2021-01-19 10:48:23 DEBUG generate custom cluster files in /etc/kubeasz/clusters/k8s-01
 2021-01-19 10:48:23 DEBUG set version of common plugins
-2021-01-19 10:48:23 DEBUG disable registry mirrors
 2021-01-19 10:48:23 DEBUG cluster k8s-01: files successfully created.
 2021-01-19 10:48:23 INFO next steps 1: to config '/etc/kubeasz/clusters/k8s-01/hosts'
 2021-01-19 10:48:23 INFO next steps 2: to config '/etc/kubeasz/clusters/k8s-01/config.yml'
 ```
-然后根据提示配置'/etc/kubeasz/clusters/k8s-01/hosts' 和 '/etc/kubeasz/clusters/k8s-01/config.yml';为方便测试我们在hosts里面设置单节点集群(etcd/kube-master/kube-node配置同一个节点,注意节点需先设置ssh免密码登陆), config.yml 使用默认配置即可。
+然后根据提示配置'/etc/kubeasz/clusters/k8s-01/hosts' 和 '/etc/kubeasz/clusters/k8s-01/config.yml';为方便测试我们在hosts里面设置单节点集群(etcd/kube_master/kube_node配置同一个节点,注意节点需先设置ssh免密码登陆), config.yml 使用默认配置即可。

 - 2.然后开始安装集群

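For that single-node test layout, the relevant part of the generated hosts file ends up looking roughly like the sketch below (192.168.1.10 stands in for your own host; remaining groups and variables are left at their defaults):

``` bash
~# cat /etc/kubeasz/clusters/k8s-01/hosts
[etcd]
192.168.1.10

[kube_master]
192.168.1.10

[kube_node]
192.168.1.10
# ... other groups ([harbor], [ex_lb], [chrony]) and variables omitted in this sketch
```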
@@ -60,9 +60,9 @@ roles/calico/

 + 安装前检查主机名不能有大写字母,只能由`小写字母` `-` `.` 组成 (name must consist of lower case alphanumeric characters, '-' or '.' (regex: [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*))(calico-node v3.0.6以上已经解决主机大写字母问题)
 + **安装前必须确保各节点主机名不重复** ,calico node name 由节点主机名决定,如果重复,那么重复节点在etcd中只存储一份配置,BGP 邻居也不会建立。
-+ 安装之前必须确保`kube-master`和`kube-node`节点已经成功部署
++ 安装之前必须确保`kube_master`和`kube_node`节点已经成功部署
 + 只需要在任意装有kubectl客户端的节点运行 `kubectl apply -f`安装即可
-+ 等待15s后(视网络拉取calico相关镜像速度),calico 网络插件安装完成,删除之前kube-node安装时默认cni网络配置
++ 等待15s后(视网络拉取calico相关镜像速度),calico 网络插件安装完成,删除之前kube_node安装时默认cni网络配置

 ### [可选]配置calicoctl工具 [calicoctl.cfg.j2](roles/calico/templates/calicoctl.cfg.j2)

@@ -86,9 +86,9 @@ FLANNEL_IPMASQ=true
 ```
 ### 安装 flannel网络

-+ 安装之前必须确保kube-master和kube-node节点已经成功部署
++ 安装之前必须确保kube_master和kube_node节点已经成功部署
 + 只需要在任意装有kubectl客户端的节点运行 kubectl create安装即可
-+ 等待15s后(视网络拉取相关镜像速度),flannel 网络插件安装完成,删除之前kube-node安装时默认cni网络配置
++ 等待15s后(视网络拉取相关镜像速度),flannel 网络插件安装完成,删除之前kube_node安装时默认cni网络配置

 ### 验证flannel网络

@@ -113,7 +113,7 @@ flannel_offline: "flannel_{{ flannelVer }}.tar"
 CALICO_IPV4POOL_IPIP: "Always"

 # [calico]设置 calico-node使用的host IP,bgp邻居通过该地址建立,可手工指定也可以自动发现
-IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube-master'][0] }}"
+IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube_master'][0] }}"

 # [calico]设置calico 网络 backend: brid, vxlan, none
 CALICO_NETWORKING_BACKEND: "brid"
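Once calico is running, one way to see which host address each node actually advertised to its BGP peers (and hence whether the autodetection above picked the intended interface) is the status check below, assuming calicoctl has been configured as described in the calico doc earlier in this commit:

``` bash
# run on any node with calicoctl configured; shows peer addresses and BGP session state
calicoctl node status
```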
@@ -139,7 +139,7 @@ cilium_offline: "cilium_{{ cilium_ver }}.tar"

 # ------------------------------------------- kube-ovn
 # [kube-ovn]选择 OVN DB and OVN Control Plane 节点,默认为第一个master节点
-OVN_DB_NODE: "{{ groups['kube-master'][0] }}"
+OVN_DB_NODE: "{{ groups['kube_master'][0] }}"

 # [kube-ovn]离线镜像tar包
 kube_ovn_ver: "__kube_ovn__"
@@ -3,11 +3,11 @@
 192.168.1.1

 # master node(s)
-[kube-master]
+[kube_master]
 192.168.1.1

 # work node(s)
-[kube-node]
+[kube_node]
 192.168.1.1

 # [optional] harbor server, a private docker registry
|
||||||
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no SELF_SIGNED_CERT=yes
|
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no SELF_SIGNED_CERT=yes
|
||||||
|
|
||||||
# [optional] loadbalance for accessing k8s from outside
|
# [optional] loadbalance for accessing k8s from outside
|
||||||
[ex-lb]
|
[ex_lb]
|
||||||
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
|
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
|
||||||
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
|
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
|
||||||
|
|
||||||
|
|
|
@@ -5,12 +5,12 @@
 192.168.1.3

 # master node(s)
-[kube-master]
+[kube_master]
 192.168.1.1
 192.168.1.2

 # work node(s)
-[kube-node]
+[kube_node]
 192.168.1.3
 192.168.1.4

@@ -21,7 +21,7 @@
 #192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no SELF_SIGNED_CERT=yes

 # [optional] loadbalance for accessing k8s from outside
-[ex-lb]
+[ex_lb]
 #192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
 #192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443

ezctl
@@ -294,10 +294,10 @@ function add-node() {
 [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error "Invalid ip add:$2"; return 1; }

 # check if the new node already exsited
-sed -n '/^\[kube-master/,/^\[harbor/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "node $2 already existed in $BASE/clusters/$1/hosts"; return 2; }
+sed -n '/^\[kube_master/,/^\[harbor/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "node $2 already existed in $BASE/clusters/$1/hosts"; return 2; }

-logger info "add $2 into 'kube-node' group"
-sed -i "/\[kube-node/a $2 NEW_NODE=yes ${@:3}" "$BASE/clusters/$1/hosts"
+logger info "add $2 into 'kube_node' group"
+sed -i "/\[kube_node/a $2 NEW_NODE=yes ${@:3}" "$BASE/clusters/$1/hosts"

 logger info "start to add a work node:$2 into cluster:$1"
 ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/22.addnode.yml" -e "NODE_TO_ADD=$2" -e "@clusters/$1/config.yml"
@@ -308,15 +308,15 @@ function add-master() {
 [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error "Invalid ip add:$2"; return 1; }

 # check if the new master already exsited
-sed -n '/^\[kube-master/,/^\[kube-node/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "master $2 already existed!"; return 2; }
+sed -n '/^\[kube_master/,/^\[kube_node/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "master $2 already existed!"; return 2; }

-logger info "add $2 into 'kube-master' group"
-sed -i "/\[kube-master/a $2 NEW_MASTER=yes ${@:3}" "$BASE/clusters/$1/hosts"
+logger info "add $2 into 'kube_master' group"
+sed -i "/\[kube_master/a $2 NEW_MASTER=yes ${@:3}" "$BASE/clusters/$1/hosts"

 logger info "start to add a master node:$2 into cluster:$1"
 ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/23.addmaster.yml" -e "NODE_TO_ADD=$2" -e "@clusters/$1/config.yml"

-logger info "reconfigure and restart the haproxy service on 'kube-node' nodes"
+logger info "reconfigure and restart the haproxy service on 'kube_node' nodes"
 ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/05.kube-node.yml" -t restart_lb -e MASTER_CHG=yes -e "@clusters/$1/config.yml"
 }

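For context, these helpers are normally reached through the ezctl front end rather than called directly; a plausible usage, with the cluster name and IPs purely as placeholders, looks like the sketch below (see `ezctl help` for the authoritative syntax):

``` bash
ezctl add-node   k8s-01 192.168.1.5
ezctl add-master k8s-01 192.168.1.6
ezctl add-etcd   k8s-01 192.168.1.7
```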
@@ -325,7 +325,7 @@ function add-etcd() {
 [[ $2 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { logger error "Invalid ip add:$2"; return 1; }

 # check if the new node already exsited
-sed -n '/^\[etcd/,/^\[kube-master/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "etcd $2 already existed!"; return 2; }
+sed -n '/^\[etcd/,/^\[kube_master/p' "$BASE/clusters/$1/hosts"|grep "^$2[^0-9]*$" && { logger error "etcd $2 already existed!"; return 2; }

 logger info "add $2 into 'etcd' group"
 sed -i "/\[etcd/a $2 NEW_ETCD=yes ${@:3}" "$BASE/clusters/$1/hosts"
@@ -372,7 +372,7 @@ function del-master() {
 logger info "reconfig kubeconfig in ansible manage node"
 ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/roles/deploy/deploy.yml" -t create_kctl_cfg -e "@clusters/$1/config.yml"

-logger info "reconfigure and restart the haproxy service on 'kube-node' nodes"
+logger info "reconfigure and restart the haproxy service on 'kube_node' nodes"
 ansible-playbook -i "$BASE/clusters/$1/hosts" "$BASE/playbooks/05.kube-node.yml" -t restart_lb -e MASTER_CHG=yes -e "@clusters/$1/config.yml"
 }

@@ -1,9 +1,9 @@
 # [optional] to synchronize system time of nodes with 'chrony'
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   - etcd
-  - ex-lb
+  - ex_lb
   - chrony
   roles:
   - { role: chrony, when: "groups['chrony']|length > 0" }
@@ -15,8 +15,8 @@

 # prepare tasks for all nodes
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   - etcd
   roles:
   - prepare
@@ -1,7 +1,7 @@
 # to install a container runtime
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   roles:
   - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
   - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }
@@ -1,12 +1,12 @@
-# to set up 'kube-master' nodes
-- hosts: kube-master
+# to set up 'kube_master' nodes
+- hosts: kube_master
   roles:
   - kube-master
   - kube-node
   tasks:
   - name: Making master nodes SchedulingDisabled
     shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
-    when: "inventory_hostname not in groups['kube-node']"
+    when: "inventory_hostname not in groups['kube_node']"
     ignore_errors: true

   - name: Setting master role name
@@ -1,4 +1,4 @@
-# to set up 'kube-node' nodes
-- hosts: kube-node
+# to set up 'kube_node' nodes
+- hosts: kube_node
   roles:
-  - { role: kube-node, when: "inventory_hostname not in groups['kube-master']" }
+  - { role: kube-node, when: "inventory_hostname not in groups['kube_master']" }
@@ -1,7 +1,7 @@
 # to install network plugin, only one can be choosen
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   roles:
   - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
   - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
@@ -1,5 +1,5 @@
 # to install clust-addons
 - hosts:
-  - kube-node
+  - kube_node
   roles:
   - cluster-addon
@@ -16,8 +16,8 @@
     when: hostvars[groups.harbor[0]]['SELF_SIGNED_CERT'] == 'yes'

 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   tasks:
   - name: Define 'harbor_hostname', a domain name
     set_fact: harbor_hostname={{ hostvars[groups.harbor[0]]['HARBOR_DOMAIN'] }}
@@ -17,7 +17,7 @@
   tasks:
   - name: Making master nodes SchedulingDisabled
     shell: "{{ bin_dir }}/kubectl cordon {{ NODE_TO_ADD }} "
-    when: "inventory_hostname not in groups['kube-node']"
+    when: "inventory_hostname not in groups['kube_node']"
     ignore_errors: true

   - name: Setting master role name
@@ -73,7 +73,7 @@

 # lineinfile is inadequate to delete lines between some specific line range
 - name: remove the etcd's node entry in hosts
-  shell: 'sed -i "/^\[etcd/,/^\[kube-master/ {/^{{ ETCD_TO_DEL }}[^0-9]/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts'
+  shell: 'sed -i "/^\[etcd/,/^\[kube_master/ {/^{{ ETCD_TO_DEL }}[^0-9]/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts'
   args:
     warn: false
   when: "groups['etcd']|length > 1 and ETCD_TO_DEL in groups['etcd']"
@@ -2,8 +2,8 @@

 - hosts: localhost
   tasks:
-  - fail: msg="you CAN NOT delete the last member of kube-master!"
-    when: "groups['kube-master']|length < 2 and NODE_TO_DEL in groups['kube-master']"
+  - fail: msg="you CAN NOT delete the last member of kube_master!"
+    when: "groups['kube_master']|length < 2 and NODE_TO_DEL in groups['kube_master']"

   - name: run kubectl drain @{{ NODE_TO_DEL }}
     shell: "{{ base_dir }}/bin/kubectl drain {{ NODE_TO_DEL }} --ignore-daemonsets --delete-emptydir-data --force"
@@ -26,6 +26,6 @@

 # lineinfile is inadequate to delete lines between some specific line range
 - name: remove the node's entry in hosts
-  shell: 'sed -i "/^\[kube-node/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts'
+  shell: 'sed -i "/^\[kube_node/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts'
   args:
     warn: false
@@ -1,9 +1,9 @@
-# WARNNING: this playbook will clean the kube-master node {{ NODE_TO_DEL }}
+# WARNNING: this playbook will clean the kube_master node {{ NODE_TO_DEL }}

 - hosts: localhost
   tasks:
-  - fail: msg="you CAN NOT delete the last member of kube-master!"
-    when: "groups['kube-master']|length < 2 and NODE_TO_DEL in groups['kube-master']"
+  - fail: msg="you CAN NOT delete the last member of kube_master!"
+    when: "groups['kube_master']|length < 2 and NODE_TO_DEL in groups['kube_master']"

   - name: run kubectl drain @{{ NODE_TO_DEL }}
     shell: "{{ base_dir }}/bin/kubectl drain {{ NODE_TO_DEL }} --ignore-daemonsets --delete-emptydir-data --force"
@@ -27,6 +27,6 @@

 # lineinfile is inadequate to delete lines between some specific line range
 - name: remove the master's entry in hosts
-  shell: 'sed -i "/^\[kube-master/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts'
+  shell: 'sed -i "/^\[kube_master/,/^\[harbor/ {/^{{ NODE_TO_DEL }}[^0-9]*$/d}" {{ base_dir }}/clusters/{{ CLUSTER }}/hosts'
   args:
     warn: false
@@ -1,9 +1,9 @@
 # [optional] to synchronize time of nodes with 'chrony'
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   - etcd
-  - ex-lb
+  - ex_lb
   - chrony
   roles:
   - { role: chrony, when: "groups['chrony']|length > 0" }
@@ -15,8 +15,8 @@

 # prepare tasks for all nodes
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   - etcd
   roles:
   - prepare
@@ -28,36 +28,36 @@

 # to install container runtime
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   roles:
   - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
   - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }

-# to set up 'kube-master' nodes
-- hosts: kube-master
+# to set up 'kube_master' nodes
+- hosts: kube_master
   roles:
   - kube-master
   - kube-node
   tasks:
   - name: Making master nodes SchedulingDisabled
     shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
-    when: "inventory_hostname not in groups['kube-node']"
+    when: "inventory_hostname not in groups['kube_node']"
     ignore_errors: true

   - name: Setting master role name
     shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
     ignore_errors: true

-# to set up 'kube-node' nodes
-- hosts: kube-node
+# to set up 'kube_node' nodes
+- hosts: kube_node
   roles:
-  - { role: kube-node, when: "inventory_hostname not in groups['kube-master']" }
+  - { role: kube-node, when: "inventory_hostname not in groups['kube_master']" }

 # to install network plugin, only one can be choosen
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   roles:
   - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
   - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
@@ -67,6 +67,6 @@

 # to install cluster-addons
 - hosts:
-  - kube-node
+  - kube_node
   roles:
   - cluster-addon
@@ -3,9 +3,9 @@
   - name: starting etcd cluster
     service: name=etcd state=started enabled=yes

-- hosts: kube-master
+- hosts: kube_master
   tasks:
-  - name: starting kube-master services
+  - name: starting kube_master services
     service: name={{ item }} state=started enabled=yes
     with_items:
     - kube-apiserver
@@ -13,8 +13,8 @@
     - kube-scheduler

 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   tasks:
   - name: starting docker
     service: name=docker state=started enabled=yes
@@ -24,19 +24,19 @@
     service: name=containerd state=started enabled=yes
     when: "CONTAINER_RUNTIME == 'containerd'"

-  - name: starting haproxy on kube-node
+  - name: starting haproxy on kube_node
     service: name=haproxy state=started enabled=yes
     when:
-    - "inventory_hostname not in groups['kube-master']"
-    - "groups['kube-master']|length > 1"
+    - "inventory_hostname not in groups['kube_master']"
+    - "groups['kube_master']|length > 1"

-  - name: starting kube-node services
+  - name: starting kube_node services
     service: name={{ item }} state=started enabled=yes
     with_items:
     - kubelet
     - kube-proxy

-- hosts: ex-lb
+- hosts: ex_lb
   tasks:
   - name: starting external loadbalance
     service: name={{ item }} state=started enabled=yes
@@ -1,6 +1,6 @@
-- hosts: kube-master
+- hosts: kube_master
   tasks:
-  - name: stopping kube-master services
+  - name: stopping kube_master services
     service: name={{ item }} state=stopped enabled=no
     with_items:
     - kube-apiserver
@@ -12,7 +12,7 @@
   - name: stopping etcd cluster
     service: name=etcd state=stopped enabled=no

-- hosts: ex-lb
+- hosts: ex_lb
   tasks:
   - name: stopping external loadbalance
     service: name={{ item }} state=stopped enabled=no
@@ -21,16 +21,16 @@
     - keepalived

 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   tasks:
-  - name: stopping haproxy on kube-node
+  - name: stopping haproxy on kube_node
     service: name=haproxy state=stopped enabled=no
     when:
-    - "inventory_hostname not in groups['kube-master']"
-    - "groups['kube-master']|length > 1"
+    - "inventory_hostname not in groups['kube_master']"
+    - "groups['kube_master']|length > 1"

-  - name: stopping kube-node services
+  - name: stopping kube_node services
     service: name={{ item }} state=stopped enabled=no
     with_items:
     - kubelet
@@ -3,7 +3,7 @@
 # Usage: ezctl upgrade <cluster_name>

 # check k8s version
-- hosts: kube-master
+- hosts: kube_master
   tasks:
   - name: get running k8s version
     shell: "{{ bin_dir }}/kube-apiserver --version"
@@ -30,13 +30,13 @@

 # update masters
 - hosts:
-  - kube-master
+  - kube_master
   roles:
   - kube-master
   - kube-node

 # update nodes
 - hosts:
-  - kube-node
+  - kube_node
   roles:
-  - { role: kube-node, when: "inventory_hostname not in groups['kube-master']" }
+  - { role: kube-node, when: "inventory_hostname not in groups['kube_master']" }
@@ -2,9 +2,9 @@
 # Make sure you know what you are doing.

 - hosts:
-  - kube-master
-  - kube-node
-  - ex-lb
+  - kube_master
+  - kube_node
+  - ex_lb
   - etcd
   vars:
     DEL_MASTER: "yes"
@@ -1,8 +1,8 @@
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   - etcd
-  - ex-lb
+  - ex_lb
   - chrony
   roles:
   - { role: chrony, when: "groups['chrony']|length > 0" }
@@ -1,5 +1,5 @@
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   roles:
   - cilium
@@ -1,7 +1,7 @@
-# 是否删除 kube-master 相关服务
+# 是否删除 kube_master 相关服务
 DEL_MASTER: "no"

-# 是否删除 kube-node 相关服务
+# 是否删除 kube_node 相关服务
 DEL_NODE: "no"

 # 是否删除 etc 相关服务
@@ -12,4 +12,4 @@
   with_items:
   - "/etc/haproxy"
   - "/etc/keepalived"
-  when: "inventory_hostname in groups['kube-node'] or inventory_hostname in groups['ex-lb']"
+  when: "inventory_hostname in groups['kube_node'] or inventory_hostname in groups['ex_lb']"
@@ -1,18 +1,18 @@
-# to clean 'kube-master' nodes
-- name: stop and disable kube-master service
+# to clean 'kube_master' nodes
+- name: stop and disable kube_master service
   service: name={{ item }} state=stopped enabled=no
   with_items:
   - kube-apiserver
   - kube-controller-manager
   - kube-scheduler
   ignore_errors: true
-  when: "inventory_hostname in groups['kube-master']"
+  when: "inventory_hostname in groups['kube_master']"

-- name: remove files and dirs of 'kube-master' nodes
+- name: remove files and dirs of 'kube_master' nodes
   file: name={{ item }} state=absent
   with_items:
   - "/var/run/kubernetes"
   - "/etc/systemd/system/kube-apiserver.service"
   - "/etc/systemd/system/kube-controller-manager.service"
   - "/etc/systemd/system/kube-scheduler.service"
-  when: "inventory_hostname in groups['kube-master']"
+  when: "inventory_hostname in groups['kube_master']"
@@ -1,6 +1,6 @@
-# to clean 'kube-node' nodes
+# to clean 'kube_node' nodes
 - block:
-  - name: stop and disable kube-node service
+  - name: stop and disable kube_node service
     service: name={{ item }} state=stopped enabled=no
     with_items:
     - kubelet
@@ -13,7 +13,7 @@
       warn: false
     ignore_errors: true

-  - name: remove files and dirs of 'kube-node' nodes
+  - name: remove files and dirs of 'kube_node' nodes
     file: name={{ item }} state=absent
     with_items:
     - "/var/lib/kubelet/"
@@ -160,4 +160,4 @@
 # && iptables -F -t raw && iptables -X -t raw \
 # && iptables -F -t mangle && iptables -X -t mangle"

-  when: "inventory_hostname in groups['kube-master'] or inventory_hostname in groups['kube-node']"
+  when: "inventory_hostname in groups['kube_master'] or inventory_hostname in groups['kube_node']"
@@ -58,7 +58,7 @@ kubelet:
 kubeControllerManager:
   enabled: true
   endpoints:
-{% for h in groups['kube-master'] %}
+{% for h in groups['kube_master'] %}
   - {{ h }}
 {% endfor %}

@@ -89,7 +89,7 @@ kubeEtcd:
 kubeScheduler:
   enabled: true
   endpoints:
-{% for h in groups['kube-master'] %}
+{% for h in groups['kube_master'] %}
   - {{ h }}
 {% endfor %}

@@ -97,11 +97,11 @@ kubeScheduler:
 kubeProxy:
   enabled: true
   endpoints:
-{% for h in groups['kube-master'] %}
+{% for h in groups['kube_master'] %}
   - {{ h }}
 {% endfor %}
-{% for h in groups['kube-node'] %}
-{% if h not in groups['kube-master'] %}
+{% for h in groups['kube_node'] %}
+{% if h not in groups['kube_master'] %}
   - {{ h }}
 {% endif %}
 {% endfor %}
@@ -46,8 +46,11 @@
 - import_tasks: create-kube-scheduler-kubeconfig.yml

 # ansible 控制端一些易用性配置
-- name: 本地创建 ezctl 工具的软连接
-  file: src={{ base_dir }}/ezctl dest=/usr/bin/ezctl state=link
+- name: 本地创建 ezdown/ezctl 工具的软连接
+  file: src={{ base_dir }}/{{ item }} dest=/usr/bin/{{ item }} state=link
+  with_items:
+  - ezdown
+  - ezctl

 - name: ansible 控制端创建 kubectl 软链接
   file: src={{ base_dir }}/bin/kubectl dest=/usr/bin/kubectl state=link
@@ -1,5 +1,5 @@
 # apiserver 默认第一个master节点
-KUBE_APISERVER: "https://{{ groups['kube-master'][0] }}:6443"
+KUBE_APISERVER: "https://{{ groups['kube_master'][0] }}:6443"

 #
 ADD_KCFG: false
@@ -1,5 +1,5 @@
 - hosts:
-  - ex-lb
+  - ex_lb
   tasks:
   - block:
     - name: stop and disable chrony in Ubuntu
@@ -1,3 +1,3 @@
-- hosts: ex-lb
+- hosts: ex_lb
   roles:
   - ex-lb
@@ -1,7 +1,7 @@
-# ex-lb 节点成员不能同时是 kube-node 节点,因为它们都需要安装 haproxy
+# ex_lb 节点成员不能同时是 kube_node 节点,因为它们都需要安装 haproxy
 - name: fail info1
-  fail: msg="an 'ex-lb' node CAN NOT be a 'kube-node' node at the same time"
-  when: "inventory_hostname in groups['kube-node']"
+  fail: msg="an 'ex_lb' node CAN NOT be a 'kube_node' node at the same time"
+  when: "inventory_hostname in groups['kube_node']"

 # 自动设置LB节点变量'LB_IF'
 - name: 注册变量 LB_IF_TMP
@@ -13,14 +13,14 @@ defaults
     timeout client 10m
     timeout server 10m

-listen kube-master
+listen kube_master
     bind 0.0.0.0:{{ EX_APISERVER_PORT }}
     mode tcp
     option tcplog
     option dontlognull
     option dontlog-normal
     balance {{ BALANCE_ALG }}
-{% for host in groups['kube-master'] %}
+{% for host in groups['kube_master'] %}
     server {{ host }} {{ host }}:6443 check inter 5s fall 2 rise 2 weight 1
 {% endfor %}

@@ -32,12 +32,12 @@ listen ingress-node
     option dontlognull
     option dontlog-normal
     balance {{ BALANCE_ALG }}
-{% if groups['kube-node']|length > 3 %}
-    server {{ groups['kube-node'][0] }} {{ groups['kube-node'][0] }}:23456 check inter 5s fall 2 rise 2 weight 1
-    server {{ groups['kube-node'][1] }} {{ groups['kube-node'][1] }}:23456 check inter 5s fall 2 rise 2 weight 1
-    server {{ groups['kube-node'][2] }} {{ groups['kube-node'][2] }}:23456 check inter 5s fall 2 rise 2 weight 1
+{% if groups['kube_node']|length > 3 %}
+    server {{ groups['kube_node'][0] }} {{ groups['kube_node'][0] }}:23456 check inter 5s fall 2 rise 2 weight 1
+    server {{ groups['kube_node'][1] }} {{ groups['kube_node'][1] }}:23456 check inter 5s fall 2 rise 2 weight 1
+    server {{ groups['kube_node'][2] }} {{ groups['kube_node'][2] }}:23456 check inter 5s fall 2 rise 2 weight 1
 {% else %}
-{% for host in groups['kube-node'] %}
+{% for host in groups['kube_node'] %}
     server {{ host }} {{ host }}:23456 check inter 5s fall 2 rise 2 weight 1
 {% endfor %}
 {% endif %}
@@ -51,12 +51,12 @@ listen ingress-node-tls
     option dontlognull
     option dontlog-normal
     balance {{ BALANCE_ALG }}
-{% if groups['kube-node']|length > 3 %}
-    server {{ groups['kube-node'][0] }} {{ groups['kube-node'][0] }}:23457 check inter 5s fall 2 rise 2 weight 1
-    server {{ groups['kube-node'][1] }} {{ groups['kube-node'][1] }}:23457 check inter 5s fall 2 rise 2 weight 1
-    server {{ groups['kube-node'][2] }} {{ groups['kube-node'][2] }}:23457 check inter 5s fall 2 rise 2 weight 1
+{% if groups['kube_node']|length > 3 %}
+    server {{ groups['kube_node'][0] }} {{ groups['kube_node'][0] }}:23457 check inter 5s fall 2 rise 2 weight 1
+    server {{ groups['kube_node'][1] }} {{ groups['kube_node'][1] }}:23457 check inter 5s fall 2 rise 2 weight 1
+    server {{ groups['kube_node'][2] }} {{ groups['kube_node'][2] }}:23457 check inter 5s fall 2 rise 2 weight 1
 {% else %}
-{% for host in groups['kube-node'] %}
+{% for host in groups['kube_node'] %}
     server {{ host }} {{ host }}:23457 check inter 5s fall 2 rise 2 weight 1
 {% endfor %}
 {% endif %}
@@ -9,12 +9,12 @@ vrrp_script check-haproxy {
     weight -60
 }

-vrrp_instance VI-kube-master {
+vrrp_instance VI-kube_master {
     state BACKUP
     priority {{ 119 | random(61, 1) }}
     unicast_src_ip {{ inventory_hostname }}
     unicast_peer {
-{% for h in groups['ex-lb'] %}{% if h != inventory_hostname %}
+{% for h in groups['ex_lb'] %}{% if h != inventory_hostname %}
         {{ h }}
 {% endif %}{% endfor %}
     }
@@ -9,12 +9,12 @@ vrrp_script check-haproxy {
     weight -60
 }

-vrrp_instance VI-kube-master {
+vrrp_instance VI-kube_master {
     state MASTER
     priority 120
     unicast_src_ip {{ inventory_hostname }}
     unicast_peer {
-{% for h in groups['ex-lb'] %}{% if h != inventory_hostname %}
+{% for h in groups['ex_lb'] %}{% if h != inventory_hostname %}
         {{ h }}
 {% endif %}{% endfor %}
     }
@@ -1,4 +1,4 @@
-- name: 下载 kube-master 二进制
+- name: 下载 kube_master 二进制
   copy: src={{ base_dir }}/bin/{{ item }} dest={{ bin_dir }}/{{ item }} mode=0755
   with_items:
   - kube-apiserver
@@ -2,10 +2,10 @@
   "CN": "kubernetes",
   "hosts": [
     "127.0.0.1",
-{% if groups['ex-lb']|length > 0 %}
-    "{{ hostvars[groups['ex-lb'][0]]['EX_APISERVER_VIP'] }}",
+{% if groups['ex_lb']|length > 0 %}
+    "{{ hostvars[groups['ex_lb'][0]]['EX_APISERVER_VIP'] }}",
 {% endif %}
-{% for host in groups['kube-master'] %}
+{% for host in groups['kube_master'] %}
     "{{ host }}",
 {% endfor %}
     "{{ CLUSTER_KUBERNETES_SVC_IP }}",
@@ -1,4 +1,4 @@
-- name: 创建kube-node 相关目录
+- name: 创建kube_node 相关目录
   file: name={{ item }} state=directory
   with_items:
   - /var/lib/kubelet
@@ -18,7 +18,7 @@

 # 每个 node 节点运行 haproxy 连接到多个 apiserver
 - import_tasks: node_lb.yml
-  when: "inventory_hostname not in groups['kube-master']"
+  when: "inventory_hostname not in groups['kube_master']"

 - name: 替换 kubeconfig 的 apiserver 地址
   lineinfile:
@@ -1,7 +1,7 @@
-# kube-node 节点成员不能同时是 ex-lb 节点,因为它们都需要安装 haproxy
+# kube_node 节点成员不能同时是 ex_lb 节点,因为它们都需要安装 haproxy
 - name: fail info1
-  fail: msg="an 'kube-node' node CAN NOT be a 'ex-lb' node at the same time"
-  when: "inventory_hostname in groups['ex-lb']"
+  fail: msg="an 'kube_node' node CAN NOT be a 'ex_lb' node at the same time"
+  when: "inventory_hostname in groups['ex_lb']"
   tags: restart_lb

 - name: 安装 haproxy
|
||||||
# 仅 master 节点数大于1时才启动haproxy
|
# 仅 master 节点数大于1时才启动haproxy
|
||||||
- name: 开启haproxy服务
|
- name: 开启haproxy服务
|
||||||
shell: systemctl start haproxy
|
shell: systemctl start haproxy
|
||||||
when: "groups['kube-master']|length > 1"
|
when: "groups['kube_master']|length > 1"
|
||||||
tags: restart_lb
|
tags: restart_lb
|
||||||
|
|
||||||
# master 节点从1个增加到2个时候,需要修改如下配置
|
# master 节点从1个增加到2个时候,需要修改如下配置
|
||||||
|
@@ -56,11 +56,11 @@
   - "/etc/kubernetes/kubelet.kubeconfig"
   - "/etc/kubernetes/kube-proxy.kubeconfig"

-- name: restart kube-node service
+- name: restart kube_node service
   service: name={{ item }} state=restarted
   with_items:
   - kubelet
   - kube-proxy
   ignore_errors: true
-  when: "MASTER_CHG == 'yes' and groups['kube-master']|length < 3"
+  when: "MASTER_CHG == 'yes' and groups['kube_master']|length < 3"
   tags: restart_lb
@@ -12,13 +12,13 @@ defaults
     timeout client 10m
     timeout server 10m

-listen kube-master
+listen kube_master
    bind 127.0.0.1:6443
    mode tcp
    option tcplog
    option dontlognull
    option dontlog-normal
    balance {{ BALANCE_ALG }}
-{% for host in groups['kube-master'] %}
+{% for host in groups['kube_master'] %}
    server {{ host }} {{ host }}:6443 check inter 10s fall 2 rise 2 weight 1
 {% endfor %}
@@ -2,13 +2,13 @@
 CLUSTER_DNS_SVC_IP: "{{ SERVICE_CIDR | ipaddr('net') | ipaddr(2) | ipaddr('address') }}"

 # 设置 APISERVER 地址
-KUBE_APISERVER: "{%- if inventory_hostname in groups['kube-master'] -%} \
+KUBE_APISERVER: "{%- if inventory_hostname in groups['kube_master'] -%} \
                  https://{{ inventory_hostname }}:6443 \
                  {%- else -%} \
-                 {%- if groups['kube-master']|length > 1 -%} \
+                 {%- if groups['kube_master']|length > 1 -%} \
                  https://127.0.0.1:6443 \
                  {%- else -%} \
-                 https://{{ groups['kube-master'][0] }}:6443 \
+                 https://{{ groups['kube_master'][0] }}:6443 \
                  {%- endif -%} \
                  {%- endif -%}"

@@ -1,5 +1,5 @@
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   roles:
   - kube-router
@@ -1,9 +1,9 @@
 # [可选]操作系统安全加固 https://github.com/dev-sec/ansible-os-hardening
 - hosts:
-  - kube-master
-  - kube-node
+  - kube_master
+  - kube_node
   - etcd
-  - ex-lb
+  - ex_lb
   - chrony
   vars:
     os_security_users_allow: change_user
@@ -49,5 +49,5 @@
   with_items:
   - kube-controller-manager.kubeconfig
   - kube-scheduler.kubeconfig
-  when: "inventory_hostname in groups['kube-master']"
-  when: "inventory_hostname in groups['kube-master'] or inventory_hostname in groups['kube-node']"
+  when: "inventory_hostname in groups['kube_master']"
+  when: "inventory_hostname in groups['kube_master'] or inventory_hostname in groups['kube_node']"
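The hunks above rename every inventory group from the dashed form to an underscore form (kube-master → kube_master, kube-node → kube_node, ex-lb → ex_lb), since recent Ansible releases warn about group names that are not valid identifiers. A minimal sketch of a hosts inventory written with the renamed groups — the IP addresses and the `ex_lb` host variables shown here are illustrative placeholders, not part of this commit:

``` ini
; hypothetical inventory sketch using the renamed (underscore) groups
[etcd]
192.168.1.1

[kube_master]
192.168.1.1

[kube_node]
192.168.1.2
192.168.1.3

; optional external load balancers; EX_APISERVER_VIP / EX_APISERVER_PORT are the
; variables referenced by the ex-lb templates in the hunks above
[ex_lb]
192.168.1.5 EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
192.168.1.6 EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443

[chrony]
192.168.1.1
```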