mirror of https://github.com/easzlab/kubeasz.git
fix 删除master/node流程 (fix the workflow for deleting a master/node)
parent 17534ed8ec
commit 2ef0e9f86c
@@ -91,6 +91,7 @@
     dest: /root/.kube/config
     regexp: "^ server"
     line: " server: https://{{ inventory_hostname }}:6443"
+  tags: upgrade_k8s, restart_master

 - name: 以轮询的方式等待master服务启动完成
   command: "{{ bin_dir }}/kubectl get node"

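With the new restart_master tag, the kubeconfig rewrite on master nodes can be re-applied without rerunning the whole role. A minimal invocation sketch; the playbook name 04.kube-master.yml is assumed from the repo's usual numbering and should be checked against your checkout:

    # re-run only the tasks tagged restart_master (or upgrade_k8s) for the masters;
    # 04.kube-master.yml is an assumed name, adjust to your kubeasz checkout
    ansible-playbook 04.kube-master.yml -t restart_master
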
@@ -26,8 +26,15 @@ HARD_EVICTION: "memory.available<200Mi,nodefs.available<10%"
 BALANCE_ALG: "roundrobin"

 # 设置 APISERVER 地址
-KUBE_APISERVER: "{%- if groups['kube-master']|length > 1 -%} \
+KUBE_APISERVER: "{%- if inventory_hostname in groups['kube-master'] -%} \
+                    https://{{ inventory_hostname }}:6443 \
+                 {%- else -%} \
+                    {%- if groups['kube-master']|length > 1 -%} \
                     https://127.0.0.1:6443 \
                 {%- else -%} \
                     https://{{ groups['kube-master'][0] }}:6443 \
+                    {%- endif -%} \
                 {%- endif -%}"
+
+# 增加/删除 master 节点时,node 节点需要重新配置 haproxy 等
+MASTER_CHG: "no"

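The rewritten template resolves KUBE_APISERVER differently per host: a master always talks to its own apiserver, a worker goes through the local haproxy when there is more than one master, and straight to the single master otherwise. A minimal shell sketch of the same decision (the IPs are placeholders, not values from this repo):

    # resolve_apiserver <node_ip> <master_ip>... -- mirrors the KUBE_APISERVER template
    resolve_apiserver() {
      local node_ip=$1; shift
      local masters=("$@")
      if printf '%s\n' "${masters[@]}" | grep -qx "$node_ip"; then
        echo "https://${node_ip}:6443"        # master node: use its own apiserver
      elif [ "${#masters[@]}" -gt 1 ]; then
        echo "https://127.0.0.1:6443"         # worker in an HA cluster: local haproxy
      else
        echo "https://${masters[0]}:6443"     # worker with a single master: go direct
      fi
    }

    resolve_apiserver 192.168.1.1 192.168.1.1 192.168.1.2   # -> https://192.168.1.1:6443
    resolve_apiserver 192.168.1.5 192.168.1.1 192.168.1.2   # -> https://127.0.0.1:6443
    resolve_apiserver 192.168.1.5 192.168.1.1               # -> https://192.168.1.1:6443
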
@@ -25,7 +25,6 @@
     dest: /root/.kube/config
     regexp: "^ server"
     line: " server: {{ KUBE_APISERVER }}"
-  when: "inventory_hostname not in groups['kube-master']"

 ##----------kubelet 配置部分--------------

@@ -46,15 +45,6 @@
           --embed-certs=true \
           --server={{ KUBE_APISERVER }} \
           --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
-  when: "inventory_hostname not in groups['kube-master']"
-
-- name: 设置集群参数
-  shell: "{{ bin_dir }}/kubectl config set-cluster kubernetes \
-          --certificate-authority={{ ca_dir }}/ca.pem \
-          --embed-certs=true \
-          --server=https://{{ inventory_hostname }}:6443 \
-          --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
-  when: "inventory_hostname in groups['kube-master']"

 - name: 设置客户端认证参数
   shell: "{{ bin_dir }}/kubectl config set-credentials system:node:{{ inventory_hostname }} \

@@ -105,14 +95,6 @@
     dest: /etc/kubernetes/kube-proxy.kubeconfig
     regexp: "^ server"
     line: " server: {{ KUBE_APISERVER }}"
-  when: "inventory_hostname not in groups['kube-master']"
-
-- name: 替换 kube-proxy.kubeconfig 的 apiserver 地址
-  lineinfile:
-    dest: /etc/kubernetes/kube-proxy.kubeconfig
-    regexp: "^ server"
-    line: " server: https://{{ inventory_hostname }}:6443"
-  when: "inventory_hostname in groups['kube-master']"

 - name: 创建kube-proxy 服务文件
   template: src=kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service

@@ -35,3 +35,33 @@
   shell: systemctl start haproxy
   when: "groups['kube-master']|length > 1"
   tags: restart_lb
+
+# master 节点从1个增加到2个时候,需要修改如下配置
+# master 节点从2个减少到1个时候,也需要修改
+- block:
+  - name: 替换 kubeconfig 的 apiserver 地址
+    lineinfile:
+      dest: /root/.kube/config
+      regexp: "^ server"
+      line: " server: {{ KUBE_APISERVER }}"
+
+  - name: 替换 kubelet.kubeconfig 的 apiserver 地址
+    lineinfile:
+      dest: /etc/kubernetes/kubelet.kubeconfig
+      regexp: "^ server"
+      line: " server: {{ KUBE_APISERVER }}"
+
+  - name: 替换 kube-proxy.kubeconfig 的 apiserver 地址
+    lineinfile:
+      dest: /etc/kubernetes/kube-proxy.kubeconfig
+      regexp: "^ server"
+      line: " server: {{ KUBE_APISERVER }}"
+
+  - name: restart kube-node service
+    service: name={{ item }} state=restarted
+    with_items:
+    - kubelet
+    - kube-proxy
+    ignore_errors: true
+  when: "MASTER_CHG == 'yes' and groups['kube-master']|length < 3"
+  tags: restart_lb

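The added block is gated twice: MASTER_CHG must be set to yes explicitly, and it only runs while the cluster has fewer than 3 masters, i.e. the 1-to-2 and 2-to-1 transitions the comments describe. A routine run of 05.kube-node.yml therefore leaves the kubeconfigs untouched; the easzctl changes later in this commit trigger it roughly like this ($BASEPATH stands for the kubeasz install directory):

    # re-render haproxy on the nodes and, because MASTER_CHG=yes, also rewrite the
    # node-side kubeconfigs and restart kubelet/kube-proxy
    ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb -e MASTER_CHG=yes
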
@@ -4,10 +4,6 @@
 - hosts: "{{ NODE_TO_DEL }}"
   tasks:
   - name: fail info1
-    fail: msg="you CAN NOT delete the last member of etcd cluster!"
-    when: "groups['etcd']|length < 2 and NODE_TO_DEL in groups['etcd']"
-
-  - name: fail info2
     fail: msg="you CAN NOT delete the last member of kube-master!"
     when: "groups['kube-master']|length < 2 and NODE_TO_DEL in groups['kube-master']"

@@ -22,7 +18,7 @@
     DEL_LB: "yes"
   roles:
   - clean
-  task:
+  tasks:
   - name: 执行 kubectl delete node
     shell: "{{ bin_dir }}/kubectl delete node {{ NODE_TO_DEL }}"
     connection: local

@@ -4,10 +4,6 @@
 - hosts: "{{ NODE_TO_DEL }}"
   tasks:
   - name: fail info1
-    fail: msg="you CAN NOT delete the last member of etcd cluster!"
-    when: "groups['etcd']|length < 2 and NODE_TO_DEL in groups['etcd']"
-
-  - name: fail info2
     fail: msg="you CAN NOT delete the last member of kube-master!"
     when: "groups['kube-master']|length < 2 and NODE_TO_DEL in groups['kube-master']"

@@ -23,7 +19,7 @@
     DEL_LB: "yes"
   roles:
   - clean
-  task:
+  tasks:
   - name: 执行 kubectl delete node
     shell: "{{ bin_dir }}/kubectl delete node {{ NODE_TO_DEL }}"
     delegate_to: "{{ groups['kube-node'][0] }}"

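With the play keyword fixed to tasks: and the variable aligned to NODE_TO_DEL, both tool playbooks can also be run by hand; the IP below is only a placeholder:

    # remove a worker: run the clean role on it, then kubectl delete node
    ansible-playbook tools/12.delnode.yml -e NODE_TO_DEL=192.168.1.100

    # remove a master the same way
    ansible-playbook tools/13.delmaster.yml -e NODE_TO_DEL=192.168.1.100
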
@@ -107,7 +107,7 @@ function add-master() {
   ansible-playbook $BASEPATH/tools/03.addmaster.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_MASTER=yes/d" $BASEPATH/hosts; return 2; }

   # reconfigure and restart the haproxy service on 'kube-node' nodes
-  ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb || { echo "[ERROR] Failed to restart the haproxy service on 'kube-node' nodes!"; return 2; }
+  ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb -e MASTER_CHG=yes || { echo "[ERROR] Failed to restart the haproxy service on 'kube-node' nodes!"; return 2; }

   # save current cluster context if needed
   [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context

@@ -160,7 +160,7 @@ function del-node() {
   [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }

   #
-  ansible-playbook $BASEPATH/tools/12.delnode.yml -e ETCD_TO_DEL=$1 || { echo "[ERROR] Failed to delete 'kube-node': $1!"; return 2; }
+  ansible-playbook $BASEPATH/tools/12.delnode.yml -e NODE_TO_DEL=$1 || { echo "[ERROR] Failed to delete 'kube-node': $1!"; return 2; }

   # remove node in ansible hosts
   sed -i '/^\[kube-node/,/^\[harbor/{/^'"$1"'[^0-9]*$/d}' $BASEPATH/hosts

@@ -175,7 +175,7 @@ function del-master() {
   [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }

   #
-  ansible-playbook $BASEPATH/tools/13.delmaster.yml -e ETCD_TO_DEL=$1 || { echo "[ERROR] Failed to delete 'kube-master': $1!"; return 2; }
+  ansible-playbook $BASEPATH/tools/13.delmaster.yml -e NODE_TO_DEL=$1 || { echo "[ERROR] Failed to delete 'kube-master': $1!"; return 2; }

   # remove node in ansible hosts
   sed -i '/^\[kube-master/,/^\[kube-node/{/^'"$1"'[^0-9]*$/d}' $BASEPATH/hosts

@@ -183,6 +183,9 @@ function del-master() {
   # reconfig kubeconfig in ansible controller
   ansible-playbook $BASEPATH/01.prepare.yml -t create_kctl_cfg

+  # reconfigure and restart the haproxy service on 'kube-node' nodes
+  ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb -e MASTER_CHG=yes || { echo "[ERROR] Failed to restart the haproxy service on 'kube-node' nodes!"; return 2; }
+
   # save current cluster context if needed
   [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
   return 0

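After adding or removing a master, one quick sanity check is to look at the " server:" lines this commit rewrites and at the node list; these are generic grep/kubectl commands, assumed to be run on a kube-node host with kubectl on PATH:

    # every kubeconfig should point at whatever KUBE_APISERVER resolved to on this host
    grep ' server' /root/.kube/config /etc/kubernetes/kubelet.kubeconfig /etc/kubernetes/kube-proxy.kubeconfig

    # the remaining nodes should report Ready against the new apiserver endpoint
    kubectl get node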