mirror of https://github.com/easzlab/kubeasz.git

Remove new-node/new-master related configuration

parent faccf8209b
commit ae5bd3f985
@@ -1,8 +1,6 @@
 # to install docker service
 - hosts:
   - kube-master
-  - new-master
   - kube-node
-  - new-node
   roles:
   - docker
@@ -16,9 +16,7 @@
 
 - hosts:
   - kube-master
-  - new-master
   - kube-node
-  - new-node
   tasks:
   - name: Define 'harbor_host', a domain
     set_fact: harbor_host="{{ hostvars[groups.harbor[0]]['HARBOR_DOMAIN'] }}"
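The second play above resolves the Harbor domain once, from the first host in the `harbor` group, and stores it as the fact `harbor_host` on every master and node. A minimal sketch of how such a fact is typically consumed afterwards — the `lineinfile` task and the `ansible_host` lookup are illustrative assumptions, not necessarily the repo's actual follow-up task:

    # hypothetical follow-up: publish the harbor domain to every node's /etc/hosts
    - name: add harbor domain to /etc/hosts
      lineinfile:
        dest: /etc/hosts
        state: present
        line: "{{ hostvars[groups.harbor[0]]['ansible_host'] }} {{ harbor_host }}"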
@@ -1,16 +1,10 @@
-# reconfigure and restart the haproxy service
-- hosts: lb
-  roles:
-  - lb
-
-- hosts: new-master
+- hosts: "{{ NODE_TO_ADD }}"
   roles:
   - { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
   - prepare
   - docker
   - kube-master
   - kube-node
-  #
   - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
   - { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
   - { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
@@ -18,40 +12,17 @@
   #
   tasks:
   - name: Making master nodes SchedulingDisabled
-    shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
+    shell: "{{ bin_dir }}/kubectl cordon {{ NODE_TO_ADD }} "
     delegate_to: "{{ groups.deploy[0] }}"
     when: DEPLOY_MODE != "allinone"
     ignore_errors: true
 
   - name: Setting master role name
-    shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
+    shell: "{{ bin_dir }}/kubectl label node {{ NODE_TO_ADD }} kubernetes.io/role=master --overwrite"
     ignore_errors: true
     delegate_to: "{{ groups.deploy[0] }}"
 
-# modify the ansible hosts file
-- hosts:
-  - new-master
-  tasks:
-  - name: tag new-master FINISHED=yes
-    shell: 'sed -i "/\[new-master/,/\[kube-node/s/{{ inventory_hostname }}/{{ inventory_hostname }} FINISHED=yes/" {{ base_dir }}/hosts'
-    args:
-      warn: false
-    connection: local
-
-  - name: cp new-master to 'kube-master' group
-    lineinfile:
-      dest: "{{ base_dir }}/hosts"
-      state: present
-      insertafter: '^\[kube-master'
-      firstmatch: yes
-      line: "{{ inventory_hostname }} NEW_MASTER=yes"
-    connection: local
-
-- hosts: deploy
-  tasks:
-  - name: rm new-master in ansible hosts
-    lineinfile:
-      dest: "{{ base_dir }}/hosts"
-      state: absent
-      regexp: 'FINISHED=yes'
-    connection: local
+# reconfigure and restart the haproxy service
+- hosts: lb
+  roles:
+  - { role: lb, tags:['restart_lb'] }
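With the play now targeting `"{{ NODE_TO_ADD }}"` rather than a persistent `new-master` group, the node being added is named once on the command line, which is why all the sed/lineinfile bookkeeping against the hosts file could be dropped. A hedged sketch of the new pattern (playbook name and IP are hypothetical):

    # invocation: ansible-playbook <this-playbook>.yml -e NODE_TO_ADD=192.168.1.10
    # the same variable then drives both the play target and delegated kubectl calls
    - hosts: "{{ NODE_TO_ADD }}"
      tasks:
      - name: wait for the new master's apiserver to answer
        wait_for: port=6443 host={{ NODE_TO_ADD }} timeout=300
        delegate_to: "{{ groups.deploy[0] }}"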
@@ -4,9 +4,7 @@
 # update kubectl binary
 - hosts:
   - kube-master
-  - new-master
   - kube-node
-  - new-node
   - deploy
   roles:
   - prepare
@@ -14,7 +12,6 @@
 # update masters
 - hosts:
   - kube-master
-  - new-master
   roles:
   - kube-master
   - kube-node
@@ -22,6 +19,5 @@
 # update nodes
 - hosts:
   - kube-node
-  - new-node
   roles:
   - { role: kube-node, when: "DEPLOY_MODE != 'allinone'" }
@@ -41,15 +41,12 @@
 - hosts:
   - kube-master
   - kube-node
-  - new-master
-  - new-node
   roles:
   - docker
 
 # to set up 'kube-master' nodes
 - hosts:
   - kube-master
-  - new-master
   roles:
   - kube-master
   - kube-node
@@ -67,7 +64,6 @@
 # to set up 'kube-node' nodes
 - hosts:
   - kube-node
-  - new-node
   roles:
   - kube-node
 
@@ -4,9 +4,7 @@
 # to clean 'kube-node' nodes
 - hosts:
   - kube-master
-  - new-master
   - kube-node
-  - new-node
   tasks:
   - name: stop and disable kube-node service
     service: name={{ item }} state=stopped enabled=no
@@ -33,7 +31,6 @@
 # to clean 'kube-master' nodes
 - hosts:
   - kube-master
-  - new-master
   tasks:
   - name: stop and disable kube-master service
     service: name={{ item }} state=stopped enabled=no
@@ -54,9 +51,7 @@
 # to clean docker service and networking
 - hosts:
   - kube-master
-  - new-master
   - kube-node
-  - new-node
   tasks:
   - name: clean 'kube-router' stuff
     shell: "{{ bin_dir }}/docker run --privileged --net=host cloudnativelabs/kube-router --cleanup-config"
@@ -168,9 +163,7 @@
 # to clean ntp, certs and keys, env path
 - hosts:
   - kube-master
-  - new-master
   - kube-node
-  - new-node
   - deploy
   - etcd
   - lb
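The `service: name={{ item }} state=stopped enabled=no` tasks above depend on a loop supplying the unit names. A minimal sketch of that pattern, with service names kubeasz plausibly installs on nodes (the exact list in the repo may differ):

    - name: stop and disable kube-node service
      service: name={{ item }} state=stopped enabled=no
      with_items:
      - kubelet
      - kube-proxy
      ignore_errors: true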
@@ -25,9 +25,6 @@ listen ingress-node
 {% for host in groups['kube-node'] %}
        server {{ host }} {{ host }}:23456 check inter 2000 fall 2 rise 2 weight 1
 {% endfor %}
-{% for host in groups['new-node'] %}
-       server {{ host }} {{ host }}:23456 check inter 2000 fall 2 rise 2 weight 1
-{% endfor %}
 {% endif %}
 {% if INGRESS_TLS_NODEPORT_LB == "yes" %}
 
@@ -39,7 +36,4 @@ listen ingress-node-tls
 {% for host in groups['kube-node'] %}
        server {{ host }} {{ host }}:23457 check inter 2000 fall 2 rise 2 weight 1
 {% endfor %}
-{% for host in groups['new-node'] %}
-       server {{ host }} {{ host }}:23457 check inter 2000 fall 2 rise 2 weight 1
-{% endfor %}
 {% endif %}
@@ -25,7 +25,7 @@ ExecStart={{ bin_dir }}/kube-apiserver \
   --etcd-keyfile={{ ca_dir }}/kubernetes-key.pem \
   --etcd-servers={{ ETCD_ENDPOINTS }} \
   --enable-swagger-ui=true \
-  --apiserver-count={% if DEPLOY_MODE == "multi-master" %}{{ groups['kube-master']|length + groups['new-master']|length }}{% else %}1{% endif %} \
+  --apiserver-count={% if DEPLOY_MODE == "multi-master" %}{{ groups['kube-master']|length }}{% else %}1{% endif %} \
   --allow-privileged=true \
   --audit-log-maxage=30 \
   --audit-log-maxbackup=3 \
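`--apiserver-count` is now derived from the `kube-master` group alone, since added masters join that group directly instead of sitting in `new-master`. A worked example of how the Jinja2 expression evaluates, with hypothetical group membership:

    # with DEPLOY_MODE == "multi-master" and three hosts in [kube-master],
    # groups['kube-master']|length == 3, so the unit file renders:
    #   --apiserver-count=3 \
    - name: preview the rendered flag
      debug:
        msg: "--apiserver-count={{ groups['kube-master']|length }}"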
@@ -23,9 +23,6 @@ listen kube-master
 {% for host in groups['kube-master'] %}
        server {{ host }} {{ host }}:6443 check inter 2000 fall 2 rise 2 weight 1
 {% endfor %}
-{% for host in groups['new-master'] %}
-       server {{ host }} {{ host }}:6443 check inter 2000 fall 2 rise 2 weight 1
-{% endfor %}
 {% if INGRESS_NODEPORT_LB == "yes" %}
 
 listen ingress-node
@@ -36,9 +33,6 @@ listen ingress-node
 {% for host in groups['kube-node'] %}
        server {{ host }} {{ host }}:23456 check inter 2000 fall 2 rise 2 weight 1
 {% endfor %}
-{% for host in groups['new-node'] %}
-       server {{ host }} {{ host }}:23456 check inter 2000 fall 2 rise 2 weight 1
-{% endfor %}
 {% endif %}
 {% if INGRESS_TLS_NODEPORT_LB == "yes" %}
 
@@ -50,7 +44,4 @@ listen ingress-node-tls
 {% for host in groups['kube-node'] %}
        server {{ host }} {{ host }}:23457 check inter 2000 fall 2 rise 2 weight 1
 {% endfor %}
-{% for host in groups['new-node'] %}
-       server {{ host }} {{ host }}:23457 check inter 2000 fall 2 rise 2 weight 1
-{% endfor %}
 {% endif %}
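Each deleted block was a second Jinja2 loop emitting `server` lines for a transient group; the surviving loops already cover every backend because joined nodes land in `kube-master`/`kube-node`. A hedged sketch of what the kube-master loop renders, with hypothetical addresses:

    # for groups['kube-master'] == ['192.168.1.1', '192.168.1.2'] the template emits:
    #   server 192.168.1.1 192.168.1.1:6443 check inter 2000 fall 2 rise 2 weight 1
    #   server 192.168.1.2 192.168.1.2:6443 check inter 2000 fall 2 rise 2 weight 1
    - name: preview rendered backend lines
      debug:
        msg: "server {{ item }} {{ item }}:6443 check inter 2000 fall 2 rise 2 weight 1"
      with_items: "{{ groups['kube-master'] }}"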
@@ -1,9 +1,7 @@
 # script to reset the k8s pod network; read docs/op/change_k8s_network.md carefully before use
 - hosts:
   - kube-master
-  - new-master
   - kube-node
-  - new-node
   tasks:
   - name: get info on all PODs already created
     command: "{{ bin_dir }}/kubectl get daemonset -n kube-system"
@@ -98,9 +96,7 @@
 
 - hosts:
   - kube-master
-  - new-master
   - kube-node
-  - new-node
   # install the new network plugin
   roles:
   - { role: calico, when: "CLUSTER_NETWORK == 'calico'" }