调整addetcd/addnode/addmaster等脚本2

pull/641/head
gjmzj 2019-06-01 22:16:14 +08:00
parent 6e6792bbd4
commit 254f6528b9
9 changed files with 17 additions and 37 deletions

View File

@@ -35,7 +35,7 @@ server {{ groups.deploy[0] }} iburst
- 修改 ansible hosts 文件,在 `deploy` 节点配置 `NTP_ENABLED=yes` (默认: no)
- [可选] 修改 roles/chrony/var/main.yml 中的变量定义,关于文件 roles/chrony/var/main.yml 的由来请看[这里](../setup/config_guide.md)
对于新集群或者新节点,`chrony` 的安装配置已经集成到 `90.setup.yml` `01.prepare.yml` `20.addnode.yml` `21.addmaster.yml`脚本中;对于已运行中的集群请执行如下命令进行安装:
对于新集群或者新节点,`chrony` 的安装配置已经集成到脚本中;对于已运行中的集群请执行如下命令进行安装:
`ansible-playbook /etc/ansible/roles/chrony/chrony.yml `

View File

@@ -6,7 +6,7 @@
- /opt/kube/kube-system
- name: 准备配置 kube-router DaemonSet (without IPVS)
template: src=kuberouter.yaml.j2 dest=/opt/kube/kube-system/kuberouter.yaml
template: src=kuberouter.yaml.j2 dest=/opt/kube/kube-system/kube-router.yaml
- name: 下载cni plugins
copy: src={{ base_dir }}/bin/{{ item }} dest={{ bin_dir }}/{{ item }} mode=0755
@@ -58,7 +58,7 @@
# 只需单节点执行一次
- name: 运行 kube-router DaemonSet
shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/kuberouter.yaml"
shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/kube-router.yaml"
run_once: true
# 删除原有cni配置

View File

@@ -1,5 +1,5 @@
# remove a etcd member
- hosts: deploy
- hosts: localhost
vars_prompt:
- name: "ETCD_TO_DEL"
prompt: "which etcd node is about to be deleted?(e.g 192.168.1.1)"
@@ -30,7 +30,6 @@
dest: "{{ base_dir }}/hosts"
state: absent
regexp: '{{ ETCD_NAME.stdout }}'
connection: local
when: "ETCD_NAME.stdout != ''"
- name: delete a etcd member
@@ -44,8 +43,7 @@
when: "ETCD_ID.stdout != ''"
- name: reconfig and restart the etcd cluster
shell: "ansible-playbook /etc/ansible/02.etcd.yml > /tmp/ansible-playbook.log 2>&1"
connection: local
shell: "ansible-playbook {{ base_dir }}/02.etcd.yml > /tmp/ansible-playbook.log 2>&1"
when: "ETCD_ID.stdout != ''"
run_once: true
# 满足条件才进行删除

View File

@@ -7,7 +7,7 @@
# 3. 检查下修改是否成功,并且能够成功执行 ansible all -m ping
# 4. 运行本脚本 ansible-playbook /etc/ansible/tools/change_ip_aio.yml
- hosts: deploy # hosts 角色无所谓反正allinone所有角色都是同个ip
- hosts: kube-master # hosts 角色无所谓反正allinone所有角色都是同个ip
tasks:
- name: 删除一些证书和配置后面会以新IP重新生成
file: name={{ item }} state=absent
@@ -16,25 +16,25 @@
- "/etc/kubernetes/ssl/kubernetes.pem" # 删除旧master证书
- "/etc/kubernetes/kubelet.kubeconfig" # 删除旧kubelet配置文件
- hosts: deploy
- hosts: kube-master
roles:
- deploy
- etcd
- kube-master
- kube-node
- hosts: deploy
- hosts: kube-master
tasks:
- name: 删除老IP地址的node
shell: "{{ bin_dir }}/kubectl get node |grep -v '{{ inventory_hostname }}'|awk '{print $1}' |xargs {{ bin_dir }}/kubectl delete node"
ignore_errors: true
- name: 删除原network插件部署
shell: "{{ bin_dir }}/kubectl delete -f /opt/kube/kube-system/{{ CLUSTER_NETWORK }}/ || \
{{ bin_dir }}/kubectl delete -f /root/local/kube-system/{{ CLUSTER_NETWORK }}/"
shell: "{{ bin_dir }}/kubectl delete -f /opt/kube/kube-system/{{ CLUSTER_NETWORK }}.yaml || \
{{ bin_dir }}/kubectl delete -f /opt/kube/kube-ovn/"
ignore_errors: true
- hosts: deploy
- hosts: kube-master
roles:
- { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
- { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }

View File

@@ -6,18 +6,16 @@
- name: 获取所有已经创建的POD信息
command: "{{ bin_dir }}/kubectl get daemonset -n kube-system"
register: pod_info
delegate_to: "{{ groups.deploy[0] }}"
run_once: true
- name: 删除原network插件部署
shell: "{{ bin_dir }}/kubectl delete -f /opt/kube/kube-system/{{ item }}/"
shell: "{{ bin_dir }}/kubectl delete -f /opt/kube/kube-system/{{ item }}.yaml"
with_items:
- calico
- cilium
- flannel
- kube-router
when: 'item in pod_info.stdout'
delegate_to: "{{ groups.deploy[0] }}"
run_once: true
ignore_errors: true
@@ -79,21 +77,6 @@
- kube-proxy
ignore_errors: true
- hosts:
- lb
tasks:
- name: 重启lb的keepalived服务
service: name=keepalived state=restarted
- name: 轮询等待apiserver服务恢复
command: "{{ bin_dir }}/kubectl get node"
register: result
until: result.rc == 0
retries: 5
delay: 6
delegate_to: "{{ groups.deploy[0] }}"
run_once: true
- hosts:
- kube-master
- kube-node
@@ -104,7 +87,7 @@
- { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
- { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
- hosts: deploy
- hosts: kube-node
tasks:
# 删除所有运行pod由controller自动重建
- name: 重启所有pod
@@ -112,4 +95,3 @@
do {{ bin_dir }}/kubectl delete pod --all -n $NS; done;"
ignore_errors: true
run_once: true

View File

@@ -78,7 +78,7 @@ function add-node() {
sed -i "/\[kube-node/a $1 NEW_NODE=yes" $BASEPATH/hosts
# check if playbook runs successfully
ansible-playbook $BASEPATH/tools/20.addnode.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_NODE=yes/d" $BASEPATH/hosts; return 2; }
ansible-playbook $BASEPATH/tools/02.addnode.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_NODE=yes/d" $BASEPATH/hosts; return 2; }
# save current cluster context if needed
[ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
@@ -96,7 +96,7 @@ function add-master() {
sed -i "/\[kube-master/a $1 NEW_MASTER=yes" $BASEPATH/hosts
# check if playbook runs successfully
ansible-playbook $BASEPATH/tools/21.addmaster.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_MASTER=yes/d" $BASEPATH/hosts; return 2; }
ansible-playbook $BASEPATH/tools/03.addmaster.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_MASTER=yes/d" $BASEPATH/hosts; return 2; }
# reconfigure and restart the haproxy service on 'kube-node' nodes
ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb || { echo "[ERROR] Failed to restart the haproxy service on 'kube-node' nodes!"; return 2; }
@@ -122,7 +122,7 @@ function add-etcd() {
sed -i "/\[etcd/a $1 NODE_NAME=$NAME" $BASEPATH/hosts
# check if playbook runs successfully
ansible-playbook $BASEPATH/tools/19.addetcd.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NODE_NAME=$NAME/d" $BASEPATH/hosts; return 2; }
ansible-playbook $BASEPATH/tools/01.addetcd.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NODE_NAME=$NAME/d" $BASEPATH/hosts; return 2; }
# restart apiservers to use the new etcd cluster
ansible-playbook $BASEPATH/04.kube-master.yml -t restart_master || { echo "[ERROR] Unexpected failures in master nodes!"; return 2; }
@@ -137,7 +137,7 @@ function del-etcd() {
[[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }
#
ansible-playbook $BASEPATH/tools/remove_etcd_node.yml -e ETCD_TO_DEL=$1
ansible-playbook $BASEPATH/tools/11.deletcd.yml -e ETCD_TO_DEL=$1 || { echo "[ERROR] Failed to delete etcd node: $1!"; return 2; }
# restart apiservers to use the new etcd cluster
ansible-playbook $BASEPATH/04.kube-master.yml -t restart_master || { echo "[ERROR] Unexpected failures in master nodes!"; return 2; }