mirror of https://github.com/easzlab/kubeasz.git
Adjust the addetcd/addnode/addmaster scripts
parent d2d164b2b8
commit 6e6792bbd4

@@ -1,5 +1,7 @@
 # WARNING: Upgrading the k8s cluster can be risky. Make sure you know what you are doing.
 # Read the guide: 'op/upgrade.md' .
+# Usage: `ansible-playbook /etc/ansible/22.upgrade.yml -t upgrade_k8s`
+# or `easzctl upgrade`
 
 # update masters
 - hosts:

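Note: this is the new header of 22.upgrade.yml; both documented invocations select the same tagged tasks. A quick way to preview what the tag will run before upgrading (a sketch, assuming the standard /etc/ansible layout kubeasz installs into):

    # list the tasks `-t upgrade_k8s` would execute, without running them
    ansible-playbook /etc/ansible/22.upgrade.yml -t upgrade_k8s --list-tasks
    # then perform the actual upgrade
    ansible-playbook /etc/ansible/22.upgrade.yml -t upgrade_k8s
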
@@ -31,7 +31,6 @@
 # create the aggregator proxy certificates
 - name: create the aggregator proxy certificate signing request
   template: src=aggregator-proxy-csr.json.j2 dest={{ ca_dir }}/aggregator-proxy-csr.json
-  tags: upgrade_k8s
 
 - name: create the aggregator-proxy certificate and private key
   shell: "cd {{ ca_dir }} && {{ bin_dir }}/cfssl gencert \

@@ -39,7 +38,6 @@
   -ca-key={{ ca_dir }}/ca-key.pem \
   -config={{ ca_dir }}/ca-config.json \
   -profile=kubernetes aggregator-proxy-csr.json | {{ bin_dir }}/cfssljson -bare aggregator-proxy"
-  tags: upgrade_k8s
 
 - block:
     - name: generate a random basic-auth password

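Note: these two hunks drop the upgrade_k8s tag from the aggregator-proxy certificate tasks, so a tagged upgrade run no longer regenerates that certificate. For reference, cfssl names its output files after the -bare argument; the manual equivalent of the task (assuming ca.pem, ca-key.pem and ca-config.json already sit in the CA directory):

    cd /etc/kubernetes/ssl    # example ca_dir, not part of this diff
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
      -profile=kubernetes aggregator-proxy-csr.json | cfssljson -bare aggregator-proxy
    # -> writes aggregator-proxy.pem and aggregator-proxy-key.pem
    openssl x509 -noout -subject -in aggregator-proxy.pem    # sanity check
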
@@ -66,17 +64,17 @@
   - kube-apiserver.service
   - kube-controller-manager.service
   - kube-scheduler.service
-  tags: upgrade_k8s, restart_master
+  tags: restart_master
 
 # for compatibility with v1.8, use a different kube-apiserver systemd unit file
 - name: get the k8s version
   shell: "{{ bin_dir }}/kube-apiserver --version"
   register: k8s_ver
-  tags: upgrade_k8s, restart_master
+  tags: restart_master
 
 - name: create the kube-apiserver v1.8 systemd unit file
   template: src=kube-apiserver-v1.8.service.j2 dest=/etc/systemd/system/kube-apiserver.service
-  tags: upgrade_k8s, restart_master
+  tags: restart_master
   when: "'v1.8' in k8s_ver.stdout"
 
 - name: enable master services

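Note: this hunk is in the kube-master role tasks; after it, these service tasks carry only the restart_master tag. Restarting the master components alone would then look like (a sketch, assuming kubeasz's usual playbook layout):

    ansible-playbook /etc/ansible/04.kube-master.yml -t restart_master
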
@@ -80,13 +80,15 @@
 - name: register the variable DNS_SVC_IP
   shell: echo {{ SERVICE_CIDR }}|cut -d/ -f1|awk -F. '{print $1"."$2"."$3"."$4+2}'
   register: DNS_SVC_IP
+  tags: restart_node
 
 - name: set the variable CLUSTER_DNS_SVC_IP
   set_fact: CLUSTER_DNS_SVC_IP={{ DNS_SVC_IP.stdout }}
+  tags: restart_node
 
 - name: create the kubelet systemd unit file
   template: src=kubelet.service.j2 dest=/etc/systemd/system/kubelet.service
-  tags: upgrade_k8s, restart_node
+  tags: restart_node
 
 - name: enable the kubelet service at boot
   shell: systemctl enable kubelet

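Note: the DNS_SVC_IP one-liner derives the cluster DNS service IP by stripping the CIDR suffix and adding 2 to the last octet of the network address. With a SERVICE_CIDR of 10.68.0.0/16 (an example value, not taken from this diff):

    echo 10.68.0.0/16|cut -d/ -f1|awk -F. '{print $1"."$2"."$3"."$4+2}'
    # -> 10.68.0.2
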
@@ -113,8 +115,8 @@
   when: "inventory_hostname in groups['kube-master']"
 
 - name: create the kube-proxy service file
-  tags: reload-kube-proxy, upgrade_k8s, restart_node
   template: src=kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
+  tags: reload-kube-proxy, restart_node
 
 - name: enable the kube-proxy service at boot
   shell: systemctl enable kube-proxy

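Note: the reload-kube-proxy tag kept on this task means the kube-proxy unit file can be re-rendered across nodes without a full playbook run, e.g. (same layout assumption as above):

    ansible-playbook /etc/ansible/05.kube-node.yml -t reload-kube-proxy
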
@@ -131,6 +133,7 @@
   until: '"running" in kubelet_status.stdout'
   retries: 8
   delay: 2
+  tags: reload-kube-proxy, upgrade_k8s, restart_node
 
 - name: poll until the node reaches Ready state
   shell: "{{ bin_dir }}/kubectl get node {{ inventory_hostname }}|awk 'NR>1{print $2}'"

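Note: the polling shell uses awk to skip the header row of `kubectl get node` and print only the STATUS column, which a surrounding until-loop can compare against "Ready". Run by hand it looks like (node name illustrative):

    kubectl get node 192.168.1.3|awk 'NR>1{print $2}'
    # -> Ready
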
@@ -2,6 +2,7 @@
 - name: fail info1
   fail: msg="a 'kube-node' node CAN NOT be an 'ex-lb' node at the same time"
   when: "inventory_hostname in groups['ex-lb']"
+  tags: restart_lb
 
 - name: install haproxy
   package: name=haproxy state=present

@@ -6,6 +6,7 @@
 tasks:
 - name: add a new etcd member
   shell: "ETCDCTL_API=3 {{ bin_dir }}/etcdctl member add {{ NODE_NAME }} --peer-urls=https://{{ NODE_TO_ADD }}:2380"
+  # new etcd node will be groups.etcd[0]
   delegate_to: "{{ groups.etcd[1] }}"
 
 # start the new-etcd node

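Note: `etcdctl member add` only registers the member with the running cluster; the new node must then start with its cluster state set to existing, which is what the play below does via CLUSTER_STATE. The manual steps look like (member name and IP illustrative):

    ETCDCTL_API=3 etcdctl member list    # inspect current members
    ETCDCTL_API=3 etcdctl member add etcd3 --peer-urls=https://192.168.1.4:2380
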
@@ -13,7 +14,7 @@
 vars:
   CLUSTER_STATE: existing
 roles:
-  - { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
+  - { role: chrony, when: "groups['chrony']|length > 0" }
   - prepare
   - etcd
 

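Note: the chrony role is now gated on the inventory itself rather than on an NTP_ENABLED variable held by the old deploy group: it runs only when the [chrony] group is non-empty. An inventory enabling it would contain something like (host IP illustrative):

    [chrony]
    192.168.1.1
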
@@ -1,9 +1,9 @@
-# Note: this playbook cann't run independently
+# Note: this playbook can not run independently
 # Usage: easzctl add-node 1.1.1.1
 
 - hosts: "{{ NODE_TO_ADD }}"
   roles:
-  - { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
+  - { role: chrony, when: "groups['chrony']|length > 0" }
   - prepare
   - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
   - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }

@@ -3,7 +3,7 @@
 
 - hosts: "{{ NODE_TO_ADD }}"
   roles:
-  - { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
+  - { role: chrony, when: "groups['chrony']|length > 0" }
   - prepare
   - { role: docker, when: "CONTAINER_RUNTIME == 'docker'" }
   - { role: containerd, when: "CONTAINER_RUNTIME == 'containerd'" }

@@ -18,16 +18,12 @@
   tasks:
   - name: Making master nodes SchedulingDisabled
     shell: "{{ bin_dir }}/kubectl cordon {{ NODE_TO_ADD }} "
-    delegate_to: "{{ groups.deploy[0] }}"
-    when: DEPLOY_MODE != "allinone"
+    when: "inventory_hostname not in groups['kube-node']"
     ignore_errors: true
 
   - name: Setting master role name
     shell: "{{ bin_dir }}/kubectl label node {{ NODE_TO_ADD }} kubernetes.io/role=master --overwrite"
     ignore_errors: true
-    delegate_to: "{{ groups.deploy[0] }}"
 
-# reconfigure and restart the haproxy service
-- hosts: lb
-  roles:
-  - lb
+# reconfigure and restart the haproxy service on 'kube-node' nodes
+# refer to the function 'add-node()' in 'tools/easzctl'

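Note: with the deploy group gone, the kubectl steps run on the target host instead of being delegated, and the dedicated lb play is replaced by restarting the node-local haproxy (see the easzctl hunks below). Done by hand, the two steps are (IP illustrative):

    kubectl cordon 192.168.1.5
    kubectl label node 192.168.1.5 kubernetes.io/role=master --overwrite
    kubectl get node 192.168.1.5    # STATUS column shows Ready,SchedulingDisabled
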
@@ -89,9 +89,6 @@ function add-master() {
 # check new master's address regexp
 [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]] || { echo "[ERROR] Invalid ip address!"; return 2; }
 
-# check if k8s with DEPLOY_MODE='multi-master'
-grep '^DEPLOY_MODE=multi-master' $BASEPATH/hosts || { echo "[ERROR] only k8s with DEPLOY_MODE='multi-master' can have master node added!"; return 2; }
-
 # check if the new master already existed
 sed -n '/^\[kube-master/,/^\[kube-node/p' $BASEPATH/hosts|grep "^$1" && { echo "[ERROR] master $1 already existed!"; return 2; }
 

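Note: the regex accepts exactly four dot-separated octets in the 0-255 range. It can be exercised on its own in bash (a standalone sketch of the same pattern):

    check_ip() {
      [[ $1 =~ ^(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})(\.(2(5[0-5]{1}|[0-4][0-9]{1})|[0-1]?[0-9]{1,2})){3}$ ]]
    }
    check_ip 192.168.1.5 && echo valid      # prints: valid
    check_ip 999.168.1.5 || echo invalid    # prints: invalid
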
@@ -101,6 +98,9 @@ function add-master() {
 # check if playbook runs successfully
 ansible-playbook $BASEPATH/tools/21.addmaster.yml -e NODE_TO_ADD=$1 || { sed -i "/$1 NEW_MASTER=yes/d" $BASEPATH/hosts; return 2; }
 
+# reconfigure and restart the haproxy service on 'kube-node' nodes
+ansible-playbook $BASEPATH/05.kube-node.yml -t restart_lb || { echo "[ERROR] Failed to restart the haproxy service on 'kube-node' nodes!"; return 2; }
+
 # save current cluster context if needed
 [ -f "$BASEPATH/.cluster/current_cluster" ] && save_context
 return 0

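Note: taken together, add-master now validates the address, appends the host to the inventory with NEW_MASTER=yes, runs tools/21.addmaster.yml, and finally replays 05.kube-node.yml with -t restart_lb so that the haproxy instance on every 'kube-node' node learns the new apiserver backend. Typical invocation (address illustrative):

    easzctl add-master 192.168.1.5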