diff --git a/example/hosts.allinone.example b/example/hosts.allinone.example index 8fc844b..42c16ba 100644 --- a/example/hosts.allinone.example +++ b/example/hosts.allinone.example @@ -10,7 +10,7 @@ 192.168.1.1 NODE_IP="192.168.1.1" [kube-node] -192.168.1.1 NODE_ID=node1 NODE_IP="192.168.1.1" +192.168.1.1 NODE_IP="192.168.1.1" # 如果启用harbor,请配置后面harbor相关参数 [harbor] @@ -18,7 +18,7 @@ # 预留组,后续添加node节点使用 [new-node] -#192.168.1.xx NODE_ID=node6 NODE_IP="192.168.1.xx" +#192.168.1.xx NODE_IP="192.168.1.xx" [all:vars] # ---------集群主要参数--------------- diff --git a/example/hosts.m-masters.example b/example/hosts.m-masters.example index 7bdaee2..0e5cf45 100644 --- a/example/hosts.m-masters.example +++ b/example/hosts.m-masters.example @@ -24,9 +24,9 @@ ROUTER_ID=57 # 取值在0-255之间,区分多个instance的 MASTER_PORT="8443" # 设置 api-server VIP地址的服务端口 [kube-node] -192.168.1.2 NODE_ID=node1 NODE_IP="192.168.1.2" -192.168.1.3 NODE_ID=node2 NODE_IP="192.168.1.3" -192.168.1.4 NODE_ID=node3 NODE_IP="192.168.1.4" +192.168.1.2 NODE_IP="192.168.1.2" +192.168.1.3 NODE_IP="192.168.1.3" +192.168.1.4 NODE_IP="192.168.1.4" # 如果启用harbor,请配置后面harbor相关参数 [harbor] @@ -34,8 +34,8 @@ MASTER_PORT="8443" # 设置 api-server VIP地址的服务端口 # 预留组,后续添加node节点使用 [new-node] -#192.168.1.xx NODE_ID=node6 NODE_IP="192.168.1.xx" -#192.168.1.xx NODE_ID=node7 NODE_IP="192.168.1.xx" +#192.168.1.xx NODE_IP="192.168.1.xx" +#192.168.1.xx NODE_IP="192.168.1.xx" [all:vars] # ---------集群主要参数--------------- diff --git a/example/hosts.s-master.example b/example/hosts.s-master.example index c376a54..4eabc55 100644 --- a/example/hosts.s-master.example +++ b/example/hosts.s-master.example @@ -12,9 +12,9 @@ 192.168.1.1 NODE_IP="192.168.1.1" [kube-node] -192.168.1.1 NODE_ID=node1 NODE_IP="192.168.1.1" -192.168.1.2 NODE_ID=node2 NODE_IP="192.168.1.2" -192.168.1.3 NODE_ID=node3 NODE_IP="192.168.1.3" +192.168.1.1 NODE_IP="192.168.1.1" +192.168.1.2 NODE_IP="192.168.1.2" +192.168.1.3 NODE_IP="192.168.1.3" # 如果启用harbor,请配置后面harbor相关参数 [harbor] @@ 
-22,7 +22,7 @@ # 预留组,后续添加node节点使用 [new-node] -#192.168.1.xx NODE_ID=node6 NODE_IP="192.168.1.xx" +#192.168.1.xx NODE_IP="192.168.1.xx" [all:vars] # ---------集群主要参数--------------- diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml index bd8e46d..9d45ed9 100644 --- a/roles/calico/tasks/main.yml +++ b/roles/calico/tasks/main.yml @@ -27,11 +27,16 @@ - name: 准备 calico rbac文件 template: src=calico-rbac.yaml.j2 dest=/root/local/kube-system/calico/calico-rbac.yaml -# 只需单节点执行一次,重复执行的报错可以忽略 +- name: 获取所有已经创建的POD信息 + command: "{{ bin_dir }}/kubectl get pod --all-namespaces" + register: pod_info + run_once: true + +# 只需单节点执行一次 - name: 运行 calico网络 shell: "{{ bin_dir }}/kubectl create -f /root/local/kube-system/calico/ && sleep 15" - when: NODE_ID is defined and NODE_ID == "node1" - ignore_errors: true + run_once: true + when: '"calico" not in pod_info.stdout' # 删除原有cni配置 - name: 删除默认cni配置 diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml index 2a0c16d..4b5bfb5 100644 --- a/roles/flannel/tasks/main.yml +++ b/roles/flannel/tasks/main.yml @@ -16,11 +16,16 @@ - name: 准备 flannel DaemonSet yaml文件 template: src=kube-flannel.yaml.j2 dest=/root/local/kube-system/flannel/kube-flannel.yaml -# 只需单节点执行一次,重复执行的报错可以忽略 +- name: 获取所有已经创建的POD信息 + command: "{{ bin_dir }}/kubectl get pod --all-namespaces" + register: pod_info + run_once: true + +# 只需单节点执行一次 - name: 运行 flannel网络 shell: "{{ bin_dir }}/kubectl create -f /root/local/kube-system/flannel/ && sleep 15" - when: NODE_ID is defined and NODE_ID == "node1" - ignore_errors: true + run_once: true + when: '"flannel" not in pod_info.stdout' # 删除原有cni配置 - name: 删除默认cni配置 diff --git a/roles/kube-master/tasks/main.yml b/roles/kube-master/tasks/main.yml index 129bd07..e46f532 100644 --- a/roles/kube-master/tasks/main.yml +++ b/roles/kube-master/tasks/main.yml @@ -63,3 +63,11 @@ - name: start-kube-scheduler shell: systemctl restart kube-scheduler + +- name: 以轮询的方式等待master服务启动完成 + command: "{{ bin_dir }}/kubectl get node" + register: result + until: 
result.rc == 0 + retries: 5 + delay: 6 + run_once: true diff --git a/roles/kube-node/tasks/main.yml b/roles/kube-node/tasks/main.yml index 597fa76..cfba2c8 100644 --- a/roles/kube-node/tasks/main.yml +++ b/roles/kube-node/tasks/main.yml @@ -14,16 +14,20 @@ - bridge - host-local - loopback + +- name: get clusterrolebinding info + command: "{{ bin_dir }}/kubectl get clusterrolebinding --all-namespaces" + register: clusterrolebinding_info + run_once: true ##----------kubelet 配置部分-------------- # kubelet 启动时向 kube-apiserver 发送 TLS bootstrapping 请求,需要绑定该角色 -# 只需单节点执行一次,重复执行的报错可以忽略 -# 增加15s等待kube-apiserver正常工作 +# 只需单节点执行一次 - name: kubelet-bootstrap-setting - shell: "sleep 15 && {{ bin_dir }}/kubectl create clusterrolebinding kubelet-bootstrap \ --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap" + shell: "{{ bin_dir }}/kubectl create clusterrolebinding kubelet-bootstrap \ --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap" - when: NODE_ID is defined and NODE_ID == "node1" - ignore_errors: true + run_once: true + when: '"kubelet-bootstrap" not in clusterrolebinding_info.stdout' #创建bootstrap.kubeconfig配置文件 - name: 设置集群参数 @@ -59,8 +63,8 @@ tags: kubelet - name: approve-kubelet-csr - shell: "sleep 15 && {{ bin_dir }}/kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs {{ bin_dir }}/kubectl certificate approve" - when: NODE_ID is defined and NODE_ID == "node1" + shell: "{{ bin_dir }}/kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs {{ bin_dir }}/kubectl certificate approve" + run_once: true ignore_errors: true ##-------kube-proxy部分---------------- diff --git a/roles/lb/templates/keepalived-backup.conf.j2 b/roles/lb/templates/keepalived-backup.conf.j2 index 06f32cb..6a25279 100644 --- a/roles/lb/templates/keepalived-backup.conf.j2 +++ b/roles/lb/templates/keepalived-backup.conf.j2 @@ -10,6 +10,6 @@ vrrp_instance VI-kube-master { virtual_router_id {{ ROUTER_ID }} advert_int 3 virtual_ipaddress { - {{ MASTER_IP }} + {{ MASTER_IP }}/24 } } diff --git a/roles/lb/templates/keepalived-master.conf.j2 
b/roles/lb/templates/keepalived-master.conf.j2 index d9d6ed2..07cad7c 100644 --- a/roles/lb/templates/keepalived-master.conf.j2 +++ b/roles/lb/templates/keepalived-master.conf.j2 @@ -19,6 +19,6 @@ vrrp_instance VI-kube-master { check-haproxy } virtual_ipaddress { - {{ MASTER_IP }} + {{ MASTER_IP }}/24 } }