Merge pull request #131 from panhongyin/master

修复单节点任务BUG,删除多余变量
pull/129/merge
gjmzj 2018-03-15 17:36:08 +08:00 committed by GitHub
commit 20038698f9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 48 additions and 26 deletions

View File

@@ -10,7 +10,7 @@
192.168.1.1 NODE_IP="192.168.1.1"
[kube-node]
192.168.1.1 NODE_ID=node1 NODE_IP="192.168.1.1"
192.168.1.1 NODE_IP="192.168.1.1"
# 如果启用harbor请配置后面harbor相关参数
[harbor]
@@ -18,7 +18,7 @@
# 预留组后续添加node节点使用
[new-node]
#192.168.1.xx NODE_ID=node6 NODE_IP="192.168.1.xx"
#192.168.1.xx NODE_IP="192.168.1.xx"
[all:vars]
# ---------集群主要参数---------------

View File

@@ -24,9 +24,9 @@ ROUTER_ID=57 # 取值在0-255之间区分多个instance的
MASTER_PORT="8443" # 设置 api-server VIP地址的服务端口
[kube-node]
192.168.1.2 NODE_ID=node1 NODE_IP="192.168.1.2"
192.168.1.3 NODE_ID=node2 NODE_IP="192.168.1.3"
192.168.1.4 NODE_ID=node3 NODE_IP="192.168.1.4"
192.168.1.2 NODE_IP="192.168.1.2"
192.168.1.3 NODE_IP="192.168.1.3"
192.168.1.4 NODE_IP="192.168.1.4"
# 如果启用harbor请配置后面harbor相关参数
[harbor]
@@ -34,8 +34,8 @@ MASTER_PORT="8443" # 设置 api-server VIP地址的服务端口
# 预留组后续添加node节点使用
[new-node]
#192.168.1.xx NODE_ID=node6 NODE_IP="192.168.1.xx"
#192.168.1.xx NODE_ID=node7 NODE_IP="192.168.1.xx"
#192.168.1.xx NODE_IP="192.168.1.xx"
#192.168.1.xx NODE_IP="192.168.1.xx"
[all:vars]
# ---------集群主要参数---------------

View File

@@ -12,9 +12,9 @@
192.168.1.1 NODE_IP="192.168.1.1"
[kube-node]
192.168.1.1 NODE_ID=node1 NODE_IP="192.168.1.1"
192.168.1.2 NODE_ID=node2 NODE_IP="192.168.1.2"
192.168.1.3 NODE_ID=node3 NODE_IP="192.168.1.3"
192.168.1.1 NODE_IP="192.168.1.1"
192.168.1.2 NODE_IP="192.168.1.2"
192.168.1.3 NODE_IP="192.168.1.3"
# 如果启用harbor请配置后面harbor相关参数
[harbor]
@@ -22,7 +22,7 @@
# 预留组后续添加node节点使用
[new-node]
#192.168.1.xx NODE_ID=node6 NODE_IP="192.168.1.xx"
#192.168.1.xx NODE_IP="192.168.1.xx"
[all:vars]
# ---------集群主要参数---------------

View File

@@ -27,11 +27,16 @@
- name: 准备 calico rbac文件
template: src=calico-rbac.yaml.j2 dest=/root/local/kube-system/calico/calico-rbac.yaml
# 只需单节点执行一次,重复执行的报错可以忽略
- name: 获取所有已经创建的POD信息
command: "kubectl get pod --all-namespaces"
register: pod_info
run_once: true
# 只需单节点执行一次
- name: 运行 calico网络
shell: "{{ bin_dir }}/kubectl create -f /root/local/kube-system/calico/ && sleep 15"
when: NODE_ID is defined and NODE_ID == "node1"
ignore_errors: true
run_once: true
when: '"calico" not in pod_info.stdout'
# 删除原有cni配置
- name: 删除默认cni配置

View File

@@ -16,11 +16,16 @@
- name: 准备 flannel DaemonSet yaml文件
template: src=kube-flannel.yaml.j2 dest=/root/local/kube-system/flannel/kube-flannel.yaml
# 只需单节点执行一次,重复执行的报错可以忽略
- name: 获取所有已经创建的POD信息
command: "kubectl get pod --all-namespaces"
register: pod_info
run_once: true
# 只需单节点执行一次
- name: 运行 flannel网络
shell: "{{ bin_dir }}/kubectl create -f /root/local/kube-system/flannel/ && sleep 15"
when: NODE_ID is defined and NODE_ID == "node1"
ignore_errors: true
run_once: true
when: '"flannel" not in pod_info.stdout'
# 删除原有cni配置
- name: 删除默认cni配置

View File

@@ -63,3 +63,11 @@
- name: start-kube-scheduler
shell: systemctl restart kube-scheduler
- name: 以轮询的方式等待master服务启动完成
command: "kubectl get node"
register: result
until: result.rc == 0
retries: 5
delay: 6
run_once: True

View File

@@ -14,16 +14,20 @@
- bridge
- host-local
- loopback
- name: get clusterrolebinding info
command: "kubectl get clusterrolebinding --all-namespaces"
register: clusterrolebinding_info
run_once: true
##----------kubelet 配置部分--------------
# kubelet 启动时向 kube-apiserver 发送 TLS bootstrapping 请求,需要绑定该角色
# 只需单节点执行一次,重复执行的报错可以忽略
# 增加15s等待kube-apiserver正常工作
# 只需单节点执行一次
- name: kubelet-bootstrap-setting
shell: "sleep 15 && {{ bin_dir }}/kubectl create clusterrolebinding kubelet-bootstrap \
shell: "{{ bin_dir }}/kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper --user=kubelet-bootstrap"
when: NODE_ID is defined and NODE_ID == "node1"
ignore_errors: true
run_once: True
when: '"kubelet-bootstrap" not in clusterrolebinding_info.stdout'
#创建bootstrap.kubeconfig配置文件
- name: 设置集群参数
@@ -59,8 +63,8 @@
tags: kubelet
- name: approve-kubelet-csr
shell: "sleep 15 && {{ bin_dir }}/kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs {{ bin_dir }}/kubectl certificate approve"
when: NODE_ID is defined and NODE_ID == "node1"
shell: "{{ bin_dir }}/kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs {{ bin_dir }}/kubectl certificate approve"
run_once: true
ignore_errors: true
##-------kube-proxy部分----------------

View File

@@ -10,6 +10,6 @@ vrrp_instance VI-kube-master {
virtual_router_id {{ ROUTER_ID }}
advert_int 3
virtual_ipaddress {
{{ MASTER_IP }}
{{ MASTER_IP }}/24
}
}

View File

@@ -19,6 +19,6 @@ vrrp_instance VI-kube-master {
check-haproxy
}
virtual_ipaddress {
{{ MASTER_IP }}
{{ MASTER_IP }}/24
}
}