Adjust the network/cluster-addon installation flow

pull/641/head
gjmzj 2019-05-29 20:45:25 +08:00
parent 78cc26db8c
commit d17d938dfc
15 changed files with 69 additions and 141 deletions

View File

@@ -1,10 +1,10 @@
 # [optional] to synchronize time of nodes with 'chrony'
 - hosts: all
   roles:
-  - { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
+  - { role: chrony, when: "groups['chrony']|length > 0" }

-# to create CA, kubeconfig, kube-proxy.kubeconfig etc. on 'deploy' node
-- hosts: deploy
+# to create CA, kubeconfig, kube-proxy.kubeconfig etc.
+- hosts: localhost
   roles:
   - deploy
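Note on the chrony change above: the role is no longer gated on an NTP_ENABLED flag read from the deploy host, but simply on whether the 'chrony' inventory group has members. A minimal sketch of what that looks like in a YAML inventory (the address is a placeholder; an empty group keeps `groups['chrony']` defined while skipping time sync):

    all:
      children:
        chrony:
          hosts:
            192.168.1.1:    # listing any host here enables the chrony role cluster-wide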
@@ -12,17 +12,10 @@
 - hosts:
   - kube-master
   - kube-node
-  - deploy
   - etcd
-  - lb
   roles:
   - prepare

-# [optional] to install loadbalance service, only needed by multi-master cluster
-- hosts: lb
-  roles:
-  - lb

 # to install etcd cluster
 - hosts: etcd
   roles:
@@ -41,23 +34,20 @@
   roles:
   - kube-master
   - kube-node
 #
   tasks:
   - name: Making master nodes SchedulingDisabled
     shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
-    delegate_to: "{{ groups.deploy[0] }}"
-    when: DEPLOY_MODE != "allinone"
+    when: "inventory_hostname not in groups['kube-node']"
     ignore_errors: true
   - name: Setting master role name
     shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
     ignore_errors: true
-    delegate_to: "{{ groups.deploy[0] }}"

 # to set up 'kube-node' nodes
 - hosts: kube-node
   roles:
-  - { role: kube-node, when: "DEPLOY_MODE != 'allinone'" }
+  - { role: kube-node, when: "inventory_hostname not in groups['kube-master']" }

 # to install network plugin, only one can be chosen
 - hosts:
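Two behaviour changes in the master play above: kubectl now runs on the master itself rather than being delegated to a deploy host, and the allinone DEPLOY_MODE test is replaced by plain group membership, so a master that is also listed under kube-node stays schedulable. A hedged verification sketch, not part of the commit:

    - name: verify pure masters are unschedulable (illustrative check only)
      shell: "{{ bin_dir }}/kubectl get node {{ inventory_hostname }} -o jsonpath='{.spec.unschedulable}'"
      register: sched
      failed_when: sched.stdout != 'true'
      when: "inventory_hostname not in groups['kube-node']"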
@@ -74,5 +64,4 @@
 - hosts:
   - kube-node
   roles:
-  - cluster-addon
+  - cluster-addon

View File

@@ -361,13 +361,6 @@ spec:
           # Choose which controllers to run.
           - name: ENABLED_CONTROLLERS
             value: policy,profile,workloadendpoint,node
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-          # if hosts have multiple net interfaces, set following two ENVs
-          - name: KUBERNETES_SERVICE_HOST
-            value: "{{ MASTER_IP }}"
-          - name: KUBERNETES_SERVICE_PORT
-            value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         volumeMounts:
           # Mount in the etcd TLS secrets.
           - mountPath: /calico-secrets
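The same NODE_WITH_MULTIPLE_NETWORKS block is dropped from every network-plugin template in this commit; with KUBE_APISERVER now resolved per node (see the kube-node defaults further down in this diff), pods no longer need their in-cluster apiserver address overridden. For reference, this is what the removed Jinja2 block rendered to when enabled, assuming placeholder values MASTER_IP=192.168.1.1 and KUBE_APISERVER=https://192.168.1.1:6443:

    - name: KUBERNETES_SERVICE_HOST
      value: "192.168.1.1"
    - name: KUBERNETES_SERVICE_PORT
      value: "6443"    # KUBE_APISERVER.split(':')[2]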

View File

@@ -370,13 +370,6 @@ spec:
           # Choose which controllers to run.
           - name: ENABLED_CONTROLLERS
             value: policy,namespace,serviceaccount,workloadendpoint,node
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-          # if hosts have multiple net interfaces, set following two ENVs
-          - name: KUBERNETES_SERVICE_HOST
-            value: "{{ MASTER_IP }}"
-          - name: KUBERNETES_SERVICE_PORT
-            value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         volumeMounts:
           # Mount in the etcd TLS secrets.
           - mountPath: /calico-secrets

View File

@@ -374,13 +374,6 @@ spec:
           # Choose which controllers to run.
           - name: ENABLED_CONTROLLERS
             value: policy,namespace,serviceaccount,workloadendpoint,node
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-          # if hosts have multiple net interfaces, set following two ENVs
-          - name: KUBERNETES_SERVICE_HOST
-            value: "{{ MASTER_IP }}"
-          - name: KUBERNETES_SERVICE_PORT
-            value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         volumeMounts:
           # Mount in the etcd TLS secrets.
           - mountPath: /calico-secrets

View File

@@ -346,14 +346,6 @@ spec:
               key: tofqdns-pre-cache
               name: cilium-config
               optional: true
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-        # if hosts have multiple net interfaces, set following two ENVs
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         image: docker.io/cilium/cilium:{{ cilium_ver }}
         imagePullPolicy: IfNotPresent
         lifecycle:
@@ -886,14 +878,6 @@ spec:
           fieldRef:
             apiVersion: v1
             fieldPath: metadata.uid
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-        # if hosts have multiple net interfaces, set following two ENVs
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         image: docker.io/cilium/cilium-etcd-operator:v2.0.5
         imagePullPolicy: IfNotPresent
         name: cilium-etcd-operator

View File

@@ -1,4 +1,4 @@
-# automatic DNS install; choose between "coredns" and "kubedns"
+# automatic DNS install; 'dns_backend' can be "coredns" or "kubedns"
 dns_install: "yes"
 dns_backend: "coredns"
 kubednsVer: "1.14.13"

View File

@@ -17,8 +17,8 @@
   when: 'traefik_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'

 - name: Create the traefik deployment
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/ingress/traefik/traefik-ingress.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/ingress/traefik/traefik-ingress.yaml"
+  connection: local
   run_once: true
   when: 'ingress_backend == "traefik"'
   ignore_errors: true
@@ -42,8 +42,8 @@
   when: 'nginx_ingress_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'

 - name: Create the nginx_ingress deployment
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/ingress/nginx-ingress/nginx-ingress.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/ingress/nginx-ingress/nginx-ingress.yaml"
+  connection: local
   run_once: true
   when: 'ingress_backend == "nginx-ingress"'
   ignore_errors: true
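Both ingress tasks switch from delegating to a dedicated deploy host to running on the Ansible control machine itself; accordingly the binary path moves from bin_dir (on the managed node) to {{ base_dir }}/bin (the local checkout). The general shape of the pattern, as a sketch with a placeholder manifest path:

    - name: apply a manifest from the control machine (sketch)
      shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/some-addon.yaml"
      connection: local   # run on the ansible controller; no 'deploy' node needed
      run_once: true      # one apply serves the whole cluster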

View File

@@ -1,21 +1,18 @@
 - block:
-  - name: Create the required directories on the deploy node
-    file: path={{ item }} state=directory
-    with_items:
-    - /opt/kube/kube-system/kubedns
-    - /opt/kube/kube-system/coredns
-  # Some parameters in the DNS manifests depend on the hosts file, so the template module fills them in
-  - name: Prepare the DNS deployment files
-    template: src={{ item }}.yaml.j2 dest=/opt/kube/kube-system/{{ item }}/{{ item }}.yaml
-    with_items:
-    - kubedns
-    - coredns
-  - name: Get info on all pods created so far
-    command: "{{ bin_dir }}/kubectl get pod --all-namespaces"
-    register: pod_info
-    delegate_to: "{{ groups.deploy[0] }}"
+  - name: Create the required directory on the node
+    file: path={{ item }} state=directory
+    with_items:
+    - /opt/kube/kube-system
+  # Some parameters in the DNS manifests depend on the hosts file, so the template module fills them in
+  - name: Prepare the DNS deployment files
+    template: src={{ item }}.yaml.j2 dest=/opt/kube/kube-system/{{ item }}.yaml
+    with_items:
+    - kubedns
+    - coredns
+  - name: Get info on all pods created so far
+    command: "{{ bin_dir }}/kubectl get pod --all-namespaces"
+    register: pod_info
     run_once: true
   - name: Get info on downloaded offline images
@@ -43,9 +40,8 @@
   when: 'dns_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'

 - name: Create the {{ dns_backend }} deployment
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ dns_backend }}"
+  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ dns_backend }}.yaml"
   run_once: true
-  delegate_to: "{{ groups.deploy[0] }}"
   when:
   - '"kube-dns" not in pod_info.stdout'
   - '"coredns" not in pod_info.stdout'
@@ -71,9 +67,9 @@
   when: 'metricsserver_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'

 - name: Create the metrics-server deployment
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/metrics-server"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/metrics-server"
   run_once: true
+  connection: local
   when: '"metrics-server" not in pod_info.stdout and metricsserver_install == "yes"'
   ignore_errors: true
@@ -97,10 +93,10 @@
   when: 'dashboard_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'

 - name: Create the dashboard deployment
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/dashboard && \
-        {{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster-only"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/dashboard && \
+        {{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster-only"
   run_once: true
+  connection: local
   when: '"kubernetes-dashboard" not in pod_info.stdout and dashboard_install == "yes"'
   ignore_errors: true
@@ -126,8 +122,8 @@
   when: 'heapster_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'

 - name: Create the heapster deployment
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster.yaml"
+  connection: local
   run_once: true
   when: '"heapster" not in pod_info.stdout and heapster_install == "yes"'
   ignore_errors: true
@@ -155,18 +151,10 @@
   with_items:
   - "metallb.yaml"
   - "{{ metallb_protocol }}.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
   run_once: true
-- name: Create the metallb controller deployment
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/metallb.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
-  run_once: true
 - name: Create the metallb configmap
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ metallb_protocol }}.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/metallb.yaml && \
+        {{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ metallb_protocol }}.yaml"
   run_once: true
   when: '"metallb" not in pod_info.stdout and metallb_install == "yes"'
   ignore_errors: true

View File

@@ -154,11 +154,6 @@ spec:
         valueFrom:
           fieldRef:
             fieldPath: spec.nodeName
-      - name: KUBERNETES_SERVICE_HOST
-        value: "{{ MASTER_IP }}"
-        #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-      - name: KUBERNETES_SERVICE_PORT
-        value: "{{ KUBE_APISERVER.split(':')[2] }}"
       ports:
       - name: monitoring
         containerPort: 7472

View File

@@ -1,9 +1,5 @@
-# For some flannel settings, see roles/flannel/templates/kube-flannel.yaml.j2
+# For some flannel settings, see docs/setup/network-plugin/flannel.md
-# Set this to true if the node hosts have multiple network interfaces
-# Setting it to true was also found to work around an occasional bug on v1.10 with ipvs, where pods hit 'dial tcp 10.68.0.1:443: i/o timeout'
-NODE_WITH_MULTIPLE_NETWORKS: "true"
 # Set the flannel backend
 #FLANNEL_BACKEND: "host-gw"
 FLANNEL_BACKEND: "vxlan"
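With the multi-NIC switch gone, the only tunable left here is the backend. As general flannel behaviour (not something this commit states): vxlan encapsulates traffic and works across routed subnets, while host-gw installs plain routes and requires every node to sit on the same L2 segment.

    # choosing a backend (sketch)
    FLANNEL_BACKEND: "vxlan"      # safe default; works across routed networks
    #FLANNEL_BACKEND: "host-gw"   # faster datapath, needs L2 adjacency between nodes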

View File

@@ -1,11 +1,8 @@
 - block:
-  - name: Create the working directory on the deploy node
-    file: name=/opt/kube/kube-system/flannel state=directory
+  - name: Create the working directory
+    file: name=/opt/kube/kube-system state=directory
-  - name: Generate the flannel DaemonSet yaml file
-    template: src=kube-flannel.yaml.j2 dest=/opt/kube/kube-system/flannel/kube-flannel.yaml
-    delegate_to: "{{ groups.deploy[0] }}"
-    run_once: true
+  - name: Generate the flannel DaemonSet yaml file
+    template: src=kube-flannel.yaml.j2 dest=/opt/kube/kube-system/flannel.yaml
   - name: Create the flannel cni directories
     file: name={{ item }} state=directory
@@ -61,8 +58,7 @@
 # Only needs to run once, on a single node
 - name: Apply the flannel network
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/flannel/ && sleep 5"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/flannel.yaml && sleep 5"
   run_once: true
 # Remove any pre-existing cni config
@@ -74,8 +70,6 @@
   shell: "{{ bin_dir }}/kubectl get pod -n kube-system -o wide|grep 'flannel'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
   register: pod_status
   until: pod_status.stdout == "Running"
-  delegate_to: "{{ groups.deploy[0] }}"
   retries: 15
   delay: 8
   ignore_errors: true
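With retries: 15 and delay: 8, the poll above gives the node-local flannel pod roughly two minutes to reach Running. A jsonpath variant of the same check, as a sketch; the label selector is an assumption, not taken from this template:

    - name: wait for the local flannel pod (sketch; jsonpath instead of grep/awk)
      shell: "{{ bin_dir }}/kubectl get pod -n kube-system -l app=flannel \
            --field-selector spec.nodeName={{ inventory_hostname }} \
            -o jsonpath='{.items[0].status.phase}'"
      register: pod_status
      until: pod_status.stdout == "Running"
      retries: 15   # 15 retries x 8s delay is about a 120s budget
      delay: 8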

View File

@@ -141,14 +141,6 @@ spec:
         valueFrom:
           fieldRef:
             fieldPath: metadata.namespace
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-        # if hosts have multiple net interfaces, set following two ENVs
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         volumeMounts:
         - name: run
           mountPath: /run

View File

@@ -24,3 +24,10 @@ HARD_EVICTION: "memory.available<200Mi,nodefs.available<10%"
 # "source": based on the request's source IP address
 # "uri": based on the request URI
 BALANCE_ALG: "roundrobin"
+
+# Set the APISERVER address
+KUBE_APISERVER: "{%- if groups['kube-master']|length > 1 -%} \
+                 https://127.0.0.1:6443 \
+                 {%- else -%} \
+                 https://{{ groups['kube-master'][0] }}:6443 \
+                 {%- endif -%}"
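This new variable is the pivot of the whole commit: kubeconfigs and network components now point at it instead of a hard-coded address. How it renders, with placeholder addresses:

    # groups['kube-master'] == ['192.168.1.1', '192.168.1.2', '192.168.1.3']
    #   -> KUBE_APISERVER == "https://127.0.0.1:6443"     (via the node-local haproxy)
    # groups['kube-master'] == ['192.168.1.1']
    #   -> KUBE_APISERVER == "https://192.168.1.1:6443"   (straight to the single master)
    - debug:
        msg: "apiserver endpoint resolves to {{ KUBE_APISERVER }}"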

View File

@@ -1,15 +1,3 @@
-# Run haproxy on every node to connect to the multiple apiservers
-- import_tasks: node_lb.yml
-  when: "inventory_hostname not in groups['kube-master']"
-
-- name: Replace the apiserver address in kubeconfig
-  lineinfile:
-    dest: /root/.kube/config
-    regexp: "^    server"
-    line: "    server: https://127.0.0.1:6443"
-  when: "inventory_hostname not in groups['kube-master']"
-
 # Create the kubelet and kube-proxy working directories and the cni config directory
 - name: Create kube-node related directories
   file: name={{ item }} state=directory
   with_items:
@@ -28,6 +16,17 @@
   - loopback
   tags: upgrade_k8s

+# Run haproxy on every node to connect to the multiple apiservers
+- import_tasks: node_lb.yml
+  when: "inventory_hostname not in groups['kube-master']"
+
+- name: Replace the apiserver address in kubeconfig
+  lineinfile:
+    dest: /root/.kube/config
+    regexp: "^    server"
+    line: "    server: {{ KUBE_APISERVER }}"
+  when: "inventory_hostname not in groups['kube-master']"
+
 ##---------- kubelet configuration ----------
 - name: Prepare the kubelet certificate signing request
@@ -45,7 +44,7 @@
   shell: "{{ bin_dir }}/kubectl config set-cluster kubernetes \
         --certificate-authority={{ ca_dir }}/ca.pem \
         --embed-certs=true \
-        --server=https://127.0.0.1:6443 \
+        --server={{ KUBE_APISERVER }} \
         --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
   when: "inventory_hostname not in groups['kube-master']"
@@ -95,7 +94,7 @@
   lineinfile:
     dest: /etc/kubernetes/kube-proxy.kubeconfig
     regexp: "^    server"
-    line: "    server: https://127.0.0.1:6443"
+    line: "    server: {{ KUBE_APISERVER }}"
   when: "inventory_hostname not in groups['kube-master']"

 - name: Replace the apiserver address in kube-proxy.kubeconfig
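The practical effect of these kubeconfig edits shows on single-master clusters: nodes now point straight at the master instead of at a local haproxy that is no longer started (see the node_lb change at the end of this diff). A sketch of the resolved task, with a placeholder master address:

    # single master at 192.168.1.1:
    #   old line:  server: https://127.0.0.1:6443
    #   new line:  server: https://192.168.1.1:6443   # KUBE_APISERVER
    - lineinfile:
        dest: /root/.kube/config
        regexp: "^    server"
        line: "    server: {{ KUBE_APISERVER }}"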

View File

@@ -21,6 +21,11 @@
   shell: systemctl enable haproxy
   ignore_errors: true

-- name: Restart the haproxy service
-  shell: systemctl restart haproxy
+- name: Stop the haproxy service
+  shell: systemctl stop haproxy
   tags: restart_lb
+
+- name: Start the haproxy service
+  shell: systemctl start haproxy
+  when: "groups['kube-master']|length > 1"
+  tags: restart_lb
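The restart task is split so that haproxy is always stopped but only started again on multi-master clusters, matching the kubeconfig change above: single-master nodes talk to the apiserver directly and need no local balancer. Both tasks keep tags: restart_lb, so the balancer can be bounced on its own later; a hedged usage sketch (the playbook filename is an assumption about this repo's layout):

    # rerun only the tagged tasks:
    #   ansible-playbook 05.kube-node.yml -t restart_lb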