From d17d938dfc70aaf165dc8ca0ea76e45ad6ee3172 Mon Sep 17 00:00:00 2001
From: gjmzj
Date: Wed, 29 May 2019 20:45:25 +0800
Subject: [PATCH] Adjust the network/cluster-addon installation workflow
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 90.setup.yml                                 | 23 ++-----
 roles/calico/templates/calico-v3.2.yaml.j2   |  7 ---
 roles/calico/templates/calico-v3.3.yaml.j2   |  7 ---
 roles/calico/templates/calico-v3.4.yaml.j2   |  7 ---
 roles/cilium/templates/cilium.yaml.j2        | 16 -----
 roles/cluster-addon/defaults/main.yml        |  2 +-
 roles/cluster-addon/tasks/ingress.yml        |  8 +--
 roles/cluster-addon/tasks/main.yml           | 62 ++++++++-----------
 .../templates/metallb/metallb.yaml.j2        |  5 --
 roles/flannel/defaults/main.yml              |  6 +-
 roles/flannel/tasks/main.yml                 | 16 ++---
 roles/flannel/templates/kube-flannel.yaml.j2 |  8 ---
 roles/kube-node/defaults/main.yml            |  7 +++
 roles/kube-node/tasks/main.yml               | 27 ++++----
 roles/kube-node/tasks/node_lb.yml            |  9 ++-
 15 files changed, 69 insertions(+), 141 deletions(-)

diff --git a/90.setup.yml b/90.setup.yml
index 2b4ea93..16299b1 100644
--- a/90.setup.yml
+++ b/90.setup.yml
@@ -1,10 +1,10 @@
 # [optional] to synchronize time of nodes with 'chrony'
 - hosts: all
   roles:
-  - { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
+  - { role: chrony, when: "groups['chrony']|length > 0" }
 
-# to create CA, kubeconfig, kube-proxy.kubeconfig etc. on 'deploy' node
-- hosts: deploy
+# to create CA, kubeconfig, kube-proxy.kubeconfig etc.
+- hosts: localhost
   roles:
   - deploy
 
@@ -12,17 +12,10 @@
 - hosts:
   - kube-master
   - kube-node
-  - deploy
   - etcd
-  - lb
   roles:
   - prepare
 
-# [optional] to install load balance service, only needed by multi-master cluster
-- hosts: lb
-  roles:
-  - lb
-
 # to install etcd cluster
 - hosts: etcd
   roles:
@@ -41,23 +34,20 @@
   roles:
   - kube-master
   - kube-node
-  #
   tasks:
   - name: Making master nodes SchedulingDisabled
     shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
-    delegate_to: "{{ groups.deploy[0] }}"
-    when: DEPLOY_MODE != "allinone"
+    when: "inventory_hostname not in groups['kube-node']"
     ignore_errors: true
 
   - name: Setting master role name
     shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
     ignore_errors: true
-    delegate_to: "{{ groups.deploy[0] }}"
 
 # to set up 'kube-node' nodes
 - hosts: kube-node
   roles:
-  - { role: kube-node, when: "DEPLOY_MODE != 'allinone'" }
+  - { role: kube-node, when: "inventory_hostname not in groups['kube-master']" }
 
 # to install network plugin, only one can be chosen
 - hosts:
@@ -74,5 +64,4 @@
 - hosts:
   - kube-node
   roles:
-  - cluster-addon
-
+  - cluster-addon
diff --git a/roles/calico/templates/calico-v3.2.yaml.j2 b/roles/calico/templates/calico-v3.2.yaml.j2
index 13c8584..4e4535c 100644
--- a/roles/calico/templates/calico-v3.2.yaml.j2
+++ b/roles/calico/templates/calico-v3.2.yaml.j2
@@ -361,13 +361,6 @@ spec:
             # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,profile,workloadendpoint,node
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-            # if hosts have multiple net interfaces, set following two ENVs
-            - name: KUBERNETES_SERVICE_HOST
-              value: "{{ MASTER_IP }}"
-            - name: KUBERNETES_SERVICE_PORT
-              value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
           volumeMounts:
             # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
diff --git a/roles/calico/templates/calico-v3.3.yaml.j2 b/roles/calico/templates/calico-v3.3.yaml.j2
index c644f15..b25548b 100644
--- a/roles/calico/templates/calico-v3.3.yaml.j2
+++ b/roles/calico/templates/calico-v3.3.yaml.j2
@@ -370,13 +370,6 @@ spec:
             # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,namespace,serviceaccount,workloadendpoint,node
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-            # if hosts have multiple net interfaces, set following two ENVs
-            - name: KUBERNETES_SERVICE_HOST
-              value: "{{ MASTER_IP }}"
-            - name: KUBERNETES_SERVICE_PORT
-              value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
           volumeMounts:
             # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
diff --git a/roles/calico/templates/calico-v3.4.yaml.j2 b/roles/calico/templates/calico-v3.4.yaml.j2
index 17cb022..d6195c4 100644
--- a/roles/calico/templates/calico-v3.4.yaml.j2
+++ b/roles/calico/templates/calico-v3.4.yaml.j2
@@ -374,13 +374,6 @@ spec:
             # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,namespace,serviceaccount,workloadendpoint,node
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-            # if hosts have multiple net interfaces, set following two ENVs
-            - name: KUBERNETES_SERVICE_HOST
-              value: "{{ MASTER_IP }}"
-            - name: KUBERNETES_SERVICE_PORT
-              value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
           volumeMounts:
             # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
diff --git a/roles/cilium/templates/cilium.yaml.j2 b/roles/cilium/templates/cilium.yaml.j2
index 30cbd82..695b604 100644
--- a/roles/cilium/templates/cilium.yaml.j2
+++ b/roles/cilium/templates/cilium.yaml.j2
@@ -346,14 +346,6 @@ spec:
               key: tofqdns-pre-cache
               name: cilium-config
               optional: true
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-        # if hosts have multiple net interfaces, set following two ENVs
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         image: docker.io/cilium/cilium:{{ cilium_ver }}
         imagePullPolicy: IfNotPresent
         lifecycle:
@@ -886,14 +878,6 @@ spec:
           fieldRef:
             apiVersion: v1
             fieldPath: metadata.uid
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-        # if hosts have multiple net interfaces, set following two ENVs
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         image: docker.io/cilium/cilium-etcd-operator:v2.0.5
         imagePullPolicy: IfNotPresent
         name: cilium-etcd-operator
diff --git a/roles/cluster-addon/defaults/main.yml b/roles/cluster-addon/defaults/main.yml
index 295be49..ee7899c 100644
--- a/roles/cluster-addon/defaults/main.yml
+++ b/roles/cluster-addon/defaults/main.yml
@@ -1,4 +1,4 @@
-# automatic dns installation, choose between "coredns" and "kubedns"
+# automatic dns installation, 'dns_backend' can be "coredns" or "kubedns"
 dns_install: "yes"
 dns_backend: "coredns"
 kubednsVer: "1.14.13"
diff --git a/roles/cluster-addon/tasks/ingress.yml b/roles/cluster-addon/tasks/ingress.yml
index dce1b85..50d9483 100644
--- a/roles/cluster-addon/tasks/ingress.yml
+++ b/roles/cluster-addon/tasks/ingress.yml
@@ -17,8 +17,8 @@
   when: 'traefik_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: Create the traefik deployment
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/ingress/traefik/traefik-ingress.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/ingress/traefik/traefik-ingress.yaml"
+  connection: local
   run_once: true
   when: 'ingress_backend == "traefik"'
   ignore_errors: true
@@ -42,8 +42,8 @@
   when: 'nginx_ingress_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: Create the nginx_ingress deployment
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/ingress/nginx-ingress/nginx-ingress.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/ingress/nginx-ingress/nginx-ingress.yaml"
+  connection: local
   run_once: true
   when: 'ingress_backend == "nginx-ingress"'
   ignore_errors: true
diff --git a/roles/cluster-addon/tasks/main.yml b/roles/cluster-addon/tasks/main.yml
index a2bfdae..0f04b91 100644
--- a/roles/cluster-addon/tasks/main.yml
+++ b/roles/cluster-addon/tasks/main.yml
@@ -1,21 +1,18 @@
-- block:
-  - name: Create related directories on the deploy node
-    file: path={{ item }} state=directory
-    with_items:
-    - /opt/kube/kube-system/kubedns
-    - /opt/kube/kube-system/coredns
-
-  # some parameters in the DNS manifests depend on the hosts file, so use the template module to fill them in
-  - name: Prepare the DNS deployment files
-    template: src={{ item }}.yaml.j2 dest=/opt/kube/kube-system/{{ item }}/{{ item }}.yaml
-    with_items:
-    - kubedns
-    - coredns
-
-  - name: Get info of all created pods
-    command: "{{ bin_dir }}/kubectl get pod --all-namespaces"
-    register: pod_info
-    delegate_to: "{{ groups.deploy[0] }}"
+- name: Create related directories on the node
+  file: path={{ item }} state=directory
+  with_items:
+  - /opt/kube/kube-system
+
+# some parameters in the DNS manifests depend on the hosts file, so use the template module to fill them in
+- name: Prepare the DNS deployment files
+  template: src={{ item }}.yaml.j2 dest=/opt/kube/kube-system/{{ item }}.yaml
+  with_items:
+  - kubedns
+  - coredns
+
+- name: Get info of all created pods
+  command: "{{ bin_dir }}/kubectl get pod --all-namespaces"
+  register: pod_info
   run_once: true
 
 - name: Get info of downloaded offline images
@@ -43,9 +40,8 @@
   when: 'dns_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: Create the {{ dns_backend }} deployment
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ dns_backend }}"
+  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ dns_backend }}.yaml"
   run_once: true
-  delegate_to: "{{ groups.deploy[0] }}"
   when:
   - '"kube-dns" not in pod_info.stdout'
   - '"coredns" not in pod_info.stdout'
@@ -71,9 +67,9 @@
   when: 'metricsserver_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: Create the metrics-server deployment
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/metrics-server"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/metrics-server"
   run_once: true
+  connection: local
   when: '"metrics-server" not in pod_info.stdout and metricsserver_install == "yes"'
   ignore_errors: true
 
@@ -97,10 +93,10 @@
   when: 'dashboard_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: Create the dashboard deployment
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/dashboard && \
-          {{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster-only"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/dashboard && \
+          {{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster-only"
   run_once: true
+  connection: local
   when: '"kubernetes-dashboard" not in pod_info.stdout and dashboard_install == "yes"'
   ignore_errors: true
 
@@ -126,8 +122,8 @@
   when: 'heapster_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: Create the heapster deployment
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster.yaml"
+  connection: local
   run_once: true
   when: '"heapster" not in pod_info.stdout and heapster_install == "yes"'
   ignore_errors: true
@@ -155,18 +151,10 @@
   with_items:
   - "metallb.yaml"
   - "{{ metallb_protocol }}.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
-  run_once: true
 
 - name: Create the metallb controller deployment
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/metallb.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
-  run_once: true
-
-- name: Create the metallb configmap
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ metallb_protocol }}.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/metallb.yaml && \
+          {{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ metallb_protocol }}.yaml"
   run_once: true
   when: '"metallb" not in pod_info.stdout and metallb_install == "yes"'
   ignore_errors: true
-
diff --git a/roles/cluster-addon/templates/metallb/metallb.yaml.j2 b/roles/cluster-addon/templates/metallb/metallb.yaml.j2
index 98dd2cb..3d44aca 100644
--- a/roles/cluster-addon/templates/metallb/metallb.yaml.j2
+++ b/roles/cluster-addon/templates/metallb/metallb.yaml.j2
@@ -154,11 +154,6 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
         ports:
         - name: monitoring
           containerPort: 7472
diff --git a/roles/flannel/defaults/main.yml b/roles/flannel/defaults/main.yml
index 258143b..ed2aec7 100644
--- a/roles/flannel/defaults/main.yml
+++ b/roles/flannel/defaults/main.yml
@@ -1,9 +1,5 @@
-# partial flannel configuration, see roles/flannel/templates/kube-flannel.yaml.j2 for details
+# partial flannel configuration, see docs/setup/network-plugin/flannel.md
 
-# set to true if the nodes have multiple network interfaces
-# setting it to true was also found to fix the occasional 'dial tcp 10.68.0.1:443: i/o timeout' bug inside pods when using ipvs on v1.10
-NODE_WITH_MULTIPLE_NETWORKS: "true"
-
 # set the flannel backend
 #FLANNEL_BACKEND: "host-gw"
 FLANNEL_BACKEND: "vxlan"
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index 2551767..3e02570 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -1,11 +1,8 @@
-- block:
-  - name: Create related directories on the deploy node
-    file: name=/opt/kube/kube-system/flannel state=directory
+- name: Create related directories
+  file: name=/opt/kube/kube-system state=directory
 
-  - name: Configure the flannel DaemonSet yaml file
-    template: src=kube-flannel.yaml.j2 dest=/opt/kube/kube-system/flannel/kube-flannel.yaml
-  delegate_to: "{{ groups.deploy[0] }}"
-  run_once: true
+- name: Configure the flannel DaemonSet yaml file
+  template: src=kube-flannel.yaml.j2 dest=/opt/kube/kube-system/flannel.yaml
 
 - name: Create flannel cni related directories
   file: name={{ item }} state=directory
@@ -61,8 +58,7 @@
 
 # only needs to run once on a single node
 - name: Run the flannel network
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/flannel/ && sleep 5"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/flannel.yaml && sleep 5"
   run_once: true
 
 # delete the old cni config
@@ -74,8 +70,6 @@
   shell: "{{ bin_dir }}/kubectl get pod -n kube-system -o wide|grep 'flannel'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
   register: pod_status
   until: pod_status.stdout == "Running"
-  delegate_to: "{{ groups.deploy[0] }}"
   retries: 15
   delay: 8
   ignore_errors: true
-
diff --git a/roles/flannel/templates/kube-flannel.yaml.j2 b/roles/flannel/templates/kube-flannel.yaml.j2
index 21d701e..083d412 100644
--- a/roles/flannel/templates/kube-flannel.yaml.j2
+++ b/roles/flannel/templates/kube-flannel.yaml.j2
@@ -141,14 +141,6 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-        # if hosts have multiple net interfaces, set following two ENVs
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         volumeMounts:
         - name: run
           mountPath: /run
diff --git a/roles/kube-node/defaults/main.yml b/roles/kube-node/defaults/main.yml
index b169db3..61a1578 100644
--- a/roles/kube-node/defaults/main.yml
+++ b/roles/kube-node/defaults/main.yml
@@ -24,3 +24,10 @@ HARD_EVICTION: "memory.available<200Mi,nodefs.available<10%"
 # "source": based on the source IP address of the request
 # "uri": based on the request URI
 BALANCE_ALG: "roundrobin"
+
+# set the APISERVER address
+KUBE_APISERVER: "{%- if groups['kube-master']|length > 1 -%} \
+                 https://127.0.0.1:6443 \
+                 {%- else -%} \
+                 https://{{ groups['kube-master'][0] }}:6443 \
+                 {%- endif -%}"
diff --git a/roles/kube-node/tasks/main.yml b/roles/kube-node/tasks/main.yml
index 3e167fa..84209fa 100644
--- a/roles/kube-node/tasks/main.yml
+++ b/roles/kube-node/tasks/main.yml
@@ -1,15 +1,3 @@
-# every node runs haproxy to connect to multiple apiservers
-- import_tasks: node_lb.yml
-  when: "inventory_hostname not in groups['kube-master']"
-
-- name: Replace the apiserver address in kubeconfig
-  lineinfile:
-    dest: /root/.kube/config
-    regexp: "^    server"
-    line: "    server: https://127.0.0.1:6443"
-  when: "inventory_hostname not in groups['kube-master']"
-
-# create kubelet/kube-proxy working dirs and the cni config dir
 - name: Create kube-node related directories
   file: name={{ item }} state=directory
   with_items:
@@ -28,6 +16,17 @@
   - loopback
   tags: upgrade_k8s
 
+# every node runs haproxy to connect to multiple apiservers
+- import_tasks: node_lb.yml
+  when: "inventory_hostname not in groups['kube-master']"
+
+- name: Replace the apiserver address in kubeconfig
+  lineinfile:
+    dest: /root/.kube/config
+    regexp: "^    server"
+    line: "    server: {{ KUBE_APISERVER }}"
+  when: "inventory_hostname not in groups['kube-master']"
+
 ##---------- kubelet configuration ----------
 
 - name: Prepare the kubelet certificate signing request
@@ -45,7 +44,7 @@
   shell: "{{ bin_dir }}/kubectl config set-cluster kubernetes \
         --certificate-authority={{ ca_dir }}/ca.pem \
         --embed-certs=true \
-        --server=https://127.0.0.1:6443 \
+        --server={{ KUBE_APISERVER }} \
         --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
   when: "inventory_hostname not in groups['kube-master']"
 
@@ -95,7 +94,7 @@
   lineinfile:
     dest: /etc/kubernetes/kube-proxy.kubeconfig
     regexp: "^    server"
-    line: "    server: https://127.0.0.1:6443"
+    line: "    server: {{ KUBE_APISERVER }}"
   when: "inventory_hostname not in groups['kube-master']"
 
 - name: Replace the apiserver address in kube-proxy.kubeconfig
diff --git a/roles/kube-node/tasks/node_lb.yml b/roles/kube-node/tasks/node_lb.yml
index c061742..b2fa033 100644
--- a/roles/kube-node/tasks/node_lb.yml
+++ b/roles/kube-node/tasks/node_lb.yml
@@ -21,6 +21,11 @@
   shell: systemctl enable haproxy
   ignore_errors: true
 
-- name: Restart the haproxy service
-  shell: systemctl restart haproxy
+- name: Stop the haproxy service
+  shell: systemctl stop haproxy
+  tags: restart_lb
+
+- name: Start the haproxy service
+  shell: systemctl start haproxy
+  when: "groups['kube-master']|length > 1"
   tags: restart_lb