diff --git a/playbooks/07.cluster-addon.yml b/playbooks/07.cluster-addon.yml
index 6f29173..63b474f 100644
--- a/playbooks/07.cluster-addon.yml
+++ b/playbooks/07.cluster-addon.yml
@@ -1,5 +1,4 @@
 # to install clust-addons
-- hosts:
-  - kube_node
+- hosts: localhost
   roles:
   - cluster-addon
diff --git a/playbooks/90.setup.yml b/playbooks/90.setup.yml
index 65ea1a1..97008de 100644
--- a/playbooks/90.setup.yml
+++ b/playbooks/90.setup.yml
@@ -71,7 +71,6 @@
   - { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
 
 # to install cluster-addons
-- hosts:
-  - kube_node
+- hosts: localhost
   roles:
   - cluster-addon
diff --git a/playbooks/96.update-certs.yml b/playbooks/96.update-certs.yml
index 1fdb664..f04817e 100644
--- a/playbooks/96.update-certs.yml
+++ b/playbooks/96.update-certs.yml
@@ -41,7 +41,6 @@
   - { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
 
 # to install cluster-addons
-- hosts:
-  - kube_node
+- hosts: localhost
   roles:
   - cluster-addon
diff --git a/roles/cluster-addon/tasks/cilium_connectivity_check.yml b/roles/cluster-addon/tasks/cilium_connectivity_check.yml
index dc174d4..3690f09 100644
--- a/roles/cluster-addon/tasks/cilium_connectivity_check.yml
+++ b/roles/cluster-addon/tasks/cilium_connectivity_check.yml
@@ -29,5 +29,4 @@
 - debug:
     msg: "[重要]: 请查看命名空间cilium-test下所有pod,如果均为Running状态,且没有重启数增长,说明cilium连接测试正常。 \
         测试观察一段时间可以整体删除该命名空间所有资源(kubectl delete ns cilium-test)"
-  run_once: true
-  connection: local
+  when: 'cilium_connectivity_check|bool'
diff --git a/roles/cluster-addon/tasks/coredns.yml b/roles/cluster-addon/tasks/coredns.yml
index 97781da..b28c26a 100644
--- a/roles/cluster-addon/tasks/coredns.yml
+++ b/roles/cluster-addon/tasks/coredns.yml
@@ -1,9 +1,7 @@
-- name: 准备 DNS的部署文件
-  template: src=dns/coredns.yaml.j2 dest={{ cluster_dir }}/yml/coredns.yaml
-  run_once: true
-  connection: local
+- block:
+  - name: 准备 DNS的部署文件
+    template: src=dns/coredns.yaml.j2 dest={{ cluster_dir }}/yml/coredns.yaml
 
-- name: 创建coredns部署
-  shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/coredns.yaml"
-  run_once: true
-  connection: local
+  - name: 创建coredns部署
+    shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/coredns.yaml"
+  when: 'dns_install == "yes"'
diff --git a/roles/cluster-addon/tasks/dashboard.yml b/roles/cluster-addon/tasks/dashboard.yml
index 5301ed0..209216b 100644
--- a/roles/cluster-addon/tasks/dashboard.yml
+++ b/roles/cluster-addon/tasks/dashboard.yml
@@ -1,18 +1,14 @@
-- name: prepare some dirs
-  file: name={{ cluster_dir }}/yml/dashboard state=directory
-  run_once: true
-  connection: local
+- block:
+  - name: prepare some dirs
+    file: name={{ cluster_dir }}/yml/dashboard state=directory
 
-- name: 准备 dashboard的部署文件
-  template: src=dashboard/{{ item }}.j2 dest={{ cluster_dir }}/yml/dashboard/{{ item }}
-  with_items:
-  - "kubernetes-dashboard.yaml"
-  - "admin-user-sa-rbac.yaml"
-  - "read-user-sa-rbac.yaml"
-  run_once: true
-  connection: local
+  - name: 准备 dashboard的部署文件
+    template: src=dashboard/{{ item }}.j2 dest={{ cluster_dir }}/yml/dashboard/{{ item }}
+    with_items:
+    - "kubernetes-dashboard.yaml"
+    - "admin-user-sa-rbac.yaml"
+    - "read-user-sa-rbac.yaml"
 
-- name: 创建 dashboard部署
-  shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/dashboard/"
-  run_once: true
-  connection: local
+  - name: 创建 dashboard部署
+    shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/dashboard/"
+  when: 'dashboard_install == "yes"'
diff --git a/roles/cluster-addon/tasks/main.yml b/roles/cluster-addon/tasks/main.yml
index 025f31b..ad1b911 100644
--- a/roles/cluster-addon/tasks/main.yml
+++ b/roles/cluster-addon/tasks/main.yml
@@ -1,8 +1,6 @@
 - name: 获取所有已经创建的POD信息
   command: "{{ base_dir }}/bin/kubectl get pod --all-namespaces"
   register: pod_info
-  connection: local
-  run_once: true
 
 - name: 注册变量 DNS_SVC_IP
   shell: echo {{ SERVICE_CIDR }}|cut -d/ -f1|awk -F. '{print $1"."$2"."$3"."$4+2}'
@@ -32,7 +30,7 @@
   when: '"nfs-client-provisioner" not in pod_info.stdout and nfs_provisioner_install == "yes"'
 
 - import_tasks: cilium_connectivity_check.yml
-  when: 'CLUSTER_NETWORK == "cilium" and cilium_connectivity_check|bool'
+  when: 'CLUSTER_NETWORK == "cilium"'
 
 - import_tasks: network_check.yml
   when: 'network_check_enabled|bool and CLUSTER_NETWORK != "cilium"'
diff --git a/roles/cluster-addon/tasks/metrics-server.yml b/roles/cluster-addon/tasks/metrics-server.yml
index 3e5950c..7b86a59 100644
--- a/roles/cluster-addon/tasks/metrics-server.yml
+++ b/roles/cluster-addon/tasks/metrics-server.yml
@@ -1,10 +1,7 @@
-- name: 准备 metrics-server的部署文件
-  template: src=metrics-server/components.yaml.j2 dest={{ cluster_dir }}/yml/metrics-server.yaml
-  run_once: true
-  connection: local
+- block:
+  - name: 准备 metrics-server的部署文件
+    template: src=metrics-server/components.yaml.j2 dest={{ cluster_dir }}/yml/metrics-server.yaml
 
-
-- name: 创建 metrics-server部署
-  shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/metrics-server.yaml"
-  run_once: true
-  connection: local
+  - name: 创建 metrics-server部署
+    shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/metrics-server.yaml"
+  when: 'metricsserver_install == "yes"'
diff --git a/roles/cluster-addon/tasks/network_check.yml b/roles/cluster-addon/tasks/network_check.yml
index 1765e5f..6f2cd00 100644
--- a/roles/cluster-addon/tasks/network_check.yml
+++ b/roles/cluster-addon/tasks/network_check.yml
@@ -17,5 +17,4 @@
 - debug:
     msg: "[重要]: 请查看命名空间network-test下所有pod,如果均为Completed状态,且没有重启数增长,说明网络连接测试正常。 \
         如果有Pending状态,部分测试需要多节点集群才能完成,如果希望禁用网络测试执行(kubectl delete ns network-test)"
-  run_once: true
-  connection: local
+  when: 'network_check_enabled|bool'
diff --git a/roles/cluster-addon/tasks/nfs-provisioner.yml b/roles/cluster-addon/tasks/nfs-provisioner.yml
index 6f8842b..5dc5606 100644
--- a/roles/cluster-addon/tasks/nfs-provisioner.yml
+++ b/roles/cluster-addon/tasks/nfs-provisioner.yml
@@ -1,17 +1,13 @@
-- name: 准备 nfs-provisioner 配置目录
-  file: name={{ cluster_dir }}/yml/nfs-provisioner state=directory
-  run_once: true
-  connection: local
+- block:
+  - name: 准备 nfs-provisioner 配置目录
+    file: name={{ cluster_dir }}/yml/nfs-provisioner state=directory
 
-- name: 准备 nfs-provisioner部署文件
-  template: src=nfs-provisioner/{{ item }}.j2 dest={{ cluster_dir }}/yml/nfs-provisioner/{{ item }}
-  with_items:
-  - "nfs-provisioner.yaml"
-  - "test-pod.yaml"
-  run_once: true
-  connection: local
+  - name: 准备 nfs-provisioner部署文件
+    template: src=nfs-provisioner/{{ item }}.j2 dest={{ cluster_dir }}/yml/nfs-provisioner/{{ item }}
+    with_items:
+    - "nfs-provisioner.yaml"
+    - "test-pod.yaml"
 
-- name: 创建 nfs-provisioner部署
-  shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/nfs-provisioner/nfs-provisioner.yaml"
-  run_once: true
-  connection: local
+  - name: 创建 nfs-provisioner部署
+    shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/nfs-provisioner/nfs-provisioner.yaml"
+  when: 'nfs_provisioner_install == "yes"'
diff --git a/roles/cluster-addon/tasks/nodelocaldns.yml b/roles/cluster-addon/tasks/nodelocaldns.yml
index 9b3697b..be47fd1 100644
--- a/roles/cluster-addon/tasks/nodelocaldns.yml
+++ b/roles/cluster-addon/tasks/nodelocaldns.yml
@@ -1,16 +1,12 @@
-- name: 准备dnscache的部署文件
-  template: src=dns/nodelocaldns-ipvs.yaml.j2 dest={{ cluster_dir }}/yml/nodelocaldns.yaml
-  when: "PROXY_MODE == 'ipvs'"
-  run_once: true
-  connection: local
+- block:
+  - name: 准备dnscache的部署文件
+    template: src=dns/nodelocaldns-ipvs.yaml.j2 dest={{ cluster_dir }}/yml/nodelocaldns.yaml
+    when: "PROXY_MODE == 'ipvs'"
 
-- name: 准备dnscache的部署文件
-  template: src=dns/nodelocaldns-iptables.yaml.j2 dest={{ cluster_dir }}/yml/nodelocaldns.yaml
-  when: "PROXY_MODE == 'iptables'"
-  run_once: true
-  connection: local
+  - name: 准备dnscache的部署文件
+    template: src=dns/nodelocaldns-iptables.yaml.j2 dest={{ cluster_dir }}/yml/nodelocaldns.yaml
+    when: "PROXY_MODE == 'iptables'"
 
-- name: 创建dnscache部署
-  shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/nodelocaldns.yaml"
-  run_once: true
-  connection: local
+  - name: 创建dnscache部署
+    shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/yml/nodelocaldns.yaml"
+  when: 'ENABLE_LOCAL_DNS_CACHE|bool'
diff --git a/roles/cluster-addon/tasks/prometheus.yml b/roles/cluster-addon/tasks/prometheus.yml
index 7234cb5..345dc73 100644
--- a/roles/cluster-addon/tasks/prometheus.yml
+++ b/roles/cluster-addon/tasks/prometheus.yml
@@ -42,5 +42,4 @@
   shell: "{{ base_dir }}/bin/helm upgrade prometheus --install \
          -n {{ prom_namespace }} -f {{ cluster_dir }}/yml/prom-values.yaml \
          {{ base_dir }}/roles/cluster-addon/files/kube-prometheus-stack-{{ prom_chart_ver }}.tgz"
-  run_once: true
-  connection: local
+  when: 'prom_install == "yes"'
diff --git a/roles/cluster-addon/vars/main.yml b/roles/cluster-addon/vars/main.yml
index d6c93d3..23e2510 100644
--- a/roles/cluster-addon/vars/main.yml
+++ b/roles/cluster-addon/vars/main.yml
@@ -1 +1,4 @@
 # default values
+
+# CHANGE_CA: when set true, force to change ca certs
+CHANGE_CA: false