Rename master to control plane - non-breaking changes only (#11394)

K8s is moving away from the "master" terminology, so kubespray should follow the same naming conventions. See 65d886bb30/sig-architecture/naming/recommendations/001-master-control-plane.md

pull/11512/head
parent d4bf3b9dc7
commit 4b324cb0f0
@@ -60,17 +60,17 @@ You can create many different kubernetes topologies by setting the number of
 different classes of hosts. For each class there are options for allocating
 floating IP addresses or not.
 
-- Master nodes with etcd
-- Master nodes without etcd
+- Control plane nodes with etcd
+- Control plane nodes without etcd
 - Standalone etcd hosts
 - Kubernetes worker nodes
 
 Note that the Ansible script will report an invalid configuration if you wind up
 with an even number of etcd instances since that is not a valid configuration. This
 restriction includes standalone etcd nodes that are deployed in a cluster along with
-master nodes with etcd replicas. As an example, if you have three master nodes with
-etcd replicas and three standalone etcd nodes, the script will fail since there are
-now six total etcd replicas.
+control plane nodes with etcd replicas. As an example, if you have three control plane
+nodes with etcd replicas and three standalone etcd nodes, the script will fail since
+there are now six total etcd replicas.
 
 ### GlusterFS shared file system
 
@@ -155,6 +155,7 @@ The following tags are defined in playbooks:
 | container_engine_accelerator | Enable nvidia accelerator for runtimes |
 | container-engine | Configuring container engines |
 | container-runtimes | Configuring container runtimes |
+| control-plane | Configuring K8s control plane node role |
 | coredns | Configuring coredns deployment |
 | crio | Configuring crio container engine for hosts |
 | crun | Configuring crun runtime |
@@ -199,7 +200,7 @@ The following tags are defined in playbooks:
 | local-path-provisioner | Configure External provisioner: local-path |
 | local-volume-provisioner | Configure External provisioner: local-volume |
 | macvlan | Network plugin macvlan |
-| master | Configuring K8s master node role |
+| master (DEPRECATED) | Deprecated - see `control-plane` |
 | metallb | Installing and configuring metallb |
 | metrics_server | Configuring metrics_server |
 | netchecker | Installing netchecker K8s app |
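Migration note: with the deprecated alias above in place, runs can start using the new tag right away, e.g. `ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml --become --tags control-plane` (inventory path hypothetical); `--tags master` keeps working for now but is deprecated.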
@@ -210,7 +211,7 @@ The following tags are defined in playbooks:
 | node | Configuring K8s minion (compute) node role |
 | nodelocaldns | Configuring nodelocaldns daemonset |
 | node-label | Tasks linked to labeling of nodes |
-| node-webhook | Tasks linked to webhook (grating access to resources) |
+| node-webhook | Tasks linked to webhook (granting access to resources) |
 | nvidia_gpu | Enable nvidia accelerator for runtimes |
 | oci | Cloud provider: oci |
 | persistent_volumes | Configure csi volumes |
@@ -14,7 +14,7 @@ Installs docker in etcd group members and runs etcd on docker containers. Only u
 
 ### Kubeadm
 
-This deployment method is experimental and is only available for new deployments. This deploys etcd as a static pod in master hosts.
+This deployment method is experimental and is only available for new deployments. This deploys etcd as a static pod on control plane hosts.
 
 ## Metrics
 
@@ -13,7 +13,7 @@
   tasks:
     - name: Include kubespray-default variables
       include_vars: ../roles/kubespray-defaults/defaults/main/main.yml
-    - name: Copy get_cinder_pvs.sh to master
+    - name: Copy get_cinder_pvs.sh to first control plane node
       copy:
         src: get_cinder_pvs.sh
        dest: /tmp
@@ -36,7 +36,7 @@
     - { role: kubespray-defaults}
     - { role: kubernetes/preinstall, tags: preinstall }
 
-- name: Handle upgrades to master components first to maintain backwards compat.
+- name: Handle upgrades to control plane components first to maintain backwards compat.
   hosts: kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
@@ -75,8 +75,8 @@ loadbalancer_apiserver_healthcheck_port: 8081
 # skip_http_proxy_on_os_packages: false
 
 ## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
-## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
-## no_proxy variable, set below to true:
+## pods will restart) when adding or removing workers. To override this behaviour by only including control plane nodes
+## in the no_proxy variable, set below to true:
 no_proxy_exclude_workers: false
 
 ## Certificate Management
@@ -272,7 +272,7 @@ default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
 # kube_cpu_reserved: 100m
 # kube_ephemeral_storage_reserved: 2Gi
 # kube_pid_reserved: "1000"
-# Reservation for master hosts
+# Reservation for control plane hosts
 # kube_master_memory_reserved: 512Mi
 # kube_master_cpu_reserved: 200m
 # kube_master_ephemeral_storage_reserved: 2Gi
@@ -33,7 +33,7 @@
     - { role: remove-node/remove-etcd-node }
     - { role: reset, tags: reset, when: reset_nodes | default(True) | bool }
 
-# Currently cannot remove first master or etcd
+# Currently cannot remove first control plane node or first etcd node
 - name: Post node removal
   hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
   gather_facts: false
@@ -38,7 +38,7 @@
 - name: Install etcd
   import_playbook: install_etcd.yml
 
-- name: Handle upgrades to master components first to maintain backwards compat.
+- name: Handle upgrades to control plane components first to maintain backwards compat.
   gather_facts: false
   hosts: kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -60,7 +60,7 @@
     - { role: kubernetes-apps, tags: csi-driver }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
 
-- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
+- name: Upgrade calico and external cloud provider on all control plane nodes, calico-rrs, and nodes
   hosts: kube_control_plane:calico_rr:kube_node
   gather_facts: false
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -13,19 +13,19 @@
   service:
     name: etcd
     state: restarted
-  when: is_etcd_master
+  when: ('etcd' in group_names)
   listen: Restart etcd
 
 - name: Reload etcd-events
   service:
     name: etcd-events
     state: restarted
-  when: is_etcd_master
+  when: ('etcd' in group_names)
   listen: Restart etcd-events
 
 - name: Wait for etcd up
   uri:
-    url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
+    url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
     validate_certs: false
     client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
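The substitution above recurs throughout the PR, so it is worth spelling out once: `group_names` is an Ansible magic variable that, on each host, holds the list of inventory groups the host belongs to, so `('etcd' in group_names)` tests membership directly instead of going through the kubespray-defined `is_etcd_master` fact. A minimal, self-contained sketch of the pattern (hypothetical play, not part of the diff):

```yaml
---
# Hypothetical playbook demonstrating the membership test used above.
# group_names is built into Ansible: for each host it lists the inventory
# groups that host belongs to, so no derived fact like is_etcd_master is needed.
- name: Demonstrate the group_names membership test
  hosts: all
  gather_facts: false
  tasks:
    - name: Runs only on members of the [etcd] inventory group
      ansible.builtin.debug:
        msg: "{{ inventory_hostname }} is an etcd member"
      when: ('etcd' in group_names)
```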
@@ -40,7 +40,7 @@
 
 - name: Wait for etcd-events up
   uri:
-    url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
+    url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
     validate_certs: false
     client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
@@ -9,7 +9,7 @@
   check_mode: false
   run_once: true
   when:
-    - is_etcd_master
+    - ('etcd' in group_names)
     - etcd_cluster_setup
   tags:
     - facts
@@ -30,7 +30,7 @@
   check_mode: false
   run_once: true
   when:
-    - is_etcd_master
+    - ('etcd' in group_names)
     - etcd_events_cluster_setup
   tags:
     - facts
@@ -43,7 +43,7 @@
 
 - name: Configure | Refresh etcd config
   include_tasks: refresh_config.yml
-  when: is_etcd_master
+  when: ('etcd' in group_names)
 
 - name: Configure | Copy etcd.service systemd file
   template:
@@ -54,7 +54,9 @@
     # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
     # Remove once we drop support for systemd < 250
     validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:etcd-{{ etcd_deployment_type }}.service'"
-  when: is_etcd_master and etcd_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_cluster_setup
 
 - name: Configure | Copy etcd-events.service systemd file
   template:
@@ -65,12 +67,14 @@
     validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:etcd-events-{{ etcd_deployment_type }}.service'"
     # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
     # Remove once we drop support for systemd < 250
-  when: is_etcd_master and etcd_events_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_events_cluster_setup
 
 - name: Configure | reload systemd
   systemd_service:
     daemon_reload: true
-  when: is_etcd_master
+  when: ('etcd' in group_names)
 
 # when scaling new etcd will fail to start
 - name: Configure | Ensure etcd is running
@@ -79,7 +83,9 @@
     state: started
     enabled: true
   ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}" # noqa ignore-errors
-  when: is_etcd_master and etcd_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_cluster_setup
 
 # when scaling new etcd will fail to start
 - name: Configure | Ensure etcd-events is running
@@ -88,7 +94,9 @@
     state: started
     enabled: true
   ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}" # noqa ignore-errors
-  when: is_etcd_master and etcd_events_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_events_cluster_setup
 
 - name: Configure | Wait for etcd cluster to be healthy
   shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null"
@@ -102,7 +110,7 @@
   check_mode: false
   run_once: true
   when:
-    - is_etcd_master
+    - ('etcd' in group_names)
     - etcd_cluster_setup
   tags:
     - facts
@@ -125,7 +133,7 @@
   check_mode: false
   run_once: true
   when:
-    - is_etcd_master
+    - ('etcd' in group_names)
     - etcd_events_cluster_setup
   tags:
     - facts
@@ -142,7 +150,9 @@
   ignore_errors: true # noqa ignore-errors
   changed_when: false
   check_mode: false
-  when: is_etcd_master and etcd_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_cluster_setup
   tags:
     - facts
   environment:
@@ -158,7 +168,9 @@
   ignore_errors: true # noqa ignore-errors
   changed_when: false
   check_mode: false
-  when: is_etcd_master and etcd_events_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_events_cluster_setup
   tags:
     - facts
   environment:
@@ -16,7 +16,7 @@
 - name: Trust etcd CA
   include_tasks: upd_ca_trust.yml
   when:
-    - inventory_hostname in groups['etcd'] | union(groups['kube_control_plane']) | unique | sort
+    - ('etcd' in group_names) or ('kube_control_plane' in group_names)
   tags:
     - etcd-secrets
 
@@ -39,7 +39,8 @@
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
     - inventory_hostname in groups['k8s_cluster']
   tags:
-    - master
+    - master # master tag is deprecated and replaced by control-plane
+    - control-plane
     - network
 
 - name: Set etcd_client_cert_serial
@@ -50,7 +51,8 @@
     - kube_network_plugin != "calico" or calico_datastore == "etcd"
     - inventory_hostname in groups['k8s_cluster']
   tags:
-    - master
+    - master # master tag is deprecated and replaced by control-plane
+    - control-plane
     - network
 
 - name: Install etcdctl and etcdutl binary
@@ -61,36 +63,42 @@
     - etcdutl
     - upgrade
   when:
-    - inventory_hostname in groups['etcd']
+    - ('etcd' in group_names)
     - etcd_cluster_setup
 
 - name: Install etcd
   include_tasks: "install_{{ etcd_deployment_type }}.yml"
-  when: is_etcd_master
+  when: ('etcd' in group_names)
   tags:
     - upgrade
 
 - name: Configure etcd
   include_tasks: configure.yml
-  when: is_etcd_master
+  when: ('etcd' in group_names)
 
 - name: Refresh etcd config
   include_tasks: refresh_config.yml
-  when: is_etcd_master
+  when: ('etcd' in group_names)
 
 - name: Restart etcd if certs changed
   command: /bin/true
   notify: Restart etcd
-  when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed | default(false)
+  when:
+    - ('etcd' in group_names)
+    - etcd_cluster_setup
+    - etcd_secret_changed | default(false)
 
 - name: Restart etcd-events if certs changed
   command: /bin/true
   notify: Restart etcd
-  when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed | default(false)
+  when:
+    - ('etcd' in group_names)
+    - etcd_events_cluster_setup
+    - etcd_secret_changed | default(false)
 
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state instead of `new`.
 - name: Refresh etcd config again for idempotency
   include_tasks: refresh_config.yml
-  when: is_etcd_master
+  when: ('etcd' in group_names)
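Alongside the rename, the PR normalizes `and`-joined conditions into list form; the two forms are equivalent because Ansible implicitly AND-s the entries of a `when:` list. A sketch of the equivalence (hypothetical task names):

```yaml
# Equivalent conditionals: entries of a when: list are implicitly AND-ed,
# so the rewrite changes readability, not behavior.
- name: Restart trigger, single-expression form (old style)
  ansible.builtin.command: /bin/true
  changed_when: false
  when: ('etcd' in group_names) and etcd_cluster_setup and etcd_secret_changed | default(false)

- name: Restart trigger, list form (style this PR converges on)
  ansible.builtin.command: /bin/true
  changed_when: false
  when:
    - ('etcd' in group_names)
    - etcd_cluster_setup
    - etcd_secret_changed | default(false)
```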
@@ -5,7 +5,9 @@
     dest: /etc/etcd.env
     mode: "0640"
   notify: Restart etcd
-  when: is_etcd_master and etcd_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_cluster_setup
 
 - name: Refresh config | Create etcd-events config file
   template:
@@ -13,4 +15,6 @@
     dest: /etc/etcd-events.env
     mode: "0640"
   notify: Restart etcd-events
-  when: is_etcd_master and etcd_events_cluster_setup
+  when:
+    - ('etcd' in group_names)
+    - etcd_events_cluster_setup
@@ -1,8 +1,8 @@
 ---
-# If all masters have node role, there are no tainted master and toleration should not be specified.
-- name: Check all masters are node or not
+# If all control plane nodes have the node role, there are no tainted control plane nodes and toleration should not be specified.
+- name: Check all control plane nodes are node or not
   set_fact:
-    masters_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
+    control_plane_nodes_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
 
 - name: Metrics Server | Delete addon dir
   file:
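The renamed fact is a plain group-algebra test: if intersecting `kube_node` with `kube_control_plane` yields the whole `kube_control_plane` group, every control plane node also carries the node role and is therefore untainted. A standalone sketch with literal lists (hypothetical host names):

```yaml
# Hypothetical illustration of the taint heuristic with literal groups.
# intersect() keeps the items common to both lists (order of the first list),
# so the comparison is true exactly when every control plane host is also a worker.
- name: Evaluate the control plane taint heuristic
  hosts: localhost
  gather_facts: false
  tasks:
    - ansible.builtin.debug:
        msg: "{{ ['cp1', 'cp2', 'worker1'] | intersect(['cp1', 'cp2']) == ['cp1', 'cp2'] }}"  # -> True
```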
@@ -85,9 +85,9 @@ spec:
       volumes:
         - name: tmp
           emptyDir: {}
-{% if not masters_are_not_tainted or metrics_server_extra_tolerations is defined %}
+{% if not control_plane_nodes_are_not_tainted or metrics_server_extra_tolerations is defined %}
       tolerations:
-{% if not masters_are_not_tainted %}
+{% if not control_plane_nodes_are_not_tainted %}
         - key: node-role.kubernetes.io/control-plane
           effect: NoSchedule
 {% endif %}
@@ -5,7 +5,7 @@ upgrade_cluster_setup: false
 # By default the external API listens on all interfaces, this can be changed to
 # listen on a specific address/interface.
 # NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost
-# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
+# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
 kube_apiserver_bind_address: 0.0.0.0
 
 # A port range to reserve for services with NodePort visibility.
@@ -38,7 +38,7 @@ kube_controller_manager_leader_elect_renew_deadline: 10s
 # discovery_timeout modifies the discovery timeout
 discovery_timeout: 5m0s
 
-# Instruct first master to refresh kubeadm token
+# Instruct first control plane node to refresh kubeadm token
 kubeadm_refresh_token: true
 
 # Scale down coredns replicas to 0 if not using coredns dns_mode
@@ -1,16 +1,16 @@
 ---
-- name: Master | reload systemd
+- name: Control plane | reload systemd
   systemd_service:
     daemon_reload: true
-  listen: Master | restart kubelet
+  listen: Control plane | restart kubelet
 
-- name: Master | reload kubelet
+- name: Control plane | reload kubelet
   service:
     name: kubelet
     state: restarted
-  listen: Master | restart kubelet
+  listen: Control plane | restart kubelet
 
-- name: Master | Remove apiserver container docker
+- name: Control plane | Remove apiserver container docker
   shell: "set -o pipefail && docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
   args:
     executable: /bin/bash
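Renaming these handlers is safe within the role because tasks notify the `listen` topic rather than the handler's `name`; once the topic string is updated everywhere, any number of handlers can keep subscribing to it and they run in definition order. A reduced sketch of the pattern (the notifying template task is hypothetical):

```yaml
# handlers/main.yml -- both handlers subscribe to one topic via `listen`
# and fire in definition order: daemon-reload first, then the restart.
- name: Control plane | reload systemd
  ansible.builtin.systemd_service:
    daemon_reload: true
  listen: Control plane | restart kubelet

- name: Control plane | reload kubelet
  ansible.builtin.service:
    name: kubelet
    state: restarted
  listen: Control plane | restart kubelet

# tasks/main.yml -- the notifying task only names the topic.
- name: Render kubelet systemd unit (hypothetical task)
  ansible.builtin.template:
    src: kubelet.service.j2
    dest: /etc/systemd/system/kubelet.service
    mode: "0644"
  notify: Control plane | restart kubelet
```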
@@ -19,9 +19,9 @@
   until: remove_apiserver_container.rc == 0
   delay: 1
   when: container_manager == "docker"
-  listen: Master | Restart apiserver
+  listen: Control plane | Restart apiserver
 
-- name: Master | Remove apiserver container containerd/crio
+- name: Control plane | Remove apiserver container containerd/crio
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
   args:
     executable: /bin/bash
@@ -30,9 +30,9 @@
   until: remove_apiserver_container.rc == 0
   delay: 1
   when: container_manager in ['containerd', 'crio']
-  listen: Master | Restart apiserver
+  listen: Control plane | Restart apiserver
 
-- name: Master | Remove scheduler container docker
+- name: Control plane | Remove scheduler container docker
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
   args:
     executable: /bin/bash
@@ -41,9 +41,9 @@
   until: remove_scheduler_container.rc == 0
   delay: 1
   when: container_manager == "docker"
-  listen: Master | Restart kube-scheduler
+  listen: Control plane | Restart kube-scheduler
 
-- name: Master | Remove scheduler container containerd/crio
+- name: Control plane | Remove scheduler container containerd/crio
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
   args:
     executable: /bin/bash
@@ -52,9 +52,9 @@
   until: remove_scheduler_container.rc == 0
   delay: 1
   when: container_manager in ['containerd', 'crio']
-  listen: Master | Restart kube-scheduler
+  listen: Control plane | Restart kube-scheduler
 
-- name: Master | Remove controller manager container docker
+- name: Control plane | Remove controller manager container docker
   shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
   args:
     executable: /bin/bash
@@ -63,9 +63,9 @@
   until: remove_cm_container.rc == 0
   delay: 1
   when: container_manager == "docker"
-  listen: Master | Restart kube-controller-manager
+  listen: Control plane | Restart kube-controller-manager
 
-- name: Master | Remove controller manager container containerd/crio
+- name: Control plane | Remove controller manager container containerd/crio
   shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
   args:
     executable: /bin/bash
@@ -74,9 +74,9 @@
   until: remove_cm_container.rc == 0
   delay: 1
   when: container_manager in ['containerd', 'crio']
-  listen: Master | Restart kube-controller-manager
+  listen: Control plane | Restart kube-controller-manager
 
-- name: Master | wait for kube-scheduler
+- name: Control plane | wait for kube-scheduler
   vars:
     endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
   uri:
@@ -87,10 +87,10 @@
   retries: 60
   delay: 1
   listen:
-    - Master | restart kubelet
-    - Master | Restart kube-scheduler
+    - Control plane | restart kubelet
+    - Control plane | Restart kube-scheduler
 
-- name: Master | wait for kube-controller-manager
+- name: Control plane | wait for kube-controller-manager
   vars:
     endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
   uri:
@@ -101,10 +101,10 @@
   retries: 60
   delay: 1
   listen:
-    - Master | restart kubelet
-    - Master | Restart kube-controller-manager
+    - Control plane | restart kubelet
+    - Control plane | Restart kube-controller-manager
 
-- name: Master | wait for the apiserver to be running
+- name: Control plane | wait for the apiserver to be running
   uri:
     url: "{{ kube_apiserver_endpoint }}/healthz"
     validate_certs: false
@@ -113,5 +113,5 @@
   retries: 60
   delay: 1
   listen:
-    - Master | restart kubelet
-    - Master | Restart apiserver
+    - Control plane | restart kubelet
+    - Control plane | Restart apiserver
@@ -23,7 +23,7 @@
     kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}"
   when: secrets_encryption_file.stat.exists
 
-- name: Set kube_encrypt_token across master nodes
+- name: Set kube_encrypt_token across control plane nodes
   set_fact:
     kube_encrypt_token: "{{ kube_encrypt_token_extracted }}"
   delegate_to: "{{ item }}"
@@ -12,6 +12,6 @@
     - kubelet.conf
     - scheduler.conf
   notify:
-    - "Master | Restart kube-controller-manager"
-    - "Master | Restart kube-scheduler"
-    - "Master | reload kubelet"
+    - "Control plane | Restart kube-controller-manager"
+    - "Control plane | Restart kube-scheduler"
+    - "Control plane | reload kubelet"
@@ -189,7 +189,7 @@
     mode: "0644"
   when: kubeadm_patches is defined and kubeadm_patches.enabled
 
-- name: Kubeadm | Initialize first master
+- name: Kubeadm | Initialize first control plane node
   command: >-
     timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }}
     {{ bin_dir }}/kubeadm init
@@ -205,7 +205,7 @@
   failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
   environment:
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
-  notify: Master | restart kubelet
+  notify: Control plane | restart kubelet
 
 - name: Set kubeadm certificate key
   set_fact:
@@ -250,7 +250,7 @@
   tags:
     - kubeadm_token
 
-- name: Kubeadm | Join other masters
+- name: Kubeadm | Join other control plane nodes
   include_tasks: kubeadm-secondary.yml
 
 - name: Kubeadm | upgrade kubernetes cluster
@@ -260,7 +260,7 @@
     - kubeadm_already_run.stat.exists
 
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
-- name: Kubeadm | Remove taint for master with node role
+- name: Kubeadm | Remove taint for control plane node with node role
   command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
   delegate_to: "{{ first_kube_control_plane }}"
   with_items:
@@ -9,7 +9,7 @@
   delay: 5
   until: _result.status == 200
 
-- name: Kubeadm | Upgrade first master
+- name: Kubeadm | Upgrade first control plane node
   command: >-
     timeout -k 600s 600s
     {{ bin_dir }}/kubeadm
@@ -28,9 +28,9 @@
   failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
   environment:
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
-  notify: Master | restart kubelet
+  notify: Control plane | restart kubelet
 
-- name: Kubeadm | Upgrade other masters
+- name: Kubeadm | Upgrade other control plane nodes
   command: >-
     timeout -k 600s 600s
     {{ bin_dir }}/kubeadm
@@ -49,7 +49,7 @@
   failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
   environment:
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
-  notify: Master | restart kubelet
+  notify: Control plane | restart kubelet
 
 - name: Kubeadm | Remove binding to anonymous user
   command: "{{ kubectl }} -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo --ignore-not-found"
@@ -6,7 +6,7 @@
     line: ' client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
     backup: true
   notify:
-    - "Master | reload kubelet"
+    - "Control plane | reload kubelet"
 
 - name: Fixup kubelet client cert rotation 2/2
   lineinfile:
@@ -15,4 +15,4 @@
     line: ' client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
     backup: true
   notify:
-    - "Master | reload kubelet"
+    - "Control plane | reload kubelet"
@@ -1,5 +1,5 @@
 ---
-- name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
+- name: "Pre-upgrade | Delete control plane manifests if etcd secrets changed"
   file:
     path: "/etc/kubernetes/manifests/{{ item }}.manifest"
     state: absent
@@ -8,14 +8,14 @@
   register: kube_apiserver_manifest_replaced
   when: etcd_secret_changed | default(false)
 
-- name: "Pre-upgrade | Delete master containers forcefully" # noqa no-handler
+- name: "Pre-upgrade | Delete control plane containers forcefully" # noqa no-handler
   shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
   args:
     executable: /bin/bash
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when: kube_apiserver_manifest_replaced.changed
-  register: remove_master_container
+  register: remove_control_plane_container
   retries: 10
-  until: remove_master_container.rc == 0
+  until: remove_control_plane_container.rc == 0
   delay: 1
@@ -71,7 +71,7 @@
     owner: "root"
     mode: "0644"
   when:
-    - not is_kube_master
+    - ('kube_control_plane' not in group_names)
     - not kubelet_conf.stat.exists
     - kubeadm_use_file_discovery
 
@@ -81,7 +81,7 @@
     dest: "{{ kube_config_dir }}/kubeadm-client.conf"
     backup: true
     mode: "0640"
-  when: not is_kube_master
+  when: ('kube_control_plane' not in group_names)
 
 - name: Kubeadm | Create directory to store kubeadm patches
   file:
@@ -101,7 +101,9 @@
 - name: Join to cluster if needed
   environment:
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin"
-  when: not is_kube_master and (not kubelet_conf.stat.exists)
+  when:
+    - ('kube_control_plane' not in group_names)
+    - not kubelet_conf.stat.exists
   block:
 
     - name: Join to cluster
@@ -143,7 +145,7 @@
     backup: true
   when:
     - kubeadm_config_api_fqdn is not defined
-    - not is_kube_master
+    - ('kube_control_plane' not in group_names)
    - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
   notify: Kubeadm | restart kubelet
 
@@ -154,7 +156,7 @@
     line: ' server: {{ kube_apiserver_endpoint }}'
     backup: true
   when:
-    - not is_kube_master
+    - ('kube_control_plane' not in group_names)
     - loadbalancer_apiserver is defined
   notify: Kubeadm | restart kubelet
 
@@ -169,8 +171,8 @@
   tags:
     - kube-proxy
 
-# FIXME(mattymo): Need to point to localhost, otherwise masters will all point
-# incorrectly to first master, creating SPoF.
+# FIXME(mattymo): Need to point to localhost, otherwise control plane nodes will all point
+# incorrectly to first control plane node, creating SPoF.
 - name: Update server field in kube-proxy kubeconfig
   shell: >-
     set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
@@ -42,7 +42,7 @@ kube_memory_reserved: 256Mi
 kube_cpu_reserved: 100m
 # kube_ephemeral_storage_reserved: 2Gi
 # kube_pid_reserved: "1000"
-# Reservation for master hosts
+# Reservation for control plane hosts
 kube_master_memory_reserved: 512Mi
 kube_master_cpu_reserved: 200m
 # kube_master_ephemeral_storage_reserved: 2Gi
@@ -56,7 +56,7 @@ system_memory_reserved: 512Mi
 system_cpu_reserved: 500m
 # system_ephemeral_storage_reserved: 2Gi
 # system_pid_reserved: "1000"
-# Reservation for master hosts
+# Reservation for control plane hosts
 system_master_memory_reserved: 256Mi
 system_master_cpu_reserved: 250m
 # system_master_ephemeral_storage_reserved: 2Gi
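Only the comments change here: variable names such as `kube_master_memory_reserved` keep the old word, which is precisely what makes this PR non-breaking for existing inventories. A hedged override sketch (path and values hypothetical):

```yaml
# inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (hypothetical)
# Reservations for control plane hosts; the kube_master_*/system_master_*
# names are retained for backwards compatibility despite the docs rename.
kube_master_memory_reserved: 1Gi
kube_master_cpu_reserved: 500m
system_master_memory_reserved: 512Mi
system_master_cpu_reserved: 500m
```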
@@ -136,7 +136,7 @@ kubelet_config_extra_args_cgroupfs:
   systemCgroups: /system.slice
   cgroupRoot: /
 
-## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not masters
+## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not control plane nodes
 kubelet_node_config_extra_args: {}
 
 # Maximum number of container log files that can be present for a container.
@@ -148,7 +148,7 @@ kubelet_logfiles_max_size: 10Mi
 ## Support custom flags to be passed to kubelet
 kubelet_custom_flags: []
 
-## Support custom flags to be passed to kubelet only on nodes, not masters
+## Support custom flags to be passed to kubelet only on nodes, not control plane nodes
 kubelet_node_custom_flags: []
 
 # If non-empty, will use this string as identification instead of the actual hostname
@@ -216,7 +216,7 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK') | default(''
 # azure_vmtype: standard
 # Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
 azure_loadbalancer_sku: basic
-# excludes master nodes from standard load balancer.
+# excludes control plane nodes from standard load balancer.
 azure_exclude_master_from_standard_lb: true
 # disables the outbound SNAT for public load balancer rules
 azure_disable_outbound_snat: false
@@ -24,7 +24,7 @@
 - name: Install kube-vip
   import_tasks: loadbalancer/kube-vip.yml
   when:
-    - is_kube_master
+    - ('kube_control_plane' in group_names)
     - kube_vip_enabled
   tags:
     - kube-vip
@@ -32,7 +32,7 @@
 - name: Install nginx-proxy
   import_tasks: loadbalancer/nginx-proxy.yml
   when:
-    - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
+    - ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
     - loadbalancer_apiserver_localhost
     - loadbalancer_apiserver_type == 'nginx'
   tags:
@@ -41,7 +41,7 @@
 - name: Install haproxy
   import_tasks: loadbalancer/haproxy.yml
   when:
-    - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
+    - ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
     - loadbalancer_apiserver_localhost
     - loadbalancer_apiserver_type == 'haproxy'
   tags:
@@ -64,7 +64,7 @@ clusterDNS:
 kubeReservedCgroup: {{ kube_reserved_cgroups }}
 {% endif %}
 kubeReserved:
-{% if is_kube_master | bool %}
+{% if 'kube_control_plane' in group_names %}
   cpu: "{{ kube_master_cpu_reserved }}"
   memory: {{ kube_master_memory_reserved }}
 {% if kube_master_ephemeral_storage_reserved is defined %}
@@ -86,7 +86,7 @@ kubeReserved:
 {% if system_reserved | bool %}
 systemReservedCgroup: {{ system_reserved_cgroups }}
 systemReserved:
-{% if is_kube_master | bool %}
+{% if 'kube_control_plane' in group_names %}
   cpu: "{{ system_master_cpu_reserved }}"
   memory: {{ system_master_memory_reserved }}
 {% if system_master_ephemeral_storage_reserved is defined %}
@@ -106,10 +106,10 @@ systemReserved:
 {% endif %}
 {% endif %}
 {% endif %}
-{% if is_kube_master | bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %}
+{% if ('kube_control_plane' in group_names) and (eviction_hard_control_plane is defined) and eviction_hard_control_plane %}
 evictionHard:
   {{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }}
-{% elif not is_kube_master | bool and eviction_hard is defined and eviction_hard %}
+{% elif ('kube_control_plane' not in group_names) and (eviction_hard is defined) and eviction_hard %}
 evictionHard:
   {{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }}
 {% endif %}
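For orientation, on a host in `kube_control_plane` this template branch emits the control plane reservations defined earlier; a rough sketch of the rendered fragment with the defaults from this diff (actual output depends on inventory variables):

```yaml
# Approximate KubeletConfiguration fragment rendered on a control plane
# host with the default kube_master_*/system_master_* reservations.
kubeReserved:
  cpu: "200m"
  memory: 512Mi
systemReserved:
  cpu: "250m"
  memory: 256Mi
```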
@@ -60,7 +60,7 @@
     - not ignore_assert_errors
     - inventory_hostname in groups.get('etcd',[])
 
-- name: Stop if memory is too small for masters
+- name: Stop if memory is too small for control plane nodes
   assert:
     that: ansible_memtotal_mb >= minimal_master_memory_mb
   when:
@@ -15,7 +15,8 @@
     - bootstrap-os
     - apps
     - network
-    - master
+    - master # master tag is deprecated and replaced by control-plane
+    - control-plane
     - node
   with_items:
     - "{{ kube_config_dir }}"
@@ -39,7 +40,8 @@
     - bootstrap-os
     - apps
     - network
-    - master
+    - master # master tag is deprecated and replaced by control-plane
+    - control-plane
     - node
   with_items:
     - "{{ kube_cert_dir }}"
@@ -1,12 +1,12 @@
 ---
-- name: "Check_tokens | check if the tokens have already been generated on first master"
+- name: "Check_tokens | check if the tokens have already been generated on first control plane node"
   stat:
     path: "{{ kube_token_dir }}/known_tokens.csv"
     get_attributes: false
     get_checksum: true
     get_mime: false
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  register: known_tokens_master
+  register: known_tokens_control_plane
   run_once: true
 
 - name: "Check_tokens | Set default value for 'sync_tokens' and 'gen_tokens' to false"
@@ -17,7 +17,7 @@
 - name: "Check_tokens | Set 'sync_tokens' and 'gen_tokens' to true"
   set_fact:
     gen_tokens: true
-  when: not known_tokens_master.stat.exists and kube_token_auth | default(true)
+  when: not known_tokens_control_plane.stat.exists and kube_token_auth | default(true)
   run_once: true
 
 - name: "Check tokens | check if a cert already exists"
@@ -34,7 +34,7 @@
     {%- set tokens = {'sync': False} -%}
     {%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch)
         if (not hostvars[server].known_tokens.stat.exists) or
-           (hostvars[server].known_tokens.stat.checksum | default('') != known_tokens_master.stat.checksum | default('')) -%}
+           (hostvars[server].known_tokens.stat.checksum | default('') != known_tokens_control_plane.stat.checksum | default('')) -%}
     {%- set _ = tokens.update({'sync': True}) -%}
     {%- endfor -%}
     {{ tokens.sync }}
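The Jinja expression above uses the dict-update trick: loop bodies get their own scope in Jinja, so a plain `{% set %}` inside the loop cannot change an outer flag, but mutating a dict created outside the loop can. A standalone sketch of the same idiom (hypothetical task, simplified loop):

```yaml
# Hypothetical illustration of the dict-update trick used above.
# tokens is created outside the loop; tokens.update() mutates it from
# inside the loop, sidestepping Jinja's per-loop variable scoping.
- name: Decide whether token files must be re-synced
  ansible.builtin.set_fact:
    sync_tokens: >-
      {%- set tokens = {'sync': False} -%}
      {%- for server in groups['kube_control_plane'] -%}
        {%- if hostvars[server].known_tokens.stat.checksum | default('')
              != known_tokens_control_plane.stat.checksum | default('') -%}
          {%- set _ = tokens.update({'sync': True}) -%}
        {%- endif -%}
      {%- endfor -%}
      {{ tokens.sync }}
```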
@@ -8,15 +8,15 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens | default(false)
 
-- name: Gen_tokens | generate tokens for master components
+- name: Gen_tokens | generate tokens for control plane components
   command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
   environment:
     TOKEN_DIR: "{{ kube_token_dir }}"
   with_nested:
     - [ "system:kubectl" ]
     - "{{ groups['kube_control_plane'] }}"
-  register: gentoken_master
-  changed_when: "'Added' in gentoken_master.stdout"
+  register: gentoken_control_plane
+  changed_when: "'Added' in gentoken_control_plane.stdout"
   run_once: true
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: gen_tokens | default(false)
@ -34,7 +34,7 @@
|
||||||
delegate_to: "{{ groups['kube_control_plane'][0] }}"
|
delegate_to: "{{ groups['kube_control_plane'][0] }}"
|
||||||
when: gen_tokens | default(false)
|
when: gen_tokens | default(false)
|
||||||
|
|
||||||
- name: Gen_tokens | Get list of tokens from first master
|
- name: Gen_tokens | Get list of tokens from first control plane node
|
||||||
command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
|
command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
|
||||||
register: tokens_list
|
register: tokens_list
|
||||||
check_mode: false
|
check_mode: false
|
||||||
|
@ -52,7 +52,7 @@
|
||||||
run_once: true
|
run_once: true
|
||||||
when: sync_tokens | default(false)
|
when: sync_tokens | default(false)
|
||||||
|
|
||||||
- name: Gen_tokens | Copy tokens on masters
|
- name: Gen_tokens | Copy tokens on control plane nodes
|
||||||
shell: "set -o pipefail && echo '{{ tokens_data.stdout | quote }}' | base64 -d | tar xz -C /"
|
shell: "set -o pipefail && echo '{{ tokens_data.stdout | quote }}' | base64 -d | tar xz -C /"
|
||||||
args:
|
args:
|
||||||
executable: /bin/bash
|
executable: /bin/bash
|
||||||
|
|
|
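For reference, `with_nested` iterates the cross product of its lists, so the generate task above invokes `kube-gen-token.sh` once per `system:kubectl`/control-plane-host pair. A small sketch of that loop shape, with made-up hostnames standing in for the real group:

```yaml
# Hypothetical demo: prints the token names the generate step would
# pass to kube-gen-token.sh, one per (role, host) combination.
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Show generated token names
      debug:
        msg: "{{ item[0] }}-{{ item[1] }}"
      with_nested:
        - [ "system:kubectl" ]
        - [ "cp-node1", "cp-node2", "cp-node3" ]   # stand-in for groups['kube_control_plane']
```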
@ -243,7 +243,7 @@ kube_network_node_prefix_ipv6: 120
 kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"

 # NOTE: If you specify address/interface and use loadbalancer_apiserver_localhost
-# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
+# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
 kube_apiserver_bind_address: 0.0.0.0

 # https
@ -531,7 +531,6 @@ ssl_ca_dirs: |-
 ]

 # Vars for pointing to kubernetes api endpoints
-is_kube_master: "{{ inventory_hostname in groups['kube_control_plane'] }}"
 kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}"
 kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
 kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
@ -551,9 +550,9 @@ kube_apiserver_global_endpoint: |-
 kube_apiserver_endpoint: |-
   {% if loadbalancer_apiserver is defined -%}
     https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
-  {%- elif not is_kube_master and loadbalancer_apiserver_localhost -%}
+  {%- elif ('kube_control_plane' not in group_names) and loadbalancer_apiserver_localhost -%}
     https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }}
-  {%- elif is_kube_master -%}
+  {%- elif 'kube_control_plane' in group_names -%}
     https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0', '127.0.0.1') }}:{{ kube_apiserver_port }}
   {%- else -%}
     https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
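The deletion of `is_kube_master` above and the `group_names` tests that replace it cancel out behaviorally: `is_kube_master` was defined as `inventory_hostname in groups['kube_control_plane']`, and `group_names` is the list of groups the current host belongs to, so `'kube_control_plane' in group_names` evaluates identically on every host. A quick equivalence sketch, assuming only that the inventory defines a `kube_control_plane` group:

```yaml
# Sanity check: the new group_names test agrees with the old
# is_kube_master definition on every host in the play.
- hosts: all
  gather_facts: false
  tasks:
    - name: Assert the two control plane membership tests are equivalent
      assert:
        that:
          - >-
            ('kube_control_plane' in group_names) ==
            (inventory_hostname in (groups['kube_control_plane'] | default([])))
```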
@ -568,7 +567,6 @@ etcd_events_cluster_enabled: false
 etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}"

 # Vars for pointing to etcd endpoints
-is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
 etcd_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
 etcd_access_address: "{{ access_ip | default(etcd_address) }}"
 etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
@ -8,11 +8,11 @@
     {{ loadbalancer_apiserver.address | default('') }},
   {%- endif -%}
   {%- if no_proxy_exclude_workers | default(false) -%}
-    {% set cluster_or_master = 'kube_control_plane' %}
+    {% set cluster_or_control_plane = 'kube_control_plane' %}
   {%- else -%}
-    {% set cluster_or_master = 'k8s_cluster' %}
+    {% set cluster_or_control_plane = 'k8s_cluster' %}
   {%- endif -%}
-  {%- for item in (groups[cluster_or_master] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
+  {%- for item in (groups[cluster_or_control_plane] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
     {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }},
     {%- if item != hostvars[item].get('ansible_hostname', '') -%}
       {{ hostvars[item]['ansible_hostname'] }},
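Because `{% set %}` variables are local to the template that defines them, renaming `cluster_or_master` here cannot leak into inventories or group_vars; the only observable behavior is which group feeds the loop. A minimal sketch of that selection, with a hypothetical flag value:

```yaml
# Hypothetical: show which host group the no_proxy template iterates,
# depending on no_proxy_exclude_workers.
- hosts: localhost
  gather_facts: false
  vars:
    no_proxy_exclude_workers: true
  tasks:
    - name: Resolve the group the template would walk
      debug:
        msg: "{{ 'kube_control_plane' if (no_proxy_exclude_workers | default(false)) else 'k8s_cluster' }}"
```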
@ -1,7 +1,7 @@
 ---
 # This manifest installs the calico/node container, as well
 # as the Calico CNI plugins and network config on
-# each master and worker node in a Kubernetes cluster.
+# each control plane and worker node in a Kubernetes cluster.
 kind: DaemonSet
 apiVersion: apps/v1
 metadata:
@ -15,14 +15,14 @@
   environment:
     KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube_control_plane'] }}"
-  register: delete_broken_kube_masters
+  register: delete_broken_kube_control_plane_nodes
   failed_when: false
   when: groups['broken_kube_control_plane']

 - name: Fail if unable to delete broken kube_control_plane nodes from cluster
   fail:
     msg: "Unable to delete broken kube_control_plane node: {{ item.item }}"
-  loop: "{{ delete_broken_kube_masters.results }}"
+  loop: "{{ delete_broken_kube_control_plane_nodes.results }}"
   changed_when: false
   when:
     - groups['broken_kube_control_plane']
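The renamed register still carries per-item results: when a task that loops with `with_items` is registered, Ansible stores one entry per iteration under `.results`, each exposing the original loop element as `.item`, which is exactly what the follow-up `fail` task reads. A small standalone demo of that shape:

```yaml
# Standalone demo of register-under-loop: per-item outcomes land in
# per_item.results, with the loop element preserved as item.item.
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Run a command once per item
      command: /bin/true
      with_items: [ "node-a", "node-b" ]
      register: per_item
      failed_when: false
      changed_when: false

    - name: Inspect each result
      debug:
        msg: "{{ item.item }} exited with rc={{ item.rc }}"
      loop: "{{ per_item.results }}"
```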
@ -7,7 +7,7 @@
   when:
     # ignore servers that are not nodes
     - inventory_hostname in groups['k8s_cluster'] and kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
   retries: "{{ delete_node_retries }}"
-  # Sometimes the api-server can have a short window of indisponibility when we delete a master node
+  # Sometimes the api-server can have a short window of unavailability when we delete a control plane node
   delay: "{{ delete_node_delay_seconds }}"
   register: result
   until: result is not failed
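The `retries`/`delay`/`until` trio above is Ansible's standard retry loop; it papers over the brief apiserver blip while a control plane member is being removed. A generic sketch of the same construct, with a hypothetical probe command rather than kubespray's real task:

```yaml
# Hypothetical retry wrapper in the same shape as the delete-node task:
# re-run the command every 10 seconds, up to 5 times, until it succeeds.
- name: Probe a flaky endpoint until it answers
  command: curl -fsSk https://localhost:6443/healthz   # assumed probe, not kubespray's
  register: result
  retries: 5
  delay: 10
  until: result is not failed
  changed_when: false
```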
@ -122,7 +122,7 @@ EOF

 fi
 # Test Cases
-## Test Master API
+## Test Control Plane API
 run_playbook tests/testcases/010_check-apiserver.yml
 run_playbook tests/testcases/015_check-nodes-ready.yml