Rename master to control plane - non-breaking changes only (#11394)

Kubernetes is moving away from the "master" terminology, so kubespray should follow the same naming convention. See 65d886bb30/sig-architecture/naming/recommendations/001-master-control-plane.md
pull/11512/head
Bogdan Sass, 2024-09-06 09:56:19 +03:00, committed by GitHub
parent d4bf3b9dc7
commit 4b324cb0f0
37 changed files with 165 additions and 138 deletions


@ -60,17 +60,17 @@ You can create many different kubernetes topologies by setting the number of
different classes of hosts. For each class there are options for allocating
floating IP addresses or not.
- Master nodes with etcd
- Master nodes without etcd
- Control plane nodes with etcd
- Control plane nodes without etcd
- Standalone etcd hosts
- Kubernetes worker nodes
Note that the Ansible script will report an invalid configuration if you wind up
with an even number of etcd instances since that is not a valid configuration. This
restriction includes standalone etcd nodes that are deployed in a cluster along with
master nodes with etcd replicas. As an example, if you have three master nodes with
etcd replicas and three standalone etcd nodes, the script will fail since there are
now six total etcd replicas.
control plane nodes with etcd replicas. As an example, if you have three control plane
nodes with etcd replicas and three standalone etcd nodes, the script will fail since
there are now six total etcd replicas.
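Whatever tool provisions the hosts, the resulting Ansible inventory ends up grouping them into kube_control_plane, etcd, kube_node and k8s_cluster (the group names used throughout this commit). A minimal sketch of a valid layout, with hypothetical host names and addresses, where the three control plane nodes also carry the etcd replicas so the member count stays odd:

    all:
      hosts:
        cp1:
          ansible_host: 10.0.0.11
        cp2:
          ansible_host: 10.0.0.12
        cp3:
          ansible_host: 10.0.0.13
        worker1:
          ansible_host: 10.0.0.21
      children:
        kube_control_plane:
          hosts:
            cp1:
            cp2:
            cp3:
        etcd:
          hosts:
            cp1:
            cp2:
            cp3:
        kube_node:
          hosts:
            worker1:
        k8s_cluster:
          children:
            kube_control_plane:
            kube_node:

Adding three standalone etcd hosts on top of this layout would bring the member count to six, which is exactly the even-count situation rejected above.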
### GlusterFS shared file system


@ -155,6 +155,7 @@ The following tags are defined in playbooks:
| container_engine_accelerator | Enable nvidia accelerator for runtimes |
| container-engine | Configuring container engines |
| container-runtimes | Configuring container runtimes |
| control-plane | Configuring K8s control plane node role |
| coredns | Configuring coredns deployment |
| crio | Configuring crio container engine for hosts |
| crun | Configuring crun runtime |
@ -199,7 +200,7 @@ The following tags are defined in playbooks:
| local-path-provisioner | Configure External provisioner: local-path |
| local-volume-provisioner | Configure External provisioner: local-volume |
| macvlan | Network plugin macvlan |
| master | Configuring K8s master node role |
| master (DEPRECATED) | Deprecated - see `control-plane` |
| metallb | Installing and configuring metallb |
| metrics_server | Configuring metrics_server |
| netchecker | Installing netchecker K8s app |
@ -210,7 +211,7 @@ The following tags are defined in playbooks:
| node | Configuring K8s minion (compute) node role |
| nodelocaldns | Configuring nodelocaldns daemonset |
| node-label | Tasks linked to labeling of nodes |
| node-webhook | Tasks linked to webhook (grating access to resources) |
| node-webhook | Tasks linked to webhook (granting access to resources)|
| nvidia_gpu | Enable nvidia accelerator for runtimes |
| oci | Cloud provider: oci |
| persistent_volumes | Configure csi volumes |
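The master tag is only deprecated, not removed, so tasks touched by this commit carry both tags during the transition (see the etcd and preinstall hunks further down). A minimal sketch of the pattern, using an illustrative task:

    - name: Example | task selectable by either tag   # hypothetical task, for illustration
      debug:
        msg: "runs with --tags control-plane as well as the deprecated --tags master"
      tags:
        - master          # deprecated alias, kept for backwards compatibility
        - control-plane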


@ -14,7 +14,7 @@ Installs docker in etcd group members and runs etcd on docker containers. Only u
### Kubeadm
This deployment method is experimental and is only available for new deployments. This deploys etcd as a static pod in master hosts.
This deployment method is experimental and is only available for new deployments. This deploys etcd as a static pod on control plane hosts.
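Selecting this method is a single variable; a sketch, assuming it is placed in the etcd group_vars file (the etcd_deployment_type variable also appears in the systemd unit templates later in this diff):

    # inventory/.../group_vars/all/etcd.yml (path is illustrative)
    # Deploy etcd as static pods managed by kubeadm on the control plane hosts (experimental)
    etcd_deployment_type: kubeadm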
## Metrics


@ -13,7 +13,7 @@
tasks:
- name: Include kubespray-default variables
include_vars: ../roles/kubespray-defaults/defaults/main/main.yml
- name: Copy get_cinder_pvs.sh to master
- name: Copy get_cinder_pvs.sh to first control plane node
copy:
src: get_cinder_pvs.sh
dest: /tmp


@ -36,7 +36,7 @@
- { role: kubespray-defaults}
- { role: kubernetes/preinstall, tags: preinstall }
- name: Handle upgrades to master components first to maintain backwards compat.
- name: Handle upgrades to control plane components first to maintain backwards compat.
hosts: kube_control_plane
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: 1


@ -75,8 +75,8 @@ loadbalancer_apiserver_healthcheck_port: 8081
# skip_http_proxy_on_os_packages: false
## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
## no_proxy variable, set below to true:
## pods will restart) when adding or removing workers. To override this behaviour by only including control plane nodes
## in the no_proxy variable, set below to true:
no_proxy_exclude_workers: false
## Certificate Management


@ -272,7 +272,7 @@ default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
# kube_cpu_reserved: 100m
# kube_ephemeral_storage_reserved: 2Gi
# kube_pid_reserved: "1000"
# Reservation for master hosts
# Reservation for control plane hosts
# kube_master_memory_reserved: 512Mi
# kube_master_cpu_reserved: 200m
# kube_master_ephemeral_storage_reserved: 2Gi


@ -33,7 +33,7 @@
- { role: remove-node/remove-etcd-node }
- { role: reset, tags: reset, when: reset_nodes | default(True) | bool }
# Currently cannot remove first master or etcd
# Currently cannot remove first control plane node or first etcd node
- name: Post node removal
hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
gather_facts: false


@ -38,7 +38,7 @@
- name: Install etcd
import_playbook: install_etcd.yml
- name: Handle upgrades to master components first to maintain backwards compat.
- name: Handle upgrades to control plane components first to maintain backwards compat.
gather_facts: false
hosts: kube_control_plane
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@ -60,7 +60,7 @@
- { role: kubernetes-apps, tags: csi-driver }
- { role: upgrade/post-upgrade, tags: post-upgrade }
- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
- name: Upgrade calico and external cloud provider on all control plane nodes, calico-rrs, and nodes
hosts: kube_control_plane:calico_rr:kube_node
gather_facts: false
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"


@ -13,19 +13,19 @@
service:
name: etcd
state: restarted
when: is_etcd_master
when: ('etcd' in group_names)
listen: Restart etcd
- name: Reload etcd-events
service:
name: etcd-events
state: restarted
when: is_etcd_master
when: ('etcd' in group_names)
listen: Restart etcd-events
- name: Wait for etcd up
uri:
url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
validate_certs: false
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
@ -40,7 +40,7 @@
- name: Wait for etcd-events up
uri:
url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
url: "https://{% if 'etcd' in group_names %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2383/health"
validate_certs: false
client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem"
client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
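The handlers above replace the is_etcd_master fact with a direct membership test against Ansible's built-in group_names list. A minimal standalone sketch of the pattern (hypothetical task, for illustration):

    - name: Example | run only on etcd members
      debug:
        msg: "{{ inventory_hostname }} is in the etcd group"
      # group_names lists the inventory groups of the current host,
      # so no precomputed is_etcd_master fact is needed any more
      when: "'etcd' in group_names"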


@ -9,7 +9,7 @@
check_mode: false
run_once: true
when:
- is_etcd_master
- ('etcd' in group_names)
- etcd_cluster_setup
tags:
- facts
@ -30,7 +30,7 @@
check_mode: false
run_once: true
when:
- is_etcd_master
- ('etcd' in group_names)
- etcd_events_cluster_setup
tags:
- facts
@ -43,7 +43,7 @@
- name: Configure | Refresh etcd config
include_tasks: refresh_config.yml
when: is_etcd_master
when: ('etcd' in group_names)
- name: Configure | Copy etcd.service systemd file
template:
@ -54,7 +54,9 @@
# FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
# Remove once we drop support for systemd < 250
validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:etcd-{{ etcd_deployment_type }}.service'"
when: is_etcd_master and etcd_cluster_setup
when:
- ('etcd' in group_names)
- etcd_cluster_setup
- name: Configure | Copy etcd-events.service systemd file
template:
@ -65,12 +67,14 @@
validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:etcd-events-{{ etcd_deployment_type }}.service'"
# FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
# Remove once we drop support for systemd < 250
when: is_etcd_master and etcd_events_cluster_setup
when:
- ('etcd' in group_names)
- etcd_events_cluster_setup
- name: Configure | reload systemd
systemd_service:
daemon_reload: true
when: is_etcd_master
when: ('etcd' in group_names)
# when scaling new etcd will fail to start
- name: Configure | Ensure etcd is running
@ -79,7 +83,9 @@
state: started
enabled: true
ignore_errors: "{{ etcd_cluster_is_healthy.rc == 0 }}" # noqa ignore-errors
when: is_etcd_master and etcd_cluster_setup
when:
- ('etcd' in group_names)
- etcd_cluster_setup
# when scaling new etcd will fail to start
- name: Configure | Ensure etcd-events is running
@ -88,7 +94,9 @@
state: started
enabled: true
ignore_errors: "{{ etcd_events_cluster_is_healthy.rc != 0 }}" # noqa ignore-errors
when: is_etcd_master and etcd_events_cluster_setup
when:
- ('etcd' in group_names)
- etcd_events_cluster_setup
- name: Configure | Wait for etcd cluster to be healthy
shell: "set -o pipefail && {{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -v 'Error: unhealthy cluster' >/dev/null"
@ -102,7 +110,7 @@
check_mode: false
run_once: true
when:
- is_etcd_master
- ('etcd' in group_names)
- etcd_cluster_setup
tags:
- facts
@ -125,7 +133,7 @@
check_mode: false
run_once: true
when:
- is_etcd_master
- ('etcd' in group_names)
- etcd_events_cluster_setup
tags:
- facts
@ -142,7 +150,9 @@
ignore_errors: true # noqa ignore-errors
changed_when: false
check_mode: false
when: is_etcd_master and etcd_cluster_setup
when:
- ('etcd' in group_names)
- etcd_cluster_setup
tags:
- facts
environment:
@ -158,7 +168,9 @@
ignore_errors: true # noqa ignore-errors
changed_when: false
check_mode: false
when: is_etcd_master and etcd_events_cluster_setup
when:
- ('etcd' in group_names)
- etcd_events_cluster_setup
tags:
- facts
environment:


@ -16,7 +16,7 @@
- name: Trust etcd CA
include_tasks: upd_ca_trust.yml
when:
- inventory_hostname in groups['etcd'] | union(groups['kube_control_plane']) | unique | sort
- ('etcd' in group_names) or ('kube_control_plane' in group_names)
tags:
- etcd-secrets
@ -39,7 +39,8 @@
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- inventory_hostname in groups['k8s_cluster']
tags:
- master
- master # master tag is deprecated and replaced by control-plane
- control-plane
- network
- name: Set etcd_client_cert_serial
@ -50,7 +51,8 @@
- kube_network_plugin != "calico" or calico_datastore == "etcd"
- inventory_hostname in groups['k8s_cluster']
tags:
- master
- master # master tag is deprecated and replaced by control-plane
- control-plane
- network
- name: Install etcdctl and etcdutl binary
@ -61,36 +63,42 @@
- etcdutl
- upgrade
when:
- inventory_hostname in groups['etcd']
- ('etcd' in group_names)
- etcd_cluster_setup
- name: Install etcd
include_tasks: "install_{{ etcd_deployment_type }}.yml"
when: is_etcd_master
when: ('etcd' in group_names)
tags:
- upgrade
- name: Configure etcd
include_tasks: configure.yml
when: is_etcd_master
when: ('etcd' in group_names)
- name: Refresh etcd config
include_tasks: refresh_config.yml
when: is_etcd_master
when: ('etcd' in group_names)
- name: Restart etcd if certs changed
command: /bin/true
notify: Restart etcd
when: is_etcd_master and etcd_cluster_setup and etcd_secret_changed | default(false)
when:
- ('etcd' in group_names)
- etcd_cluster_setup
- etcd_secret_changed | default(false)
- name: Restart etcd-events if certs changed
command: /bin/true
notify: Restart etcd
when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed | default(false)
when:
- ('etcd' in group_names)
- etcd_events_cluster_setup
- etcd_secret_changed | default(false)
# After etcd cluster is assembled, make sure that
# initial state of the cluster is in `existing`
# state instead of `new`.
- name: Refresh etcd config again for idempotency
include_tasks: refresh_config.yml
when: is_etcd_master
when: ('etcd' in group_names)


@ -5,7 +5,9 @@
dest: /etc/etcd.env
mode: "0640"
notify: Restart etcd
when: is_etcd_master and etcd_cluster_setup
when:
- ('etcd' in group_names)
- etcd_cluster_setup
- name: Refresh config | Create etcd-events config file
template:
@ -13,4 +15,6 @@
dest: /etc/etcd-events.env
mode: "0640"
notify: Restart etcd-events
when: is_etcd_master and etcd_events_cluster_setup
when:
- ('etcd' in group_names)
- etcd_events_cluster_setup


@ -1,8 +1,8 @@
---
# If all masters have node role, there are no tainted master and toleration should not be specified.
- name: Check all masters are node or not
# If all control plane nodes have the node role, there are no tainted control plane nodes and toleration should not be specified.
- name: Check all control plane nodes are node or not
set_fact:
masters_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
control_plane_nodes_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
- name: Metrics Server | Delete addon dir
file:


@ -85,9 +85,9 @@ spec:
volumes:
- name: tmp
emptyDir: {}
{% if not masters_are_not_tainted or metrics_server_extra_tolerations is defined %}
{% if not control_plane_nodes_are_not_tainted or metrics_server_extra_tolerations is defined %}
tolerations:
{% if not masters_are_not_tainted %}
{% if not control_plane_nodes_are_not_tainted %}
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{% endif %}
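For reference, when the control plane nodes are tainted (that is, not all of them are also in kube_node) and metrics_server_extra_tolerations is undefined, the block above renders roughly this fragment into the Deployment spec (a sketch, indentation simplified):

    tolerations:
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule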


@ -5,7 +5,7 @@ upgrade_cluster_setup: false
# By default the external API listens on all interfaces, this can be changed to
# listen on a specific address/interface.
# NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
kube_apiserver_bind_address: 0.0.0.0
# A port range to reserve for services with NodePort visibility.
@ -38,7 +38,7 @@ kube_controller_manager_leader_elect_renew_deadline: 10s
# discovery_timeout modifies the discovery timeout
discovery_timeout: 5m0s
# Instruct first master to refresh kubeadm token
# Instruct first control plane node to refresh kubeadm token
kubeadm_refresh_token: true
# Scale down coredns replicas to 0 if not using coredns dns_mode


@ -1,16 +1,16 @@
---
- name: Master | reload systemd
- name: Control plane | reload systemd
systemd_service:
daemon_reload: true
listen: Master | restart kubelet
listen: Control plane | restart kubelet
- name: Master | reload kubelet
- name: Control plane | reload kubelet
service:
name: kubelet
state: restarted
listen: Master | restart kubelet
listen: Control plane | restart kubelet
- name: Master | Remove apiserver container docker
- name: Control plane | Remove apiserver container docker
shell: "set -o pipefail && docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
args:
executable: /bin/bash
@ -19,9 +19,9 @@
until: remove_apiserver_container.rc == 0
delay: 1
when: container_manager == "docker"
listen: Master | Restart apiserver
listen: Control plane | Restart apiserver
- name: Master | Remove apiserver container containerd/crio
- name: Control plane | Remove apiserver container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-apiserver* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
args:
executable: /bin/bash
@ -30,9 +30,9 @@
until: remove_apiserver_container.rc == 0
delay: 1
when: container_manager in ['containerd', 'crio']
listen: Master | Restart apiserver
listen: Control plane | Restart apiserver
- name: Master | Remove scheduler container docker
- name: Control plane | Remove scheduler container docker
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
args:
executable: /bin/bash
@ -41,9 +41,9 @@
until: remove_scheduler_container.rc == 0
delay: 1
when: container_manager == "docker"
listen: Master | Restart kube-scheduler
listen: Control plane | Restart kube-scheduler
- name: Master | Remove scheduler container containerd/crio
- name: Control plane | Remove scheduler container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-scheduler* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
args:
executable: /bin/bash
@ -52,9 +52,9 @@
until: remove_scheduler_container.rc == 0
delay: 1
when: container_manager in ['containerd', 'crio']
listen: Master | Restart kube-scheduler
listen: Control plane | Restart kube-scheduler
- name: Master | Remove controller manager container docker
- name: Control plane | Remove controller manager container docker
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty {{ docker_bin_dir }}/docker rm -f"
args:
executable: /bin/bash
@ -63,9 +63,9 @@
until: remove_cm_container.rc == 0
delay: 1
when: container_manager == "docker"
listen: Master | Restart kube-controller-manager
listen: Control plane | Restart kube-controller-manager
- name: Master | Remove controller manager container containerd/crio
- name: Control plane | Remove controller manager container containerd/crio
shell: "set -o pipefail && {{ bin_dir }}/crictl pods --name kube-controller-manager* -q | xargs -I% --no-run-if-empty bash -c '{{ bin_dir }}/crictl stopp % && {{ bin_dir }}/crictl rmp %'"
args:
executable: /bin/bash
@ -74,9 +74,9 @@
until: remove_cm_container.rc == 0
delay: 1
when: container_manager in ['containerd', 'crio']
listen: Master | Restart kube-controller-manager
listen: Control plane | Restart kube-controller-manager
- name: Master | wait for kube-scheduler
- name: Control plane | wait for kube-scheduler
vars:
endpoint: "{{ kube_scheduler_bind_address if kube_scheduler_bind_address != '0.0.0.0' else 'localhost' }}"
uri:
@ -87,10 +87,10 @@
retries: 60
delay: 1
listen:
- Master | restart kubelet
- Master | Restart kube-scheduler
- Control plane | restart kubelet
- Control plane | Restart kube-scheduler
- name: Master | wait for kube-controller-manager
- name: Control plane | wait for kube-controller-manager
vars:
endpoint: "{{ kube_controller_manager_bind_address if kube_controller_manager_bind_address != '0.0.0.0' else 'localhost' }}"
uri:
@ -101,10 +101,10 @@
retries: 60
delay: 1
listen:
- Master | restart kubelet
- Master | Restart kube-controller-manager
- Control plane | restart kubelet
- Control plane | Restart kube-controller-manager
- name: Master | wait for the apiserver to be running
- name: Control plane | wait for the apiserver to be running
uri:
url: "{{ kube_apiserver_endpoint }}/healthz"
validate_certs: false
@ -113,5 +113,5 @@
retries: 60
delay: 1
listen:
- Master | restart kubelet
- Master | Restart apiserver
- Control plane | restart kubelet
- Control plane | Restart apiserver
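Only the handler names and listen topics change; the fan-out behaviour stays the same, so a single notify still triggers every handler listening on the renamed topic. A sketch with a hypothetical task and illustrative file names:

    - name: Example | refresh the apiserver static pod manifest
      template:
        src: kube-apiserver.manifest.j2                        # illustrative source
        dest: /etc/kubernetes/manifests/kube-apiserver.yaml    # illustrative destination
      # one notification reaches both the container-removal handler and the
      # wait-for-apiserver handler, because they listen on the same topic
      notify: Control plane | Restart apiserver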


@ -23,7 +23,7 @@
kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}"
when: secrets_encryption_file.stat.exists
- name: Set kube_encrypt_token across master nodes
- name: Set kube_encrypt_token across control plane nodes
set_fact:
kube_encrypt_token: "{{ kube_encrypt_token_extracted }}"
delegate_to: "{{ item }}"


@ -12,6 +12,6 @@
- kubelet.conf
- scheduler.conf
notify:
- "Master | Restart kube-controller-manager"
- "Master | Restart kube-scheduler"
- "Master | reload kubelet"
- "Control plane | Restart kube-controller-manager"
- "Control plane | Restart kube-scheduler"
- "Control plane | reload kubelet"


@ -189,7 +189,7 @@
mode: "0644"
when: kubeadm_patches is defined and kubeadm_patches.enabled
- name: Kubeadm | Initialize first master
- name: Kubeadm | Initialize first control plane node
command: >-
timeout -k {{ kubeadm_init_timeout }} {{ kubeadm_init_timeout }}
{{ bin_dir }}/kubeadm init
@ -205,7 +205,7 @@
failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
notify: Control plane | restart kubelet
- name: Set kubeadm certificate key
set_fact:
@ -250,7 +250,7 @@
tags:
- kubeadm_token
- name: Kubeadm | Join other masters
- name: Kubeadm | Join other control plane nodes
include_tasks: kubeadm-secondary.yml
- name: Kubeadm | upgrade kubernetes cluster
@ -260,7 +260,7 @@
- kubeadm_already_run.stat.exists
# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
- name: Kubeadm | Remove taint for master with node role
- name: Kubeadm | Remove taint for control plane node with node role
command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
delegate_to: "{{ first_kube_control_plane }}"
with_items:


@ -9,7 +9,7 @@
delay: 5
until: _result.status == 200
- name: Kubeadm | Upgrade first master
- name: Kubeadm | Upgrade first control plane node
command: >-
timeout -k 600s 600s
{{ bin_dir }}/kubeadm
@ -28,9 +28,9 @@
failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
notify: Control plane | restart kubelet
- name: Kubeadm | Upgrade other masters
- name: Kubeadm | Upgrade other control plane nodes
command: >-
timeout -k 600s 600s
{{ bin_dir }}/kubeadm
@ -49,7 +49,7 @@
failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
notify: Control plane | restart kubelet
- name: Kubeadm | Remove binding to anonymous user
command: "{{ kubectl }} -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo --ignore-not-found"


@ -6,7 +6,7 @@
line: ' client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem'
backup: true
notify:
- "Master | reload kubelet"
- "Control plane | reload kubelet"
- name: Fixup kubelet client cert rotation 2/2
lineinfile:
@ -15,4 +15,4 @@
line: ' client-key: /var/lib/kubelet/pki/kubelet-client-current.pem'
backup: true
notify:
- "Master | reload kubelet"
- "Control plane | reload kubelet"


@ -1,5 +1,5 @@
---
- name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
- name: "Pre-upgrade | Delete control plane manifests if etcd secrets changed"
file:
path: "/etc/kubernetes/manifests/{{ item }}.manifest"
state: absent
@ -8,14 +8,14 @@
register: kube_apiserver_manifest_replaced
when: etcd_secret_changed | default(false)
- name: "Pre-upgrade | Delete master containers forcefully" # noqa no-handler
- name: "Pre-upgrade | Delete control plane containers forcefully" # noqa no-handler
shell: "set -o pipefail && docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
args:
executable: /bin/bash
with_items:
- ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
when: kube_apiserver_manifest_replaced.changed
register: remove_master_container
register: remove_control_plane_container
retries: 10
until: remove_master_container.rc == 0
until: remove_control_plane_container.rc == 0
delay: 1


@ -71,7 +71,7 @@
owner: "root"
mode: "0644"
when:
- not is_kube_master
- ('kube_control_plane' not in group_names)
- not kubelet_conf.stat.exists
- kubeadm_use_file_discovery
@ -81,7 +81,7 @@
dest: "{{ kube_config_dir }}/kubeadm-client.conf"
backup: true
mode: "0640"
when: not is_kube_master
when: ('kube_control_plane' not in group_names)
- name: Kubeadm | Create directory to store kubeadm patches
file:
@ -101,7 +101,9 @@
- name: Join to cluster if needed
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin"
when: not is_kube_master and (not kubelet_conf.stat.exists)
when:
- ('kube_control_plane' not in group_names)
- not kubelet_conf.stat.exists
block:
- name: Join to cluster
@ -143,7 +145,7 @@
backup: true
when:
- kubeadm_config_api_fqdn is not defined
- not is_kube_master
- ('kube_control_plane' not in group_names)
- kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
notify: Kubeadm | restart kubelet
@ -154,7 +156,7 @@
line: ' server: {{ kube_apiserver_endpoint }}'
backup: true
when:
- not is_kube_master
- ('kube_control_plane' not in group_names)
- loadbalancer_apiserver is defined
notify: Kubeadm | restart kubelet
@ -169,8 +171,8 @@
tags:
- kube-proxy
# FIXME(mattymo): Need to point to localhost, otherwise masters will all point
# incorrectly to first master, creating SPoF.
# FIXME(mattymo): Need to point to localhost, otherwise control plane nodes will all point
# incorrectly to first control plane node, creating SPoF.
- name: Update server field in kube-proxy kubeconfig
shell: >-
set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml


@ -42,7 +42,7 @@ kube_memory_reserved: 256Mi
kube_cpu_reserved: 100m
# kube_ephemeral_storage_reserved: 2Gi
# kube_pid_reserved: "1000"
# Reservation for master hosts
# Reservation for control plane hosts
kube_master_memory_reserved: 512Mi
kube_master_cpu_reserved: 200m
# kube_master_ephemeral_storage_reserved: 2Gi
@ -56,7 +56,7 @@ system_memory_reserved: 512Mi
system_cpu_reserved: 500m
# system_ephemeral_storage_reserved: 2Gi
# system_pid_reserved: "1000"
# Reservation for master hosts
# Reservation for control plane hosts
system_master_memory_reserved: 256Mi
system_master_cpu_reserved: 250m
# system_master_ephemeral_storage_reserved: 2Gi
@ -136,7 +136,7 @@ kubelet_config_extra_args_cgroupfs:
systemCgroups: /system.slice
cgroupRoot: /
## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not masters
## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not control plane nodes
kubelet_node_config_extra_args: {}
# Maximum number of container log files that can be present for a container.
@ -148,7 +148,7 @@ kubelet_logfiles_max_size: 10Mi
## Support custom flags to be passed to kubelet
kubelet_custom_flags: []
## Support custom flags to be passed to kubelet only on nodes, not masters
## Support custom flags to be passed to kubelet only on nodes, not control plane nodes
kubelet_node_custom_flags: []
# If non-empty, will use this string as identification instead of the actual hostname
@ -216,7 +216,7 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK') | default(''
# azure_vmtype: standard
# Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
azure_loadbalancer_sku: basic
# excludes master nodes from standard load balancer.
# excludes control plane nodes from standard load balancer.
azure_exclude_master_from_standard_lb: true
# disables the outbound SNAT for public load balancer rules
azure_disable_outbound_snat: false
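The kube_master_*/system_master_* reservations and the node-only kubelet variables above keep their names; only the comments change. A group_vars override sketch (values are illustrative, not recommendations):

    # larger reservations on control plane hosts
    kube_master_memory_reserved: 1Gi
    kube_master_cpu_reserved: 500m
    system_master_memory_reserved: 512Mi
    system_master_cpu_reserved: 500m
    # extra kubelet flags applied only to worker nodes, not to control plane nodes
    kubelet_node_custom_flags:
      - "--max-pods=200"   # hypothetical flag value, for illustration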


@ -24,7 +24,7 @@
- name: Install kube-vip
import_tasks: loadbalancer/kube-vip.yml
when:
- is_kube_master
- ('kube_control_plane' in group_names)
- kube_vip_enabled
tags:
- kube-vip
@ -32,7 +32,7 @@
- name: Install nginx-proxy
import_tasks: loadbalancer/nginx-proxy.yml
when:
- not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
- loadbalancer_apiserver_localhost
- loadbalancer_apiserver_type == 'nginx'
tags:
@ -41,7 +41,7 @@
- name: Install haproxy
import_tasks: loadbalancer/haproxy.yml
when:
- not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'
- ('kube_control_plane' not in group_names) or (kube_apiserver_bind_address != '0.0.0.0')
- loadbalancer_apiserver_localhost
- loadbalancer_apiserver_type == 'haproxy'
tags:


@ -64,7 +64,7 @@ clusterDNS:
kubeReservedCgroup: {{ kube_reserved_cgroups }}
{% endif %}
kubeReserved:
{% if is_kube_master | bool %}
{% if 'kube_control_plane' in group_names %}
cpu: "{{ kube_master_cpu_reserved }}"
memory: {{ kube_master_memory_reserved }}
{% if kube_master_ephemeral_storage_reserved is defined %}
@ -86,7 +86,7 @@ kubeReserved:
{% if system_reserved | bool %}
systemReservedCgroup: {{ system_reserved_cgroups }}
systemReserved:
{% if is_kube_master | bool %}
{% if 'kube_control_plane' in group_names %}
cpu: "{{ system_master_cpu_reserved }}"
memory: {{ system_master_memory_reserved }}
{% if system_master_ephemeral_storage_reserved is defined %}
@ -106,10 +106,10 @@ systemReserved:
{% endif %}
{% endif %}
{% endif %}
{% if is_kube_master | bool and eviction_hard_control_plane is defined and eviction_hard_control_plane %}
{% if ('kube_control_plane' in group_names) and (eviction_hard_control_plane is defined) and eviction_hard_control_plane %}
evictionHard:
{{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }}
{% elif not is_kube_master | bool and eviction_hard is defined and eviction_hard %}
{% elif ('kube_control_plane' not in group_names) and (eviction_hard is defined) and eviction_hard %}
evictionHard:
{{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }}
{% endif %}
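With the defaults shown earlier in this diff (512Mi/200m kube-reserved and 256Mi/250m system-reserved on control plane hosts), the branches above render roughly the following on a control plane node, assuming the reservation toggles are enabled and no ephemeral-storage, PID or eviction overrides are set:

    kubeReserved:
      cpu: "200m"
      memory: 512Mi
    systemReserved:
      cpu: "250m"
      memory: 256Mi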


@ -60,7 +60,7 @@
- not ignore_assert_errors
- inventory_hostname in groups.get('etcd',[])
- name: Stop if memory is too small for masters
- name: Stop if memory is too small for control plane nodes
assert:
that: ansible_memtotal_mb >= minimal_master_memory_mb
when:


@ -15,7 +15,8 @@
- bootstrap-os
- apps
- network
- master
- master # master tag is deprecated and replaced by control-plane
- control-plane
- node
with_items:
- "{{ kube_config_dir }}"
@ -39,7 +40,8 @@
- bootstrap-os
- apps
- network
- master
- master # master tag is deprecated and replaced by control-plane
- control-plane
- node
with_items:
- "{{ kube_cert_dir }}"


@ -1,12 +1,12 @@
---
- name: "Check_tokens | check if the tokens have already been generated on first master"
- name: "Check_tokens | check if the tokens have already been generated on first control plane node"
stat:
path: "{{ kube_token_dir }}/known_tokens.csv"
get_attributes: false
get_checksum: true
get_mime: false
delegate_to: "{{ groups['kube_control_plane'][0] }}"
register: known_tokens_master
register: known_tokens_control_plane
run_once: true
- name: "Check_tokens | Set default value for 'sync_tokens' and 'gen_tokens' to false"
@ -17,7 +17,7 @@
- name: "Check_tokens | Set 'sync_tokens' and 'gen_tokens' to true"
set_fact:
gen_tokens: true
when: not known_tokens_master.stat.exists and kube_token_auth | default(true)
when: not known_tokens_control_plane.stat.exists and kube_token_auth | default(true)
run_once: true
- name: "Check tokens | check if a cert already exists"
@ -34,7 +34,7 @@
{%- set tokens = {'sync': False} -%}
{%- for server in groups['kube_control_plane'] | intersect(ansible_play_batch)
if (not hostvars[server].known_tokens.stat.exists) or
(hostvars[server].known_tokens.stat.checksum | default('') != known_tokens_master.stat.checksum | default('')) -%}
(hostvars[server].known_tokens.stat.checksum | default('') != known_tokens_control_plane.stat.checksum | default('')) -%}
{%- set _ = tokens.update({'sync': True}) -%}
{%- endfor -%}
{{ tokens.sync }}


@ -8,15 +8,15 @@
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens | default(false)
- name: Gen_tokens | generate tokens for master components
- name: Gen_tokens | generate tokens for control plane components
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ "system:kubectl" ]
- "{{ groups['kube_control_plane'] }}"
register: gentoken_master
changed_when: "'Added' in gentoken_master.stdout"
register: gentoken_control_plane
changed_when: "'Added' in gentoken_control_plane.stdout"
run_once: true
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens | default(false)
@ -34,7 +34,7 @@
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: gen_tokens | default(false)
- name: Gen_tokens | Get list of tokens from first master
- name: Gen_tokens | Get list of tokens from first control plane node
command: "find {{ kube_token_dir }} -maxdepth 1 -type f"
register: tokens_list
check_mode: false
@ -52,7 +52,7 @@
run_once: true
when: sync_tokens | default(false)
- name: Gen_tokens | Copy tokens on masters
- name: Gen_tokens | Copy tokens on control plane nodes
shell: "set -o pipefail && echo '{{ tokens_data.stdout | quote }}' | base64 -d | tar xz -C /"
args:
executable: /bin/bash


@ -243,7 +243,7 @@ kube_network_node_prefix_ipv6: 120
kube_apiserver_ip: "{{ kube_service_addresses | ansible.utils.ipaddr('net') | ansible.utils.ipaddr(1) | ansible.utils.ipaddr('address') }}"
# NOTE: If you specific address/interface and use loadbalancer_apiserver_localhost
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on masters on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
# loadbalancer_apiserver_localhost (nginx/haproxy) will deploy on control plane nodes on 127.0.0.1:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }} too.
kube_apiserver_bind_address: 0.0.0.0
# https
@ -531,7 +531,6 @@ ssl_ca_dirs: |-
]
# Vars for pointing to kubernetes api endpoints
is_kube_master: "{{ inventory_hostname in groups['kube_control_plane'] }}"
kube_apiserver_count: "{{ groups['kube_control_plane'] | length }}"
kube_apiserver_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
@ -551,9 +550,9 @@ kube_apiserver_global_endpoint: |-
kube_apiserver_endpoint: |-
{% if loadbalancer_apiserver is defined -%}
https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{%- elif not is_kube_master and loadbalancer_apiserver_localhost -%}
{%- elif ('kube_control_plane' not in group_names) and loadbalancer_apiserver_localhost -%}
https://localhost:{{ loadbalancer_apiserver_port | default(kube_apiserver_port) }}
{%- elif is_kube_master -%}
{%- elif 'kube_control_plane' in group_names -%}
https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0', '127.0.0.1') }}:{{ kube_apiserver_port }}
{%- else -%}
https://{{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
@ -568,7 +567,6 @@ etcd_events_cluster_enabled: false
etcd_hosts: "{{ groups['etcd'] | default(groups['kube_control_plane']) }}"
# Vars for pointing to etcd endpoints
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
etcd_address: "{{ ip | default(fallback_ips[inventory_hostname]) }}"
etcd_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
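Because is_kube_master and is_etcd_master are removed from kubespray-defaults here, custom playbooks or local patches that referenced them should switch to the same group-membership test used throughout this commit. A migration sketch (hypothetical task, for illustration):

    - name: Example | task that previously used is_kube_master
      debug:
        msg: "running on a control plane member"
      when: "'kube_control_plane' in group_names"   # replaces: when: is_kube_master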


@ -8,11 +8,11 @@
{{ loadbalancer_apiserver.address | default('') }},
{%- endif -%}
{%- if no_proxy_exclude_workers | default(false) -%}
{% set cluster_or_master = 'kube_control_plane' %}
{% set cluster_or_control_plane = 'kube_control_plane' %}
{%- else -%}
{% set cluster_or_master = 'k8s_cluster' %}
{% set cluster_or_control_plane = 'k8s_cluster' %}
{%- endif -%}
{%- for item in (groups[cluster_or_master] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
{%- for item in (groups[cluster_or_control_plane] + groups['etcd'] | default([]) + groups['calico_rr'] | default([])) | unique -%}
{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }},
{%- if item != hostvars[item].get('ansible_hostname', '') -%}
{{ hostvars[item]['ansible_hostname'] }},
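The template's behaviour is unchanged: with no_proxy_exclude_workers left at its default of false the loop covers every host in k8s_cluster (plus etcd and calico_rr), while setting it to true limits the loop to kube_control_plane so that adding or removing workers does not touch no_proxy. A group_vars sketch for the latter case, assuming an HTTP proxy is configured at all:

    # only control plane (plus etcd and calico_rr) hosts are added to no_proxy
    no_proxy_exclude_workers: true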


@ -1,7 +1,7 @@
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
# each control plane and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:


@ -15,14 +15,14 @@
environment:
KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
with_items: "{{ groups['broken_kube_control_plane'] }}"
register: delete_broken_kube_masters
register: delete_broken_kube_control_plane_nodes
failed_when: false
when: groups['broken_kube_control_plane']
- name: Fail if unable to delete broken kube_control_plane nodes from cluster
fail:
msg: "Unable to delete broken kube_control_plane node: {{ item.item }}"
loop: "{{ delete_broken_kube_masters.results }}"
loop: "{{ delete_broken_kube_control_plane_nodes.results }}"
changed_when: false
when:
- groups['broken_kube_control_plane']


@ -7,7 +7,7 @@
# ignore servers that are not nodes
- inventory_hostname in groups['k8s_cluster'] and kube_override_hostname | default(inventory_hostname) in nodes.stdout_lines
retries: "{{ delete_node_retries }}"
# Sometimes the api-server can have a short window of indisponibility when we delete a master node
# Sometimes the api-server can have a short window of indisponibility when we delete a control plane node
delay: "{{ delete_node_delay_seconds }}"
register: result
until: result is not failed


@ -122,7 +122,7 @@ EOF
fi
# Tests Cases
## Test Master API
## Test Control Plane API
run_playbook tests/testcases/010_check-apiserver.yml
run_playbook tests/testcases/015_check-nodes-ready.yml