# kubespray/roles/kubernetes/kubeadm/tasks/main.yml

---
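# Work out the address joining nodes should use for kubeadm discovery: when the
# configured apiserver endpoint is local (127.0.0.1 / localhost behind a local
# proxy), fall back to the first control plane node's address and port.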
- name: Set kubeadm_discovery_address
  set_fact:
    # noqa: jinja[spacing]
    kubeadm_discovery_address: >-
      {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
      {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}
      {%- else -%}
      {{ kube_apiserver_endpoint | replace("https://", "") }}
      {%- endif %}
  tags:
    - facts
- name: Check if kubelet.conf exists
  stat:
    path: "{{ kube_config_dir }}/kubelet.conf"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubelet_conf
- name: Check if kubeadm CA cert is accessible
  stat:
    path: "{{ kube_cert_dir }}/ca.crt"
    get_attributes: no
    get_checksum: no
    get_mime: no
  register: kubeadm_ca_stat
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  run_once: true
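# The sha256 hash of the CA public key lets joining nodes validate the control
# plane during token-based discovery (kubeadm's caCertHashes / --discovery-token-ca-cert-hash).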
- name: Calculate kubeadm CA cert hash
  shell: set -o pipefail && openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
  args:
    executable: /bin/bash
  register: kubeadm_ca_hash
  when:
    - kubeadm_ca_stat.stat is defined
    - kubeadm_ca_stat.stat.exists
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  run_once: true
  changed_when: false
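# A bootstrap token is only generated when the inventory does not provide
# kubeadm_token; `kubeadm token create` issues one with the default 24h TTL.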
- name: Create kubeadm token for joining nodes with 24h expiration (default)
  command: "{{ bin_dir }}/kubeadm token create"
  register: temp_token
  delegate_to: "{{ groups['kube_control_plane'][0] }}"
  when: kubeadm_token is not defined
  changed_when: false
- name: Set kubeadm_token to generated token
  set_fact:
    kubeadm_token: "{{ temp_token.stdout }}"
  when: kubeadm_token is not defined
- name: Set kubeadm api version to v1beta3
  set_fact:
    kubeadmConfig_api_version: v1beta3
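# Optional file-based discovery: fetch the public cluster-info kubeconfig from
# the kube-public namespace and place it on joining nodes.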
- name: Get kubeconfig for join discovery process
  command: "{{ kubectl }} -n kube-public get cm cluster-info -o jsonpath='{.data.kubeconfig}'"
  register: kubeconfig_file_discovery
  run_once: true
  delegate_to: "{{ groups['kube_control_plane'] | first }}"
  when: kubeadm_use_file_discovery
- name: Copy discovery kubeconfig
  copy:
    dest: "{{ kube_config_dir }}/cluster-info-discovery-kubeconfig.yaml"
    content: "{{ kubeconfig_file_discovery.stdout }}"
    owner: "root"
    mode: 0644
  when:
    - not is_kube_master
    - not kubelet_conf.stat.exists
    - kubeadm_use_file_discovery
- name: Create kubeadm client config
  template:
    src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"
    dest: "{{ kube_config_dir }}/kubeadm-client.conf"
    backup: yes
    mode: 0640
  when: not is_kube_master
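# Optional kubeadm patches: copy patch files from the inventory to the node so
# kubeadm can pick them up (its --patches / patches directory mechanism).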
- name: Kubeadm | Create directory to store kubeadm patches
  file:
    path: "{{ kubeadm_patches.dest_dir }}"
    state: directory
    mode: 0640
  when: kubeadm_patches is defined and kubeadm_patches.enabled
- name: Kubeadm | Copy kubeadm patches from inventory files
  copy:
    src: "{{ kubeadm_patches.source_dir }}/"
    dest: "{{ kubeadm_patches.dest_dir }}"
    owner: "root"
    mode: 0644
  when: kubeadm_patches is defined and kubeadm_patches.enabled
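# Join nodes that do not have a kubelet.conf yet. A failed join is retried once
# in the rescue section with --ignore-preflight-errors=all, and stderr is shown
# afterwards if the join still registered a failure.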
- name: Join to cluster if needed
  environment:
    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}:/sbin"
  when: not is_kube_master and (not kubelet_conf.stat.exists)
  block:
    - name: Join to cluster
      command: >-
        timeout -k {{ kubeadm_join_timeout }} {{ kubeadm_join_timeout }}
        {{ bin_dir }}/kubeadm join
        --config {{ kube_config_dir }}/kubeadm-client.conf
        --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests
        --skip-phases={{ kubeadm_join_phases_skip | join(',') }}
      register: kubeadm_join
      changed_when: kubeadm_join is success
  rescue:
    - name: Join to cluster with ignores
      command: >-
        timeout -k {{ kubeadm_join_timeout }} {{ kubeadm_join_timeout }}
        {{ bin_dir }}/kubeadm join
        --config {{ kube_config_dir }}/kubeadm-client.conf
        --ignore-preflight-errors=all
        --skip-phases={{ kubeadm_join_phases_skip | join(',') }}
      register: kubeadm_join
      changed_when: kubeadm_join is success
  always:
    - name: Display kubeadm join stderr if any
      when: kubeadm_join is failed
      debug:
        msg: |
          Joined with warnings
          {{ kubeadm_join.stderr_lines }}
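# After the join, kubelet.conf may still point at the discovery address (the
# first control plane node); rewrite its server field to the configured
# apiserver endpoint so kubelets do not all depend on that single node.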
- name: Update server field in kubelet kubeconfig
  lineinfile:
    dest: "{{ kube_config_dir }}/kubelet.conf"
    regexp: 'server:'
    line: '    server: {{ kube_apiserver_endpoint }}'
    backup: yes
  when:
    - kubeadm_config_api_fqdn is not defined
    - not is_kube_master
    - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
  notify: Kubeadm | restart kubelet
- name: Update server field in kubelet kubeconfig - external lb
  lineinfile:
    dest: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '^    server: https'
    line: '    server: {{ kube_apiserver_endpoint }}'
    backup: yes
  when:
    - not is_kube_master
    - loadbalancer_apiserver is defined
  notify: Kubeadm | restart kubelet
# FIXME(mattymo): Need to point to localhost, otherwise masters will all point
# incorrectly to first master, creating SPoF.
- name: Update server field in kube-proxy kubeconfig
  shell: >-
    set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
    | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'
    | {{ kubectl }} replace -f -
  args:
    executable: /bin/bash
  run_once: true
  delegate_to: "{{ groups['kube_control_plane'] | first }}"
  delegate_facts: false
  when:
    - kubeadm_config_api_fqdn is not defined
    - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")
    - kube_proxy_deployed
    - loadbalancer_apiserver_localhost
  tags:
    - kube-proxy
- name: Update server field in kube-proxy kubeconfig - external lb
  shell: >-
    set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
    | sed 's#server:.*#server: {{ kube_apiserver_endpoint }}#g'
    | {{ kubectl }} replace -f -
  args:
    executable: /bin/bash
  run_once: true
  delegate_to: "{{ groups['kube_control_plane'] | first }}"
  delegate_facts: false
  when:
    - kube_proxy_deployed
    - loadbalancer_apiserver is defined
  tags:
    - kube-proxy
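# Keep the cluster CA certificate world-readable; only ca.crt is touched, never
# the key material.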
- name: Set ca.crt file permission
  file:
    path: "{{ kube_cert_dir }}/ca.crt"
    owner: root
    group: root
    mode: "0644"
- name: Restart all kube-proxy pods to ensure that they load the new configmap
  command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
  run_once: true
  delegate_to: "{{ groups['kube_control_plane'] | first }}"
  delegate_facts: false
  when:
    - kubeadm_config_api_fqdn is not defined or loadbalancer_apiserver is defined
    - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "") or loadbalancer_apiserver is defined
    - kube_proxy_deployed
  tags:
    - kube-proxy
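# With kubeadm-managed (stacked) etcd, worker nodes whose network plugin talks
# to etcd directly still need etcd client certs, so fetch them from a control
# plane node.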
- name: Extract etcd certs from control plane if using etcd kubeadm mode
  include_tasks: kubeadm_etcd_node.yml
  when:
    - etcd_deployment_type == "kubeadm"
    - inventory_hostname not in groups['kube_control_plane']
    - kube_network_plugin in ["calico", "flannel", "cilium"] or cilium_deploy_additionally | default(false) | bool
    - kube_network_plugin != "calico" or calico_datastore == "etcd"