Use K8s 1.14 and add kubeadm experimental control plane mode (#4514)

* Use K8s 1.14 and add kubeadm experimental control plane mode

This reverts commit d39c273d96.

* Cleanup kubeadm setup run on first master

* pin kubeadm_certificate_key in test

* Remove kubelet autolabel of kube-node, add symlink for pki dir

Change-Id: Id5e74dd667c60675dbfe4193b0bc9fb44380e1ca
Matthew Mosesohn 2019-04-19 16:01:54 +03:00 committed by Kubernetes Prow Robot
parent d0e628911c
commit 05dc2b3a09
39 changed files with 319 additions and 409 deletions


@@ -108,7 +108,7 @@ Supported Components
 --------------------
 - Core
-    - [kubernetes](https://github.com/kubernetes/kubernetes) v1.13.5
+    - [kubernetes](https://github.com/kubernetes/kubernetes) v1.14.0
     - [etcd](https://github.com/coreos/etcd) v3.2.26
     - [docker](https://www.docker.com/) v18.06 (see note)
     - [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)


@@ -20,7 +20,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
 kube_api_anonymous_auth: true
 
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.13.5
+kube_version: v1.14.0
 
 # kubernetes image repo define
 kube_image_repo: "gcr.io/google-containers"
@@ -153,6 +153,10 @@ etcd_deployment_type: docker
 kubelet_deployment_type: host
 helm_deployment_type: host
 
+# Enable kubeadm experimental control plane
+kubeadm_control_plane: false
+kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
+
 # K8s image pull policy (imagePullPolicy)
 k8s_image_pull_policy: IfNotPresent
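To try the new mode on a deployment built from this branch, it should be enough to flip the flag above in inventory group vars; the certificate key is generated into credentials_dir by the password lookup unless it is pinned explicitly, as the CI config at the end of this commit does. A minimal, illustrative override (exact group-vars file placement assumed):

kubeadm_control_plane: true
# kubeadm_certificate_key is auto-generated via the password lookup shown
# above; pin it only if all masters must share a pre-agreed key (see the
# test config at the bottom of this commit for a pinned example).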


@@ -35,7 +35,7 @@ download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube
 image_arch: "{{host_architecture | default('amd64')}}"
 
 # Versions
-kube_version: v1.13.5
+kube_version: v1.14.0
 kubeadm_version: "{{ kube_version }}"
 etcd_version: v3.2.26


@@ -40,18 +40,35 @@
   run_once: yes
   when: kubeconfig_localhost|default(false)
 
+# NOTE(mattymo): Please forgive this workaround
 - name: Generate admin kubeconfig with external api endpoint
   shell: >-
-    {{ bin_dir }}/kubeadm alpha
-    {% if kubeadm_version is version('v1.13.0', '<') %}
-    phase
+    {% if kubeadm_version is version('v1.14.0', '>=') %}
+    mkdir -p {{ kube_config_dir }}/external_kubeconfig &&
     {% endif %}
+    {{ bin_dir }}/kubeadm
+    {% if kubeadm_version is version('v1.14.0', '>=') %}
+    init phase
+    {% elif kubeadm_version is version('v1.13.0', '>=') %}
+    alpha
+    {% else %}
+    alpha phase
+    {% endif %}
+    {% if kubeadm_version is version('v1.14.0', '>=') %}
+    kubeconfig admin
+    --kubeconfig-dir {{ kube_config_dir }}/external_kubeconfig
+    {% else %}
     kubeconfig user
     --client-name kubernetes-admin
     --org system:masters
+    {% endif %}
     --cert-dir {{ kube_config_dir }}/ssl
     --apiserver-advertise-address {{ external_apiserver_address }}
     --apiserver-bind-port {{ external_apiserver_port }}
+    {% if kubeadm_version is version('v1.14.0', '>=') %}
+    && cat {{ kube_config_dir }}/external_kubeconfig/admin.conf &&
+    rm -rf {{ kube_config_dir }}/external_kubeconfig
+    {% endif %}
   environment: "{{ proxy_env }}"
   run_once: yes
   register: admin_kubeconfig
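For readability, this is roughly what the kubeadm >= v1.14 branch of the template above renders to, assuming the usual Kubespray defaults bin_dir=/usr/local/bin and kube_config_dir=/etc/kubernetes (a sketch, not captured output):

- name: Generate admin kubeconfig with external api endpoint (rendered, v1.14+)
  shell: >-
    mkdir -p /etc/kubernetes/external_kubeconfig &&
    /usr/local/bin/kubeadm init phase kubeconfig admin
    --kubeconfig-dir /etc/kubernetes/external_kubeconfig
    --cert-dir /etc/kubernetes/ssl
    --apiserver-advertise-address {{ external_apiserver_address }}
    --apiserver-bind-port {{ external_apiserver_port }}
    && cat /etc/kubernetes/external_kubeconfig/admin.conf &&
    rm -rf /etc/kubernetes/external_kubeconfig
  run_once: yes
  register: admin_kubeconfig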


@@ -23,9 +23,15 @@
 - name: Create kubeadm token for joining nodes with 24h expiration (default)
   command: "{{ bin_dir }}/kubeadm token create"
+  run_once: true
   register: temp_token
   delegate_to: "{{ groups['kube-master'][0] }}"
+  when: kubeadm_token is not defined
+
+- name: Set kubeadm_token to generated token
+  set_fact:
+    kubeadm_token: "{{ temp_token.stdout }}"
+  when: kubeadm_token is not defined
 
 - name: gets the kubeadm version
   command: "{{ bin_dir }}/kubeadm version -o short"
@@ -61,8 +67,6 @@
     dest: "{{ kube_config_dir }}/kubeadm-client.conf"
     backup: yes
   when: not is_kube_master
-  vars:
-    kubeadm_token: "{{ temp_token.stdout }}"
 
 - name: Join to cluster if needed
   environment:
@@ -122,11 +126,10 @@
     {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
     | sed 's#server:.*#server:\ {{ kube_apiserver_endpoint }}#g'
     | {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf replace -f -
+  delegate_to: "{{groups['kube-master']|first}}"
   run_once: true
   when:
-    - inventory_hostname == groups['kube-master']|first
     - kubeadm_config_api_fqdn is not defined
-    - is_kube_master
     - kubeadm_discovery_address != kube_apiserver_endpoint
     - not kube_proxy_remove
   tags:
@@ -134,11 +137,10 @@
 - name: Restart all kube-proxy pods to ensure that they load the new configmap
   shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
+  delegate_to: "{{groups['kube-master']|first}}"
   run_once: true
   when:
-    - inventory_hostname == groups['kube-master']|first
     - kubeadm_config_api_fqdn is not defined
-    - is_kube_master
     - kubeadm_discovery_address != kube_apiserver_endpoint
     - not kube_proxy_remove
   tags:
@@ -159,11 +161,10 @@
 # is fixed
 - name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
   shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
+  delegate_to: "{{groups['kube-master']|first}}"
   run_once: true
   when:
-    - inventory_hostname == groups['kube-master']|first
     - kube_proxy_remove
-    - is_kube_master
     - kubeadm_discovery_address != kube_apiserver_endpoint
   tags:
     - kube-proxy


@@ -23,11 +23,18 @@ kube_apiserver_storage_backend: etcd3
 # By default, force back to etcd2. Set to true to force etcd3 (experimental!)
 force_etcd3: false
 
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
+
 # Associated interfaces must be reachable by the rest of the cluster, and by
 # CLI/web clients.
 kube_controller_manager_bind_address: 0.0.0.0
 kube_scheduler_bind_address: 0.0.0.0
 
+# discovery_timeout modifies the discovery timeout
+discovery_timeout: 5m0s
+
 # audit support
 kubernetes_audit: false
 # path to audit log file
@@ -78,7 +85,6 @@ kube_apiserver_request_timeout: "1m0s"
 # 1.9 and below Admission control plug-ins
 kube_apiserver_admission_control:
-  - Initializers
   - NamespaceLifecycle
   - LimitRanger
   - ServiceAccount
@@ -99,8 +105,7 @@ kube_apiserver_enable_admission_plugins: []
 kube_apiserver_disable_admission_plugins: []
 
 # extra runtime config
-kube_api_runtime_config:
-  - admissionregistration.k8s.io/v1alpha1
+kube_api_runtime_config: []
 
 ## Enable/Disable Kube API Server Authentication Methods
 kube_basic_auth: false
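Note that kube_api_runtime_config now defaults to an empty list, so clusters that still need extra API groups must set it themselves; a minimal sketch restoring the entry that used to be hard-coded:

kube_api_runtime_config:
  - admissionregistration.k8s.io/v1alpha1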


@@ -12,33 +12,3 @@
     - {src: front-proxy-client.crt, dest: front-proxy-client.crt.old}
     - {src: front-proxy-client.key, dest: front-proxy-client.key.old}
   ignore_errors: yes
-
-- name: Remove old certs and keys
-  file:
-    path: "{{ kube_cert_dir }}/{{ item }}"
-    state: absent
-  with_items:
-    - apiserver.crt
-    - apiserver.key
-    - apiserver-kubelet-client.crt
-    - apiserver-kubelet-client.key
-    - front-proxy-client.crt
-    - front-proxy-client.key
-
-- name: Generate new certs and keys
-  command: "{{ bin_dir }}/kubeadm init phase certs {{ item }} --config={{ kube_config_dir }}/kubeadm-config.yaml"
-  environment: "{{ proxy_env }}"
-  with_items:
-    - apiserver
-    - apiserver-kubelet-client
-    - front-proxy-client
-  when: inventory_hostname == groups['kube-master']|first and kubeadm_version is version('v1.13.0', '>=')
-
-- name: Generate new certs and keys
-  command: "{{ bin_dir }}/kubeadm alpha phase certs {{ item }} --config={{ kube_config_dir }}/kubeadm-config.yaml"
-  environment: "{{ proxy_env }}"
-  with_items:
-    - apiserver
-    - apiserver-kubelet-client
-    - front-proxy-client
-  when: inventory_hostname == groups['kube-master']|first and kubeadm_version is version('v1.13.0', '<')


@@ -1,34 +0,0 @@
---
- name: Backup old configuration files
copy:
src: "{{ kube_config_dir }}/{{ item.src }}"
dest: "{{ kube_config_dir }}/{{ item.dest }}"
remote_src: yes
with_items:
- {src: admin.conf, dest: admin.conf.old}
- {src: kubelet.conf, dest: kubelet.conf.old}
- {src: controller-manager.conf, dest: controller-manager.conf.old}
- {src: scheduler.conf, dest: scheduler.conf.old}
ignore_errors: yes
- name: Remove old configuration files
file:
path: "{{ kube_config_dir }}/{{ item }}"
state: absent
with_items:
- admin.conf
- kubelet.conf
- controller-manager.conf
- scheduler.conf
- name: Generate new configuration files
command: "{{ bin_dir }}/kubeadm init phase kubeconfig all --config={{ kube_config_dir }}/kubeadm-config.yaml"
environment: "{{ proxy_env }}"
when: kubeadm_version is version('v1.13.0', '>=')
ignore_errors: yes
- name: Generate new configuration files
command: "{{ bin_dir }}/kubeadm alpha phase kubeconfig all --config={{ kube_config_dir }}/kubeadm-config.yaml"
environment: "{{ proxy_env }}"
when: kubeadm_version is version('v1.13.0', '<')
ignore_errors: yes


@@ -0,0 +1,45 @@
---
- name: Set kubeadm_discovery_address
set_fact:
kubeadm_discovery_address: >-
{%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_master }}:{{ kube_apiserver_port }}
{%- else -%}
{{ kube_apiserver_endpoint }}
{%- endif %}
tags:
- facts
- name: Create kubeadm ControlPlane config
template:
src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml"
backup: yes
when:
- inventory_hostname != groups['kube-master']|first
- not kubeadm_already_run.stat.exists
- name: Wait for k8s apiserver
wait_for:
host: "{{kubeadm_discovery_address.split(':')[0]}}"
port: "{{kubeadm_discovery_address.split(':')[1]}}"
timeout: 180
- name: Joining control plane node to the cluster.
command: >-
{{ bin_dir }}/kubeadm join
--config {{ kube_config_dir}}/kubeadm-controlplane.yaml
--ignore-preflight-errors=all
{% if kubeadm_certificate_key is defined %}
--certificate-key={{ kubeadm_certificate_key }}
{% endif %}
register: kubeadm_join_control_plane
when:
- inventory_hostname != groups['kube-master']|first
- not kubeadm_already_run.stat.exists
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
- name: Set secret_changed to false to avoid extra token rotation
set_fact:
secret_changed: false


@@ -0,0 +1,44 @@
---
- name: slurp kubeadm certs
slurp:
src: "{{ item }}"
with_items:
- "{{ kube_cert_dir }}/apiserver.crt"
- "{{ kube_cert_dir }}/apiserver.key"
- "{{ kube_cert_dir }}/apiserver-kubelet-client.crt"
- "{{ kube_cert_dir }}/apiserver-kubelet-client.key"
- "{{ kube_cert_dir }}/ca.crt"
- "{{ kube_cert_dir }}/ca.key"
- "{{ kube_cert_dir }}/front-proxy-ca.crt"
- "{{ kube_cert_dir }}/front-proxy-ca.key"
- "{{ kube_cert_dir }}/front-proxy-client.crt"
- "{{ kube_cert_dir }}/front-proxy-client.key"
- "{{ kube_cert_dir }}/sa.key"
- "{{ kube_cert_dir }}/sa.pub"
register: kubeadm_certs
delegate_to: "{{ groups['kube-master']|first }}"
- name: kubeadm | write out kubeadm certs
copy:
dest: "{{ item.item }}"
content: "{{ item.content | b64decode }}"
owner: root
group: root
mode: 0600
no_log: true
register: copy_kubeadm_certs
with_items: "{{ kubeadm_certs.results }}"
when: inventory_hostname != groups['kube-master']|first
- name: kubeadm | Init other uninitialized masters
command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
register: kubeadm_init
retries: 10
until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
when:
- inventory_hostname != groups['kube-master']|first
- not kubeadm_already_run.stat.exists
failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet


@@ -10,11 +10,10 @@
   import_tasks: kubeadm-migrate-certs.yml
   when: old_apiserver_cert.stat.exists
 
-- name: kubeadm | Check apiserver key
+- name: kubeadm | Check serviceaccount key
   stat:
-    path: "{{ kube_cert_dir }}/apiserver.key"
-  register: apiserver_key_before
-  delegate_to: "{{groups['kube-master']|first}}"
+    path: "{{ kube_cert_dir }}/sa.key"
+  register: sa_key_before
   run_once: true
 
 - name: kubeadm | Check if kubeadm has already run
@@ -62,10 +61,6 @@
     sans_address: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
   tags: facts
 
-- name: kubeadm | Copy etcd cert dir under k8s cert dir
-  command: "cp -TR {{ etcd_cert_dir }} {{ kube_config_dir }}/ssl/etcd"
-  changed_when: false
-
 - name: Create audit-policy directory
   file:
     path: "{{ audit_policy_file | dirname }}"
@@ -94,7 +89,18 @@
     - kubeadm_already_run.stat.exists
 
 - name: kubeadm | Initialize first master
-  command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
+  command: >-
+    timeout -k 600s 600s
+    {{ bin_dir }}/kubeadm init
+    --config={{ kube_config_dir }}/kubeadm-config.yaml
+    --ignore-preflight-errors=all
+    {% if kubeadm_version is version('v1.14.0', '>=') %}
+    --experimental-upload-certs
+    {% endif %}
+    --skip-phases=addon/coredns
+    {% if kubeadm_certificate_key is defined %}
+    --certificate-key={{ kubeadm_certificate_key }}
+    {% endif %}
   register: kubeadm_init
   # Retry is because upload config sometimes fails
   retries: 3
@@ -105,76 +111,73 @@
     PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
   notify: Master | restart kubelet
 
-- name: slurp kubeadm certs
-  slurp:
-    src: "{{ item }}"
-  with_items:
-    - "{{ kube_cert_dir }}/apiserver.crt"
-    - "{{ kube_cert_dir }}/apiserver.key"
-    - "{{ kube_cert_dir }}/apiserver-kubelet-client.crt"
-    - "{{ kube_cert_dir }}/apiserver-kubelet-client.key"
-    - "{{ kube_cert_dir }}/ca.crt"
-    - "{{ kube_cert_dir }}/ca.key"
-    - "{{ kube_cert_dir }}/front-proxy-ca.crt"
-    - "{{ kube_cert_dir }}/front-proxy-ca.key"
-    - "{{ kube_cert_dir }}/front-proxy-client.crt"
-    - "{{ kube_cert_dir }}/front-proxy-client.key"
-    - "{{ kube_cert_dir }}/sa.key"
-    - "{{ kube_cert_dir }}/sa.pub"
-  register: kubeadm_certs
-  delegate_to: "{{ groups['kube-master']|first }}"
-  run_once: true
-
-- name: kubeadm | write out kubeadm certs
-  copy:
-    dest: "{{ item.item }}"
-    content: "{{ item.content | b64decode }}"
-    owner: root
-    group: root
-    mode: 0600
-  no_log: true
-  register: copy_kubeadm_certs
-  with_items: "{{ kubeadm_certs.results }}"
-  when: inventory_hostname != groups['kube-master']|first
-
-- name: kubeadm | Kubeconfig management with kubeadm
-  import_tasks: kubeadm-kubeconfig.yml
+- name: set kubeadm certificate key
+  set_fact:
+    kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"
+  with_items: "{{ (hostvars['kube-master'][0]['kubeadm_init']|default({'stdout_lines': []}))['stdout_lines'] }}"
   when:
-    - not upgrade_cluster_setup
-    - kubeadm_already_run.stat.exists
+    - kubeadm_version is version('v1.14.0', '>=')
+    - kubeadm_certificate_key is not defined
+    - item | trim | match('.*--certificate-key .*')
+    - hostvars['kube-master'][0]['kubeadm_init']['stdout_lines'] is defined
 
-- name: kubeadm | Init other uninitialized masters
-  command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
-  register: kubeadm_init
-  retries: 10
-  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
-  when: inventory_hostname != groups['kube-master']|first and not kubeadm_already_run.stat.exists
-  failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
-  environment:
-    PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
-  notify: Master | restart kubelet
-
-- name: kubeadm | upgrage kubernetes cluster
+- name: Create kubeadm token for joining nodes with 24h expiration (default)
+  command: "{{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create"
+  register: temp_token
+  retries: 5
+  delay: 5
+  until: temp_token is succeeded
+  delegate_to: "{{groups['kube-master']|first}}"
+  when: kubeadm_token is not defined
+  tags:
+    - kubeadm_token
+
+- name: Set kubeadm_token
+  set_fact:
+    kubeadm_token: "{{ temp_token.stdout }}"
+  when: temp_token.stdout is defined
+  tags:
+    - kubeadm_token
+
+- name: Create hardcoded kubeadm token for joining nodes with 24h expiration (if defined)
+  shell: >-
+    {{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token delete {{ kubeadm_token }} || :;
+    {{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create {{ kubeadm_token }}
  when:
+    - inventory_hostname == groups['kube-master']|first
+    - kubeadm_token is defined
+  tags:
+    - kubeadm_token
+
+- name: kubeadm | Initialize other masters (experimental control plane)
+  include: kubeadm-secondary-experimental.yml
+  when: kubeadm_control_plane
+
+- name: kubeadm | Initialize other masters (experimental control plane)
+  include: kubeadm-secondary-legacy.yml
+  when: not kubeadm_control_plane
+
+- name: kubeadm | upgrade kubernetes cluster
   import_tasks: kubeadm-upgrade.yml
   when: upgrade_cluster_setup
 
-- name: kubeadm | Check apiserver key again
+- name: kubeadm | Check serviceaccount key again
   stat:
-    path: "{{ kube_cert_dir }}/apiserver.key"
-  register: apiserver_key_after
-  delegate_to: "{{groups['kube-master']|first}}"
+    path: "{{ kube_cert_dir }}/sa.key"
+  register: sa_key_after
   run_once: true
 
 - name: kubeadm | Set secret_changed if service account key was updated
   command: /bin/true
   notify: Master | set secret_changed
-  when: apiserver_key_before.stat.checksum|default("") != apiserver_key_after.stat.checksum
+  when: sa_key_before.stat.checksum|default("") != sa_key_after.stat.checksum
 
 - name: kubeadm | cleanup old certs if necessary
   import_tasks: kubeadm-cleanup-old-certs.yml
   when:
     - old_apiserver_cert.stat.exists
 
+# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
   command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
   delegate_to: "{{groups['kube-master']|first}}"
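The `set kubeadm certificate key` task above scrapes the key that `--experimental-upload-certs` prints during init on the first master, so the other masters can join with the same key. A self-contained sketch of just the regex_search step (the sample output line and hex value are illustrative, not from a real run):

- name: set kubeadm certificate key (standalone sketch)
  vars:
    sample_init_line: "  --certificate-key 0123456789abcdef0123456789abcdef"  # illustrative
  set_fact:
    kubeadm_certificate_key: "{{ sample_init_line | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"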


@@ -17,6 +17,8 @@
   failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
   notify: Master | restart kubelet
 
+# FIXME: https://github.com/kubernetes/kubeadm/issues/1498 remove stdout_lines
+# check after issue is fixed
 - name: kubeadm | Upgrade other masters
   command: >-
     timeout -k 600s 600s
@@ -29,5 +31,8 @@
     --etcd-upgrade=false
   register: kubeadm_upgrade
   when: inventory_hostname != groups['kube-master']|first
-  failed_when: kubeadm_upgrade.rc != 0 and "field is immutable" not in kubeadm_upgrade.stderr
+  failed_when:
+    - kubeadm_upgrade.rc != 0
+    - '"field is immutable" not in kubeadm_upgrade.stderr'
+    - kubeadm_upgrade.stdout_lines | length > 1
   notify: Master | restart kubelet


@@ -3,11 +3,6 @@
   command: "{{ bin_dir }}/kubeadm version -o short"
   register: kubeadm_output
 
-- name: sets kubeadm api version to v1alpha1
-  set_fact:
-    kubeadmConfig_api_version: v1alpha1
-  when: kubeadm_output.stdout is version('v1.11.0', '<')
-
 - name: sets kubeadm api version to v1alpha2
   set_fact:
     kubeadmConfig_api_version: v1alpha2


@@ -3,8 +3,8 @@
   command: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} ls /registry/minions"
   environment:
     ETCDCTL_API: 2
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
   register: old_data_exists
   delegate_to: "{{groups['etcd'][0]}}"
   changed_when: false


@@ -1,204 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
{% if kubeadm_config_api_fqdn is defined %}
controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}
bindPort: {{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
advertiseAddress: {{ ip | default(fallback_ips[inventory_hostname]) }}
bindPort: {{ kube_apiserver_port }}
{% endif %}
etcd:
endpoints:
{% for endpoint in etcd_access_addresses.split(',') %}
- {{ endpoint }}
{% endfor %}
caFile: {{ etcd_cert_dir }}/ca.pem
certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
networking:
dnsDomain: {{ dns_domain }}
serviceSubnet: {{ kube_service_addresses }}
podSubnet: {{ kube_pods_subnet }}
kubernetesVersion: {{ kube_version }}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
cloudProvider: {{cloud_provider}}
cloudConfig: {{ kube_config_dir }}/cloud_config
{% elif cloud_provider is defined and cloud_provider in ["external"] %}
cloudConfig: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if kube_proxy_mode == 'ipvs' %}
kubeProxy:
config:
{% if kube_version is version('v1.10', '<') %}
featureGates: SupportIPVSProxyMode=true
{% endif %}
{% if kube_version is version('v1.10', '>=') %}
featureGates:
SupportIPVSProxyMode: true
{% endif %}
mode: ipvs
{% endif %}
{% if kube_proxy_nodeport_addresses %}
nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
{% endif %}
resourceContainer: ""
authorizationModes:
{% for mode in authorization_modes %}
- {{ mode }}
{% endfor %}
selfHosted: false
apiServerExtraArgs:
bind-address: {{ kube_apiserver_bind_address }}
{% if kube_apiserver_insecure_port|string != "0" %}
insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
{% endif %}
insecure-port: "{{ kube_apiserver_insecure_port }}"
{% if kube_version is version('v1.10', '<') %}
admission-control: {{ kube_apiserver_admission_control | join(',') }}
{% else %}
{% if kube_apiserver_enable_admission_plugins|length > 0 %}
enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
{% endif %}
{% if kube_apiserver_disable_admission_plugins|length > 0 %}
disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
{% endif %}
{% endif %}
apiserver-count: "{{ kube_apiserver_count }}"
{% if kube_version is version('v1.9', '>=') %}
endpoint-reconciler-type: lease
{% endif %}
{% if etcd_events_cluster_enabled %}
etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
{% endif %}
service-node-port-range: {{ kube_apiserver_node_port_range }}
kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
profiling: "{{ kube_profiling }}"
request-timeout: "{{ kube_apiserver_request_timeout }}"
repair-malformed-updates: "false"
enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=') %}
anonymous-auth: "{{ kube_api_anonymous_auth }}"
{% endif %}
{% if kube_basic_auth|default(true) %}
basic-auth-file: {{ kube_users_dir }}/known_users.csv
{% endif %}
{% if kube_token_auth|default(true) %}
token-auth-file: {{ kube_token_dir }}/known_tokens.csv
{% endif %}
{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
oidc-issuer-url: {{ kube_oidc_url }}
oidc-client-id: {{ kube_oidc_client_id }}
{% if kube_oidc_ca_file is defined %}
oidc-ca-file: {{ kube_oidc_ca_file }}
{% endif %}
{% if kube_oidc_username_claim is defined %}
oidc-username-claim: {{ kube_oidc_username_claim }}
{% endif %}
{% if kube_oidc_groups_claim is defined %}
oidc-groups-claim: {{ kube_oidc_groups_claim }}
{% endif %}
{% if kube_oidc_username_prefix is defined %}
oidc-username-prefix: "{{ kube_oidc_username_prefix }}"
{% endif %}
{% if kube_oidc_groups_prefix is defined %}
oidc-groups-prefix: "{{ kube_oidc_groups_prefix }}"
{% endif %}
{% endif %}
{% if kube_webhook_token_auth|default(false) %}
authentication-token-webhook-config-file: {{ kube_config_dir }}/webhook-token-auth-config.yaml
{% endif %}
{% if kube_encrypt_secret_data %}
experimental-encryption-provider-config: {{ kube_cert_dir }}/secrets_encryption.yaml
{% endif %}
storage-backend: {{ kube_apiserver_storage_backend }}
{% if kube_api_runtime_config is defined %}
runtime-config: {{ kube_api_runtime_config | join(',') }}
{% endif %}
allow-privileged: "true"
{% for key in kube_kubeadm_apiserver_extra_args %}
{{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
{% endfor %}
{% if kube_feature_gates %}
feature-gates: {{ kube_feature_gates|join(',') }}
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %}
configure-cloud-routes: "true"
{% endif %}
controllerManagerExtraArgs:
node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
node-monitor-period: {{ kube_controller_node_monitor_period }}
pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }}
node-cidr-mask-size: "{{ kube_network_node_prefix }}"
profiling: "{{ kube_profiling }}"
terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}"
{% if kube_feature_gates %}
feature-gates: {{ kube_feature_gates|join(',') }}
{% endif %}
{% for key in kube_kubeadm_controller_extra_args %}
{{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
{% endfor %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
controllerManagerExtraVolumes:
{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined and openstack_cacert != "" %}
- name: openstackcacert
hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
- name: cloud-config
hostPath: {{ kube_config_dir }}/cloud_config
mountPath: {{ kube_config_dir }}/cloud_config
{% endif %}
{% endif %}
schedulerExtraArgs:
profiling: "{{ kube_profiling }}"
{% if kube_feature_gates %}
feature-gates: {{ kube_feature_gates|join(',') }}
{% endif %}
{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
{% for key in kube_kubeadm_scheduler_extra_args %}
{{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
{% endfor %}
{% endif %}
{% if kube_basic_auth|default(true) or kube_token_auth|default(true) or kube_webhook_token_auth|default(false) or ssl_ca_dirs|length %}
apiServerExtraVolumes:
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
- name: cloud-config
hostPath: {{ kube_config_dir }}/cloud_config
mountPath: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if kube_basic_auth|default(true) %}
- name: basic-auth-config
hostPath: {{ kube_users_dir }}
mountPath: {{ kube_users_dir }}
{% endif %}
{% if kube_token_auth|default(true) %}
- name: token-auth-config
hostPath: {{ kube_token_dir }}
mountPath: {{ kube_token_dir }}
{% endif %}
{% if kube_webhook_token_auth|default(false) %}
- name: webhook-token-auth-config
hostPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
mountPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
{% endif %}
{% if ssl_ca_dirs|length %}
{% for dir in ssl_ca_dirs %}
- name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
hostPath: {{ dir }}
mountPath: {{ dir }}
writable: false
{% endfor %}
{% endif %}
{% endif %}
apiServerCertSANs:
{% for san in apiserver_sans %}
- {{ san }}
{% endfor %}
certificatesDir: {{ kube_cert_dir }}
imageRepository: {{ kube_image_repo }}
unifiedControlPlaneImage: ""
{% if kube_override_hostname|default('') %}
nodeName: {{ kube_override_hostname }}
{% endif %}


@@ -14,9 +14,9 @@ etcd:
 {% for endpoint in etcd_access_addresses.split(',') %}
 - {{ endpoint }}
 {% endfor %}
-caFile: {{ etcd_cert_dir }}/ca.pem
-certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
-keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
+caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
+certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
+keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
 networking:
 dnsDomain: {{ dns_domain }}
 serviceSubnet: {{ kube_service_addresses }}
@@ -221,10 +221,12 @@ nodeRegistration:
 {% if kube_override_hostname|default('') %}
 name: {{ kube_override_hostname }}
 {% endif %}
-{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
+{% if inventory_hostname not in groups['kube-node'] %}
 taints:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+{% else %}
+taints: {}
 {% endif %}
 {% if container_manager == 'crio' %}
 criSocket: /var/run/crio/crio.sock


@@ -7,10 +7,12 @@ nodeRegistration:
 {% if kube_override_hostname|default('') %}
 name: {{ kube_override_hostname }}
 {% endif %}
-{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
+{% if inventory_hostname not in groups['kube-node'] %}
 taints:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+{% else %}
+taints: {}
 {% endif %}
 {% if container_manager == 'crio' %}
 criSocket: /var/run/crio/crio.sock
@@ -29,9 +31,9 @@ etcd:
 {% for endpoint in etcd_access_addresses.split(',') %}
 - {{ endpoint }}
 {% endfor %}
-caFile: {{ etcd_cert_dir }}/ca.pem
-certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
-keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
+caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
+certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
+keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
 networking:
 dnsDomain: {{ dns_domain }}
 serviceSubnet: {{ kube_service_addresses }}


@@ -11,6 +11,8 @@ nodeRegistration:
 taints:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+{% else %}
+taints: []
 {% endif %}
 {% if container_manager == 'crio' %}
 criSocket: /var/run/crio/crio.sock
@@ -29,9 +31,9 @@ etcd:
 {% for endpoint in etcd_access_addresses.split(',') %}
 - {{ endpoint }}
 {% endfor %}
-caFile: {{ etcd_cert_dir }}/ca.pem
-certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
-keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
+caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
+certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
+keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
 networking:
 dnsDomain: {{ dns_domain }}
 serviceSubnet: {{ kube_service_addresses }}


@@ -0,0 +1,26 @@
apiVersion: kubeadm.k8s.io/v1beta1
kind: JoinConfiguration
discovery:
bootstrapToken:
{% if kubeadm_config_api_fqdn is defined %}
apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
apiServerEndpoint: {{ kubeadm_discovery_address | replace("https://", "")}}
{% endif %}
token: {{ kubeadm_token }}
unsafeSkipCAVerification: true
timeout: {{ discovery_timeout }}
tlsBootstrapToken: {{ kubeadm_token }}
controlPlane:
localAPIEndpoint:
advertiseAddress: {{ kube_apiserver_address }}
bindPort: {{ kube_apiserver_port }}
nodeRegistration:
name: {{ inventory_hostname }}
{% if container_manager == 'crio' %}
criSocket: /var/run/crio/crio.sock
{% elif container_manager == 'rkt' %}
criSocket: /var/run/rkt.sock
{% else %}
criSocket: /var/run/dockershim.sock
{% endif %}


@@ -84,14 +84,6 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {# Kubelet node labels #}
 {% set role_node_labels = [] %}
-{% if inventory_hostname in groups['kube-master'] %}
-{% set dummy = role_node_labels.append("node-role.kubernetes.io/master=''") %}
-{% if not standalone_kubelet|bool %}
-{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
-{% endif %}
-{% else %}
-{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
-{% endif %}
 {% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
 {% if inventory_hostname in nvidia_gpu_nodes %}
 {% set dummy = role_node_labels.append('nvidia.com/gpu=true') %}


@@ -24,6 +24,8 @@ disable_ipv6_dns: false
 kube_cert_group: kube-cert
 kube_config_dir: /etc/kubernetes
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+kube_cert_compat_dir: /etc/kubernetes/pki
 
 # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
 # for hostnet pods and infra needs


@@ -23,6 +23,14 @@
     - "{{ kube_manifest_dir }}"
     - "{{ kube_script_dir }}"
 
+- name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498)
+  file:
+    src: "{{ kube_cert_dir }}"
+    dest: "{{ kube_cert_compat_dir }}"
+    state: link
+  when:
+    - kube_cert_dir != kube_cert_compat_dir
+
 - name: Create cni directories
   file:
     path: "{{ item }}"


@@ -12,7 +12,7 @@ is_atomic: false
 disable_swap: true
 
 ## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.13.5
+kube_version: v1.14.0
 
 ## Kube Proxy mode One of ['iptables','ipvs']
 kube_proxy_mode: ipvs
@@ -97,6 +97,9 @@ kube_manifest_dir: "{{ kube_config_dir }}/manifests"
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"
 
+# compatibility directory for kubeadm
+kube_cert_compat_dir: "/etc/kubernetes/pki"
+
 # This is where all of the bearer tokens will be stored
 kube_token_dir: "{{ kube_config_dir }}/tokens"
@@ -335,6 +338,9 @@ kube_feature_gates: |-
   {{ feature_gate_v1_12 }}
   {%- endif %}
 
+# Enable kubeadm experimental control plane
+kubeadm_control_plane: false
+
 # Local volume provisioner storage classes
 # Levarages Ansibles string to Python datatype casting. Otherwise the dict_key isn't substituted
 # see https://github.com/ansible/ansible/issues/17324
@@ -383,7 +389,7 @@ no_proxy: >-
   {%- endif -%}
   {%- for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
   {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }},
-  {%- if item != hostvars[item].get('ansible_hostname', "") -%}
+  {%- if item != hostvars[item].get('ansible_hostname', '') -%}
   {{ hostvars[item]['ansible_hostname'] }},
   {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
   {%- endif -%}


@@ -61,3 +61,7 @@ calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostna
 ### do not enable this, this is detected in scope of tasks, this is just a default value
 calico_upgrade_needed: false
 
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem


@@ -1,15 +1,14 @@
 ---
-- name: restart calico-node
+- name: reset_calico_cni
   command: /bin/true
   notify:
-    - Calico | reload systemd
-    - Calico | reload calico-node
+    - delete 10-calico.conflist
+    - delete calico-node containers
 
-- name: Calico | reload systemd
-  shell: systemctl daemon-reload
+- name: delete 10-calico.conflist
+  file:
+    path: /etc/calico/10-calico.conflist
+    state: absent
 
-- name: Calico | reload calico-node
-  service:
-    name: calico-node
-    state: restarted
-    sleep: 10
+- name: delete calico-node containers
+  shell: "docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty docker rm -f"


@@ -10,3 +10,7 @@ calico_rr_memory_limit: 1000M
 calico_rr_cpu_limit: 300m
 calico_rr_memory_requests: 128M
 calico_rr_cpu_requests: 150m
+
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem


@@ -22,9 +22,9 @@
     state: hard
     force: yes
   with_items:
-    - {s: "ca.pem", d: "ca_cert.crt"}
-    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
-    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
+    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
+    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
+    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
 
 - name: Calico-rr | Create dir for logs
   file:


@@ -11,6 +11,8 @@
     src: "cni-calico.conflist.j2"
     dest: "/etc/cni/net.d/{% if calico_version is version('v3.3.0', '>=') %}calico.conflist.template{% else %}10-calico.conflist{% endif %}"
     owner: kube
+  register: calico_conflist
+  notify: reset_calico_cni
 
 - name: Calico | Create calico certs directory
   file:
@@ -27,9 +29,9 @@
     state: hard
     force: yes
   with_items:
-    - {s: "ca.pem", d: "ca_cert.crt"}
-    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
-    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
+    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
+    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
+    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
 
 - name: Calico | Install calicoctl wrapper script
   template:


@@ -4,6 +4,6 @@ metadata:
 spec:
   datastoreType: "etcdv2"
   etcdEndpoints: "{{ etcd_access_addresses }}"
-  etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
-  etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-  etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
+  etcdKeyFile: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
+  etcdCertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
+  etcdCACertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}"


@@ -4,6 +4,6 @@ metadata:
 spec:
   datastoreType: "etcdv3"
   etcdEndpoints: "{{ etcd_access_addresses }}"
-  etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
-  etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-  etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
+  etcdKeyFile: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
+  etcdCertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
+  etcdCACertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}"


@@ -30,3 +30,8 @@ calicoctl_memory_limit: 170M
 calicoctl_cpu_limit: 100m
 calicoctl_memory_requests: 32M
 calicoctl_cpu_requests: 25m
+
+# etcd cert filenames
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
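These defaults match the per-node client certs that Kubespray's etcd role generates; a deployment pointing the network plugins at an etcd whose client certs use different names can now override them in group vars, for example (filenames illustrative):

kube_etcd_cacert_file: ca.crt
kube_etcd_cert_file: etcd-client.crt
kube_etcd_key_file: etcd-client.key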


@@ -20,9 +20,9 @@
     state: hard
     force: yes
   with_items:
-    - {s: "ca.pem", d: "ca_cert.crt"}
-    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
-    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
+    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
+    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
+    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
 
 - name: Canal | Set Flannel etcd configuration
   command: |-


@@ -5,6 +5,9 @@ cilium_disable_ipv4: false
 # Etcd SSL dirs
 cilium_cert_dir: /etc/cilium/certs
+kube_etcd_cacert_file: ca.pem
+kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
+kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
 
 # Cilium Network Policy directory
 cilium_policy_dir: /etc/kubernetes/policy


@@ -21,9 +21,9 @@
     state: hard
     force: yes
   with_items:
-    - {s: "ca.pem", d: "ca_cert.crt"}
-    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
-    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
+    - {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
+    - {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
+    - {s: "{{ kube_etcd_key_file }}", d: "key.pem"}
 
 - name: Cilium | Create Cilium node manifests
   template:


@@ -6,6 +6,8 @@ cloud_machine_type: "n1-standard-2"
 mode: ha
 
 # Deployment settings
+kubeadm_control_plane: true
+kubeadm_certificate_key: 3998c58db6497dd17d909394e62d515368c06ec617710d02edea31c06d741085
 kube_network_plugin: flannel
 helm_enabled: true
 kubernetes_audit: true