Merge branch 'kubernetes-sigs:master' into master

pull/10939/head
peterw 2024-05-08 17:25:47 +01:00 committed by GitHub
commit bfac7d8b3e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
111 changed files with 1337 additions and 1372 deletions


@ -36,3 +36,4 @@ exclude_paths:
# Generated files
- tests/files/custom_cni/cilium.yaml
- venv
- .github

.github/dependabot.yml

@ -0,0 +1,7 @@
version: 2
updates:
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "daily"
labels: [ "dependencies" ]


@ -174,6 +174,11 @@ packet_almalinux8-docker:
extends: .packet_pr
when: on_success
packet_amazon-linux-2-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_fedora38-docker-weave:
stage: deploy-part2
extends: .packet_pr
@ -240,11 +245,6 @@ packet_fedora37-calico-swap-selinux:
extends: .packet_pr
when: manual
packet_amazon-linux-2-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_almalinux8-calico-nodelocaldns-secondary:
stage: deploy-part2
extends: .packet_pr


@ -3,6 +3,7 @@ extends: default
ignore: |
.git/
.github/
# Generated file
tests/files/custom_cni/cilium.yaml


@ -160,15 +160,15 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components
- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.29.2
- [etcd](https://github.com/etcd-io/etcd) v3.5.10
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.29.4
- [etcd](https://github.com/etcd-io/etcd) v3.5.12
- [docker](https://www.docker.com/) v24.0 (see [Note](#container-runtime-notes))
- [containerd](https://containerd.io/) v1.7.13
- [containerd](https://containerd.io/) v1.7.16
- [cri-o](http://cri-o.io/) v1.29.1 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
- [calico](https://github.com/projectcalico/calico) v3.27.2
- [cilium](https://github.com/cilium/cilium) v1.13.4
- [calico](https://github.com/projectcalico/calico) v3.27.3
- [cilium](https://github.com/cilium/cilium) v1.15.4
- [flannel](https://github.com/flannel-io/flannel) v0.22.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
- [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0

Vagrantfile

@ -27,7 +27,8 @@ SUPPORTED_OS = {
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
"rockylinux8" => {box: "rockylinux/8", user: "vagrant"},
"rockylinux9" => {box: "rockylinux/9", user: "vagrant"},
"fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
"fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
"opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
@ -185,6 +186,14 @@ Vagrant.configure("2") do |config|
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
end
end
node.vm.provider :virtualbox do |vb|
# always make /dev/sd{a/b/c} so that CI can ensure that
# virtualbox and libvirt will have the same devices to use for OSDs
(1..$kube_node_instances_with_disks_number).each do |d|
vb.customize ['createhd', '--filename', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--size', $kube_node_instances_with_disks_size] # 10GB disk
vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', d, '--device', 0, '--type', 'hdd', '--medium', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--nonrotational', 'on', '--mtype', 'normal']
end
end
end
if $expose_docker_tcp


@ -99,7 +99,7 @@ cilium_operator_extra_volume_mounts:
## Choose Cilium version
```yml
cilium_version: v1.12.1
cilium_version: v1.15.4
```
## Add variable to config


@ -35,13 +35,20 @@ containerd_registries_mirrors:
skip_verify: false
```
`containerd_registries_mirrors` is ignored for pulling images when `image_command_tool=nerdctl`
(the default for `container_manager=containerd`). Use `crictl` instead, it supports
`containerd_registries_mirrors` but lacks proper multi-arch support (see
[#8375](https://github.com/kubernetes-sigs/kubespray/issues/8375)):
containerd falls back to `https://{{ prefix }}` when none of the mirrors have the image.
This can be changed with the [`server` field](https://github.com/containerd/containerd/blob/main/docs/hosts.md#server-field):
```yaml
image_command_tool: crictl
containerd_registries_mirrors:
- prefix: docker.io
mirrors:
- host: https://mirror.gcr.io
capabilities: ["pull", "resolve"]
skip_verify: false
- host: https://registry-1.docker.io
capabilities: ["pull", "resolve"]
skip_verify: false
server: https://mirror.example.org
```
The `containerd_registries` and `containerd_insecure_registries` configs are deprecated.


@ -71,6 +71,8 @@ kube_apiserver_admission_event_rate_limits:
qps: 50
burst: 100
kube_profiling: false
# Remove anonymous access to cluster
remove_anonymous_access: true
## kube-controller-manager
kube_controller_manager_bind_address: 127.0.0.1
@ -105,7 +107,7 @@ kubelet_systemd_hardening: true
# IP addresses, kubelet_secure_addresses allows you
# to specify the IP from which the kubelet
# will receive the packets.
kubelet_secure_addresses: "192.168.10.110 192.168.10.111 192.168.10.112"
kubelet_secure_addresses: "localhost link-local {{ kube_pods_subnet }} 192.168.10.110 192.168.10.111 192.168.10.112"
# additional configurations
kube_owner: root


@ -76,3 +76,11 @@ In addition, [load-balancing method](https://kube-vip.io/docs/installation/flags
```yaml
kube_vip_lb_fwdmethod: masquerade
```
If you want to adjust the parameters of [kube-vip LeaderElection](https://kube-vip.io/docs/installation/flags/#environment-variables):
```yaml
kube_vip_leaseduration: 30
kube_vip_renewdeadline: 20
kube_vip_retryperiod: 4
```


@ -281,6 +281,11 @@ node_taints:
* `audit_webhook_batch_max_wait`: 1s
* *kubectl_alias* - Bash alias of kubectl, for interacting with the Kubernetes cluster more easily.
* *remove_anonymous_access* - When set to `true`, removes the `kubeadm:bootstrap-signer-clusterinfo` rolebinding created by kubeadm.
By default, kubeadm creates a rolebinding in the `kube-public` namespace which grants permissions to anonymous users. This rolebinding allows kubeadm to discover and validate cluster information during the join phase.
In a nutshell, this option removes the rolebinding after the init phase of the first control plane node and then configures kubeadm to use file discovery for the join phase of other nodes.
This option does not remove the anonymous authentication feature of the API server.
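
A minimal sketch of opting in from inventory group vars (the variable is the one documented here; the file location is illustrative):
```yaml
# e.g. group_vars/k8s_cluster/k8s-cluster.yml (illustrative location)
remove_anonymous_access: true
# Joining nodes then use kubeadm file discovery, which Kubespray wires up
# automatically via kubeadm_use_file_discovery.
```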
### Custom flags for Kube Components
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments.


@ -14,4 +14,4 @@
## The repo and tag of the external Huawei Cloud Controller image
# external_huawei_cloud_controller_image_repo: "swr.ap-southeast-1.myhuaweicloud.com"
# external_huawei_cloud_controller_image_tag: "v0.26.6"
# external_huawei_cloud_controller_image_tag: "v0.26.8"


@ -22,6 +22,16 @@
# kubectl_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
# kubelet_download_url: "{{ files_repo }}/dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
## Two options - Override entire repository or override only a single binary.
## [Optional] 1 - Override entire binary repository
# github_url: "https://my_github_proxy"
# dl_k8s_io_url: "https://my_dl_k8s_io_proxy"
# storage_googleapis_url: "https://my_storage_googleapi_proxy"
# get_helm_url: "https://my_helm_sh_proxy"
## [Optional] 2 - Override a specific binary
## CNI Plugins
# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"


@ -25,9 +25,9 @@
# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
# external_openstack_lbaas_manage_security_groups: false
# external_openstack_lbaas_create_monitor: false
# external_openstack_lbaas_monitor_delay: 5
# external_openstack_lbaas_monitor_delay: 5s
# external_openstack_lbaas_monitor_max_retries: 1
# external_openstack_lbaas_monitor_timeout: 3
# external_openstack_lbaas_monitor_timeout: 3s
# external_openstack_lbaas_internal_lb: false
# external_openstack_network_ipv6_disabled: false
# external_openstack_network_internal_networks: []
@ -42,7 +42,7 @@
# external_openstack_application_credential_secret:
## The tag of the external OpenStack Cloud Controller image
# external_openstack_cloud_controller_image_tag: "latest"
# external_openstack_cloud_controller_image_tag: "v1.28.2"
## Tags for the Cinder CSI images
## registry.k8s.io/sig-storage/csi-attacher


@ -171,6 +171,7 @@ cert_manager_enabled: false
# MetalLB deployment
metallb_enabled: false
metallb_speaker_enabled: "{{ metallb_enabled }}"
metallb_namespace: "metallb-system"
# metallb_version: v0.13.9
# metallb_protocol: "layer2"
# metallb_port: "7472"
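
As a sketch, deploying MetalLB into a non-default namespace now takes a single override (namespace name illustrative):
```yaml
metallb_enabled: true
metallb_namespace: "lb-system"  # illustrative; the default added above is metallb-system
```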


@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.29.2
kube_version: v1.29.4
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@ -371,3 +371,6 @@ kubeadm_patches:
enabled: false
source_dir: "{{ inventory_dir }}/patches"
dest_dir: "{{ kube_config_dir }}/patches"
# Set to true to remove the role binding to anonymous users created by kubeadm
remove_anonymous_access: false


@ -19,7 +19,7 @@ calico_cni_name: k8s-pod-network
# add default ippool name
# calico_pool_name: "default-pool"
# add default ippool blockSize (defaults kube_network_node_prefix)
# add default ippool blockSize
calico_pool_blocksize: 26
# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)


@ -1,5 +1,5 @@
---
# cilium_version: "v1.12.1"
# cilium_version: "v1.15.4"
# Log-level
# cilium_debug: false
@ -8,6 +8,9 @@
# cilium_enable_ipv4: true
# cilium_enable_ipv6: false
# Enable l2 announcement from cilium to replace Metallb Ref: https://docs.cilium.io/en/v1.14/network/l2-announcements/
cilium_l2announcements: false
# Cilium agent health port
# cilium_agent_health_port: "9879"
@ -40,6 +43,10 @@
# Overlay Network Mode
# cilium_tunnel_mode: vxlan
# LoadBalancer Mode (snat/dsr/hybrid) Ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/#dsr-mode
# cilium_loadbalancer_mode: snat
# Optional features
# cilium_enable_prometheus: false
# Enable if you want to make use of hostPort mappings
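
A hedged sketch of opting into the options above from group vars (values illustrative):
```yaml
cilium_version: "v1.15.4"
# Use Cilium's L2 announcements instead of MetalLB
cilium_l2announcements: true
# Switch the kube-proxy-free load balancer from SNAT to DSR
cilium_loadbalancer_mode: dsr
```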


@ -1,9 +1,10 @@
ansible==9.3.0
ansible==9.5.1
cryptography==41.0.4
jinja2==3.1.2
jinja2==3.1.4
jmespath==1.0.1
MarkupSafe==2.1.3
netaddr==0.9.0
netaddr==1.2.1
pbr==5.11.1
ruamel.yaml==0.18.5
ruamel.yaml==0.18.6
ruamel.yaml.clib==0.2.8
jsonschema==4.22.0


@ -1,37 +0,0 @@
---
# CoreOS ships without Python installed
- name: Check if bootstrap is needed
raw: stat /opt/bin/.bootstrapped
register: need_bootstrap
failed_when: false
changed_when: false
tags:
- facts
- name: Force binaries directory for Container Linux by CoreOS and Flatcar
set_fact:
bin_dir: "/opt/bin"
tags:
- facts
- name: Run bootstrap.sh
script: bootstrap.sh
become: true
environment: "{{ proxy_env }}"
when:
- need_bootstrap.rc != 0
- name: Set the ansible_python_interpreter fact
set_fact:
ansible_python_interpreter: "{{ bin_dir }}/python"
tags:
- facts
- name: Disable auto-upgrade
systemd:
name: locksmithd.service
masked: true
state: stopped
when:
- coreos_locksmithd_disable


@ -55,22 +55,10 @@
raw: apt-get update --allow-releaseinfo-change
become: true
when:
- '''ID=debian'' in os_release.stdout_lines'
- '''VERSION_ID="10"'' in os_release.stdout_lines or ''VERSION_ID="11"'' in os_release.stdout_lines'
- os_release_dict['ID'] == 'debian'
- os_release_dict['VERSION_ID'] in ["10", "11"]
register: bootstrap_update_apt_result
changed_when:
- '"changed its" in bootstrap_update_apt_result.stdout'
- '"value from" in bootstrap_update_apt_result.stdout'
ignore_errors: true
- name: Set the ansible_python_interpreter fact
set_fact:
ansible_python_interpreter: "/usr/bin/python3"
# Workaround for https://github.com/ansible/ansible/issues/25543
- name: Install dbus for the hostname module
package:
name: dbus
state: present
use: apt
become: true


@ -38,9 +38,3 @@
delay: 5
sleep: 5
when: need_bootstrap.rc != 0
- name: Store the fact if this is an fedora core os host
set_fact:
is_fedora_coreos: True
tags:
- facts


@ -21,16 +21,10 @@
become: true
when: not skip_http_proxy_on_os_packages
- name: Install python3 on fedora
raw: "dnf install --assumeyes --quiet python3"
# libselinux-python3 is required on SELinux enabled hosts
# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements
- name: Install ansible requirements
raw: "dnf install --assumeyes python3 python3-dnf libselinux-python3"
become: true
when:
- need_bootstrap.rc != 0
# libselinux-python3 is required on SELinux enabled hosts
# See https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#managed-node-requirements
- name: Install libselinux-python3
package:
name: libselinux-python3
state: present
become: true


@ -9,12 +9,6 @@
tags:
- facts
- name: Force binaries directory for Flatcar Container Linux by Kinvolk
set_fact:
bin_dir: "/opt/bin"
tags:
- facts
- name: Run bootstrap.sh
script: bootstrap.sh
become: true
@ -22,11 +16,14 @@
when:
- need_bootstrap.rc != 0
- name: Set the ansible_python_interpreter fact
# Workaround ansible https://github.com/ansible/ansible/pull/82821
# We set the interpreter rather than ansible_python_interpreter to allow
# - using virtual env with task level ansible_python_interpreter later
# - let users specify an ansible_python_interpreter in group_vars
- name: Make interpreter discovery work on Flatcar
set_fact:
ansible_python_interpreter: "{{ bin_dir }}/python"
tags:
- facts
ansible_interpreter_python_fallback: "{{ ansible_interpreter_python_fallback + [ '/opt/bin/python' ] }}"
- name: Disable auto-upgrade
systemd:


@ -6,47 +6,29 @@
# This command should always run, even in check mode
check_mode: false
- name: Bootstrap CentOS
include_tasks: bootstrap-centos.yml
when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines or ''ID="uos"'' in os_release.stdout_lines or ''ID="openEuler"'' in os_release.stdout_lines'
- name: Include distro specifics vars and tasks
vars:
os_release_dict: "{{ os_release.stdout_lines | select('regex', '^.+=.*$') | map('regex_replace', '\"', '') |
map('split', '=') | community.general.dict }}"
block:
- name: Include vars
include_vars: "{{ item }}"
tags:
- facts
with_first_found:
- &search
files:
- "{{ os_release_dict['ID'] }}-{{ os_release_dict['VARIANT_ID'] }}.yml"
- "{{ os_release_dict['ID'] }}.yml"
paths:
- vars/
skip: True
- name: Include tasks
include_tasks: "{{ item }}"
with_first_found:
- <<: *search
paths: []
- name: Bootstrap Amazon
include_tasks: bootstrap-amazon.yml
when: '''ID="amzn"'' in os_release.stdout_lines'
- name: Bootstrap RedHat
include_tasks: bootstrap-redhat.yml
when: '''ID="rhel"'' in os_release.stdout_lines'
- name: Bootstrap Clear Linux
include_tasks: bootstrap-clearlinux.yml
when: '''ID=clear-linux-os'' in os_release.stdout_lines'
# Fedora CoreOS
- name: Bootstrap Fedora CoreOS
include_tasks: bootstrap-fedora-coreos.yml
when:
- '''ID=fedora'' in os_release.stdout_lines'
- '''VARIANT_ID=coreos'' in os_release.stdout_lines'
- name: Bootstrap Flatcar
include_tasks: bootstrap-flatcar.yml
when: '''ID=flatcar'' in os_release.stdout_lines'
- name: Bootstrap Debian
include_tasks: bootstrap-debian.yml
when: '''ID=debian'' in os_release.stdout_lines or ''ID=ubuntu'' in os_release.stdout_lines'
# Fedora "classic"
- name: Boostrap Fedora
include_tasks: bootstrap-fedora.yml
when:
- '''ID=fedora'' in os_release.stdout_lines'
- '''VARIANT_ID=coreos'' not in os_release.stdout_lines'
- name: Bootstrap OpenSUSE
include_tasks: bootstrap-opensuse.yml
when: '''ID="opensuse-leap"'' in os_release.stdout_lines or ''ID="opensuse-tumbleweed"'' in os_release.stdout_lines'
- name: Create remote_tmp for it is used by another module
file:
@ -54,9 +36,7 @@
state: directory
mode: 0700
# Workaround for https://github.com/ansible/ansible/issues/42726
# (1/3)
- name: Gather host facts to get ansible_os_family
- name: Gather facts
setup:
gather_subset: '!all'
filter: ansible_*
@ -64,39 +44,12 @@
- name: Assign inventory name to unconfigured hostnames (non-CoreOS, non-Flatcar, Suse and ClearLinux, non-Fedora)
hostname:
name: "{{ inventory_hostname }}"
when:
- override_system_hostname
- ansible_os_family not in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux']
- not ansible_distribution == "Fedora"
- not is_fedora_coreos
# (2/3)
- name: Assign inventory name to unconfigured hostnames (CoreOS, Flatcar, Suse, ClearLinux and Fedora only)
command: "hostnamectl set-hostname {{ inventory_hostname }}"
register: hostname_changed
become: true
changed_when: false
when: >
override_system_hostname
and (ansible_os_family in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux']
or is_fedora_coreos
or ansible_distribution == "Fedora")
# (3/3)
- name: Update hostname fact (CoreOS, Flatcar, Suse, ClearLinux and Fedora only)
setup:
gather_subset: '!all'
filter: ansible_hostname
when: >
override_system_hostname
and (ansible_os_family in ['Suse', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'ClearLinux']
or is_fedora_coreos
or ansible_distribution == "Fedora")
when: override_system_hostname
- name: Install ceph-common package
package:
name:
- ceph-common
- ceph-common
state: present
when: rbd_provisioner_enabled | default(false)
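
To make the refactor above concrete: `os_release_dict` parses the raw `/etc/os-release` lines into a dict (quotes stripped, split on `=`), replacing the old `stdout_lines` string matching. With illustrative contents:
```yaml
# /etc/os-release lines such as ID="rocky" and VERSION_ID="9.3" resolve to roughly:
os_release_dict:
  ID: rocky
  VERSION_ID: "9.3"
# which enables lookups like:
# when: os_release_dict['ID'] == 'debian'
```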


@ -0,0 +1 @@
opensuse.yml


@ -0,0 +1 @@
opensuse.yml


@ -0,0 +1 @@
debian.yml


@ -0,0 +1,2 @@
---
is_fedora_coreos: True


@ -0,0 +1,2 @@
---
bin_dir: "/opt/bin"


@ -109,3 +109,11 @@ containerd_supported_distributions:
# Enable container device interface
enable_cdi: false
# For containerd tracing configuration please check out the official documentation:
# https://github.com/containerd/containerd/blob/main/docs/tracing.md
containerd_tracing_enabled: false
containerd_tracing_endpoint: "0.0.0.0:4317"
containerd_tracing_protocol: "grpc"
containerd_tracing_sampling_ratio: 1.0
containerd_tracing_service_name: "containerd"
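
As a sketch, enabling the new tracing support and pointing it at an OTLP collector might look like this in group vars (the endpoint address is illustrative):
```yaml
containerd_tracing_enabled: true
containerd_tracing_endpoint: "otel-collector.observability.svc:4317"  # illustrative collector address
containerd_tracing_protocol: "grpc"
containerd_tracing_sampling_ratio: 0.25  # sample a quarter of spans
```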


@ -92,6 +92,18 @@ oom_score = {{ containerd_oom_score }}
disable = false
{% endif %}
{% if containerd_tracing_enabled %}
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = "{{ containerd_tracing_endpoint }}"
protocol = "{{ containerd_tracing_protocol }}"
{% if containerd_tracing_protocol == "grpc" %}
insecure = false
{% endif %}
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = {{ containerd_tracing_sampling_ratio }}
service_name = "{{ containerd_tracing_service_name }}"
{% endif %}
{% if containerd_extra_args is defined %}
{{ containerd_extra_args }}
{% endif %}


@ -1,4 +1,4 @@
server = "https://{{ item.prefix }}"
server = "{{ item.server | default("https://" + item.prefix) }}"
{% for mirror in item.mirrors %}
[host."{{ mirror.host }}"]
capabilities = ["{{ ([ mirror.capabilities ] | flatten ) | join('","') }}"]


@ -90,6 +90,20 @@
remote_src: true
notify: Restart crio
- name: Cri-o | configure crio to use kube reserved cgroups
ansible.builtin.copy:
dest: /etc/systemd/system/crio.service.d/00-slice.conf
owner: root
group: root
mode: '0644'
content: |
[Service]
Slice={{ kube_reserved_cgroups_for_service_slice }}
notify: Restart crio
when:
- kube_reserved is defined and kube_reserved is true
- kube_reserved_cgroups_for_service_slice is defined
- name: Cri-o | update the bin dir for crio.service file
replace:
dest: /etc/systemd/system/crio.service


@ -67,6 +67,17 @@
environment: "{{ proxy_env }}"
when: ansible_pkg_mgr == 'apt'
# ref to https://github.com/kubernetes-sigs/kubespray/issues/11086
- name: Remove the archived debian apt repository
lineinfile:
path: /etc/apt/sources.list
regexp: 'buster-backports'
state: absent
backup: yes
when:
- ansible_os_family == 'Debian'
- ansible_distribution_release == "buster"
- name: Ensure docker-ce repository is enabled
apt_repository:
repo: "{{ item }}"


@ -6,8 +6,8 @@
- name: Docker | Find docker packages
set_fact:
docker_packages_list: "{{ ansible_facts.packages.keys() | select('search', '^docker*') }}"
containerd_package: "{{ ansible_facts.packages.keys() | select('search', '^containerd*') }}"
docker_packages_list: "{{ ansible_facts.packages.keys() | select('search', '^docker+') }}"
containerd_package: "{{ ansible_facts.packages.keys() | select('search', '^containerd+') }}"
- name: Docker | Stop all running container
shell: "set -o pipefail && {{ docker_bin_dir }}/docker ps -q | xargs -r {{ docker_bin_dir }}/docker kill"


@ -2,7 +2,7 @@
- name: Download | Prepare working directories and variables
import_tasks: prep_download.yml
when:
- not skip_downloads
- not skip_downloads | default(false)
tags:
- download
- upload
@ -10,7 +10,7 @@
- name: Download | Get kubeadm binary and list of required images
include_tasks: prep_kubeadm_images.yml
when:
- not skip_downloads
- not skip_downloads | default(false)
- inventory_hostname in groups['kube_control_plane']
tags:
- download
@ -22,44 +22,8 @@
vars:
download: "{{ download_defaults | combine(item.value) }}"
include_file: "download_{% if download.container %}container{% else %}file{% endif %}.yml"
kubeadm_images: "{{ skip_kubeadm_images | ternary({}, _kubeadm_images) }}"
# The trick (converting list of tuples to list of dicts) below come from
# https://docs.ansible.com/ansible/latest/collections/community/general/dict_filter.html#examples
_kubeadm_images: "{{ dict(names | map('regex_replace', '^(.*)', 'kubeadm_\\1') |
zip( repos | zip(_tags, _groups) |
map('zip', keys) | map('map', 'reverse') | map('community.general.dict') |
map('combine', defaults))) |
dict2items | rejectattr('key', 'in', excluded) | items2dict }}"
keys:
- repo
- tag
- groups
images: "{{ kubeadm_images_raw.stdout_lines | map('split', ':') }}"
_tags: "{{ images | map(attribute=1) }}"
repos: "{{ images | map(attribute=0) }}"
names: "{{ repos | map('split', '/') | map(attribute=-1) }}"
_groups: "{{ names | map('extract', images_groups) }}"
defaults:
enabled: true
container: true
excluded:
- kubeadm_coredns
- kubeadm_pause
images_groups:
coredns: []
pause: []
kube-proxy:
- k8s_cluster
etcd:
- etcd
kube-scheduler:
- kube_control_plane
kube-controller-manager:
- kube_control_plane
kube-apiserver:
- kube_control_plane
when:
- not skip_downloads
- not skip_downloads | default(false)
- download.enabled
- item.value.enabled
- (not (item.value.container | default(false))) or (item.value.container and download_container)


@ -20,7 +20,7 @@
dest: "{{ kube_config_dir }}/kubeadm-images.yaml"
mode: 0644
when:
- not skip_kubeadm_images
- not skip_kubeadm_images | default(false)
- name: Prep_kubeadm_images | Copy kubeadm binary from download dir to system path
copy:
@ -36,9 +36,36 @@
state: file
- name: Prep_kubeadm_images | Generate list of required images
command: "{{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml"
shell: "set -o pipefail && {{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -Ev 'coredns|pause'"
args:
executable: /bin/bash
register: kubeadm_images_raw
run_once: true
changed_when: false
when:
- not skip_kubeadm_images
- not skip_kubeadm_images | default(false)
- name: Prep_kubeadm_images | Parse list of images
vars:
kubeadm_images_list: "{{ kubeadm_images_raw.stdout_lines }}"
set_fact:
kubeadm_image:
key: "kubeadm_{{ (item | regex_replace('^(?:.*\\/)*', '')).split(':')[0] }}"
value:
enabled: true
container: true
repo: "{{ item | regex_replace('^(.*):.*$', '\\1') }}"
tag: "{{ item | regex_replace('^.*:(.*)$', '\\1') }}"
groups: k8s_cluster
loop: "{{ kubeadm_images_list | flatten(levels=1) }}"
register: kubeadm_images_cooked
run_once: true
when:
- not skip_kubeadm_images | default(false)
- name: Prep_kubeadm_images | Convert list of images to dict for later use
set_fact:
kubeadm_images: "{{ kubeadm_images_cooked.results | map(attribute='ansible_facts.kubeadm_image') | list | items2dict }}"
run_once: true
when:
- not skip_kubeadm_images | default(false)
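
To make the new parsing concrete: `kubeadm config images list` prints one `repo:tag` per line (with coredns and pause grepped out above), and each line becomes a `kubeadm_*` entry. With illustrative output such as `registry.k8s.io/kube-apiserver:v1.29.4`, the resulting dict is roughly:
```yaml
kubeadm_images:
  kubeadm_kube-apiserver:
    enabled: true
    container: true
    repo: registry.k8s.io/kube-apiserver
    tag: v1.29.4
    groups: k8s_cluster
```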


@ -7,3 +7,4 @@ helm_defaults:
helm_repository_defaults:
binary_path: "{{ bin_dir }}/helm"
force_update: true


@ -20,7 +20,7 @@ spec:
spec:
nodeSelector:
{{ nodelocaldns_ds_nodeselector }}
priorityClassName: system-cluster-critical
priorityClassName: system-node-critical
serviceAccountName: nodelocaldns
hostNetwork: true
dnsPolicy: Default # Don't use cluster DNS.


@ -16,4 +16,4 @@ external_huaweicloud_cloud: "{{ lookup('env','OS_CLOUD') }}"
## arg2: "value2"
external_huawei_cloud_controller_extra_args: {}
external_huawei_cloud_controller_image_repo: "swr.ap-southeast-1.myhuaweicloud.com"
external_huawei_cloud_controller_image_tag: "v0.26.6"
external_huawei_cloud_controller_image_tag: "v0.26.8"


@ -21,3 +21,6 @@ subnet-id={{ external_huaweicloud_lbaas_subnet_id }}
{% if external_huaweicloud_lbaas_network_id is defined %}
id={{ external_huaweicloud_lbaas_network_id }}
{% endif %}
{% if external_huaweicloud_security_group_id is defined %}
security-group-id={{ external_huaweicloud_security_group_id }}
{% endif %}


@ -47,6 +47,11 @@ spec:
- --cloud-config=$(CLOUD_CONFIG)
- --cloud-provider=huaweicloud
- --use-service-account-credentials=true
- --node-status-update-frequency=5s
- --node-monitor-period=5s
- --leader-elect-lease-duration=30s
- --leader-elect-renew-deadline=20s
- --leader-elect-retry-period=2s
{% for key, value in external_huawei_cloud_controller_extra_args.items() %}
- "{{ '--' + key + '=' + value }}"
{% endfor %}


@ -1,16 +1,12 @@
apiVersion: v1
items:
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:cloud-controller-manager
subjects:
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:cloud-controller-manager
subjects:
- kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system
kind: List
metadata: {}
namespace: kube-system


@ -1,117 +1,113 @@
apiVersion: v1
items:
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:cloud-controller-manager
rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: system:cloud-controller-manager
rules:
- resources:
- tokenreviews
- tokenreviews
verbs:
- get
- list
- watch
- create
- update
- patch
- get
- list
- watch
- create
- update
- patch
apiGroups:
- authentication.k8s.io
- authentication.k8s.io
- resources:
- configmaps
- endpoints
- pods
- services
- secrets
- serviceaccounts
- serviceaccounts/token
- configmaps
- endpoints
- pods
- services
- secrets
- serviceaccounts
- serviceaccounts/token
verbs:
- get
- list
- watch
- create
- update
- patch
- get
- list
- watch
- create
- update
- patch
apiGroups:
- ''
- ''
- resources:
- nodes
- nodes
verbs:
- get
- list
- watch
- delete
- patch
- update
- get
- list
- watch
- delete
- patch
- update
apiGroups:
- ''
- ''
- resources:
- services/status
- pods/status
- services/status
- pods/status
verbs:
- update
- patch
- update
- patch
apiGroups:
- ''
- ''
- resources:
- nodes/status
- nodes/status
verbs:
- patch
- update
- patch
- update
apiGroups:
- ''
- ''
- resources:
- events
- endpoints
- events
- endpoints
verbs:
- create
- patch
- update
- create
- patch
- update
apiGroups:
- ''
- ''
- resources:
- leases
- leases
verbs:
- get
- update
- create
- delete
- get
- update
- create
- delete
apiGroups:
- coordination.k8s.io
- coordination.k8s.io
- resources:
- customresourcedefinitions
- customresourcedefinitions
verbs:
- get
- update
- create
- delete
- get
- update
- create
- delete
apiGroups:
- apiextensions.k8s.io
- resources:
- ingresses
- ingresses
verbs:
- get
- list
- watch
- update
- create
- patch
- delete
- get
- list
- watch
- update
- create
- patch
- delete
apiGroups:
- networking.k8s.io
- networking.k8s.io
- resources:
- ingresses/status
- ingresses/status
verbs:
- update
- patch
- update
- patch
apiGroups:
- networking.k8s.io
- networking.k8s.io
- resources:
- endpointslices
- endpointslices
verbs:
- get
- list
- watch
- get
- list
- watch
apiGroups:
- discovery.k8s.io
kind: List
metadata: {}
- discovery.k8s.io


@ -21,5 +21,5 @@ external_openstack_cacert: "{{ lookup('env', 'OS_CACERT') }}"
## arg1: "value1"
## arg2: "value2"
external_openstack_cloud_controller_extra_args: {}
external_openstack_cloud_controller_image_tag: "v1.25.3"
external_openstack_cloud_controller_image_tag: "v1.28.2"
external_openstack_cloud_controller_bind_address: 127.0.0.1


@ -36,7 +36,7 @@ spec:
serviceAccountName: cloud-controller-manager
containers:
- name: openstack-cloud-controller-manager
image: {{ docker_image_repo }}/k8scloudprovider/openstack-cloud-controller-manager:{{ external_openstack_cloud_controller_image_tag }}
image: {{ external_openstack_cloud_controller_image_repo }}:{{ external_openstack_cloud_controller_image_tag }}
args:
- /bin/openstack-cloud-controller-manager
- --v=1


@ -19,5 +19,6 @@ ingress_nginx_without_class: true
ingress_nginx_default: false
ingress_nginx_webhook_enabled: false
ingress_nginx_webhook_job_ttl: 1800
ingress_nginx_opentelemetry_enabled: false
ingress_nginx_probe_initial_delay_seconds: 10
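
A minimal sketch of turning the new OpenTelemetry init container on (the module image comes from `ingress_nginx_opentelemetry_image_repo`/`_tag`, referenced by the template below):
```yaml
ingress_nginx_enabled: true
ingress_nginx_opentelemetry_enabled: true
```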


@ -23,6 +23,26 @@ spec:
spec:
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: {{ ingress_nginx_termination_grace_period_seconds }}
{% if ingress_nginx_opentelemetry_enabled %}
initContainers:
- name: opentelemetry
command:
- /init_module
image: {{ ingress_nginx_opentelemetry_image_repo }}:{{ ingress_nginx_opentelemetry_image_tag }}
securityContext:
runAsNonRoot: true
runAsUser: 65532
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /modules_mount
name: modules
{% endif %}
{% if ingress_nginx_host_network %}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
@ -127,15 +147,27 @@ spec:
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
{% if ingress_nginx_webhook_enabled %}
{% if ingress_nginx_webhook_enabled or ingress_nginx_opentelemetry_enabled %}
volumeMounts:
{% if ingress_nginx_webhook_enabled %}
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
{% endif %}
{% if ingress_nginx_webhook_enabled %}
{% if ingress_nginx_opentelemetry_enabled %}
- name: modules
mountPath: /modules_mount
{% endif %}
{% endif %}
{% if ingress_nginx_webhook_enabled or ingress_nginx_opentelemetry_enabled %}
volumes:
{% if ingress_nginx_webhook_enabled %}
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
{% endif %}
{% if ingress_nginx_opentelemetry_enabled %}
- name: modules
emptyDir: {}
{% endif %}
{% endif %}


@ -5,7 +5,7 @@ kubelet_csr_approver_namespace: kube-system
kubelet_csr_approver_repository_name: kubelet-csr-approver
kubelet_csr_approver_repository_url: https://postfinance.github.io/kubelet-csr-approver
kubelet_csr_approver_chart_ref: "{{ kubelet_csr_approver_repository_name }}/kubelet-csr-approver"
kubelet_csr_approver_chart_version: 0.2.8
kubelet_csr_approver_chart_version: 1.1.0
# Fill values override here
# See upstream https://github.com/postfinance/kubelet-csr-approver


@ -33,7 +33,7 @@
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Wait for MetalLB controller to be running
command: "{{ bin_dir }}/kubectl rollout status -n metallb-system deployment -l app=metallb,component=controller --timeout=2m"
command: "{{ bin_dir }}/kubectl rollout status -n {{ metallb_namespace }} deployment -l app=metallb,component=controller --timeout=2m"
become: true
when:
- inventory_hostname == groups['kube_control_plane'][0]
@ -104,5 +104,5 @@
name: config
kubectl: "{{ bin_dir }}/kubectl"
resource: ConfigMap
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
state: absent


@ -11,7 +11,7 @@ apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: "{{ entry }}"
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
spec:
ipAddressPools:
- "{{ entry }}"


@ -9,7 +9,7 @@ apiVersion: metallb.io/v1beta1
kind: Community
metadata:
name: "{{ community_name }}"
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
spec:
communities:
- name: "{{ community_name }}"
@ -21,7 +21,7 @@ apiVersion: metallb.io/v1beta1
kind: Community
metadata:
name: well-known
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
spec:
communities:
- name: no-export
@ -51,7 +51,7 @@ apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
name: "{{ peer_name }}-local"
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
spec:
aggregationLength: 32
aggregationLengthV6: 128
@ -70,7 +70,7 @@ apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
name: "{{ peer_name }}-external"
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
spec:
{% if peer.aggregation_length is defined and peer.aggregation_length <= 30 %}
aggregationLength: {{ peer.aggregation_length }}
@ -93,7 +93,7 @@ apiVersion: metallb.io/v1beta2
kind: BGPPeer
metadata:
name: "{{ peer_name }}"
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
spec:
myASN: {{ peer.my_asn }}
peerASN: {{ peer.peer_asn }}


@ -6,7 +6,7 @@ metadata:
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
name: metallb-system
name: {{ metallb_namespace }}
---
apiVersion: apiextensions.k8s.io/v1
@ -23,7 +23,7 @@ spec:
caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
service:
name: webhook-service
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
path: /convert
conversionReviewVersions:
- v1alpha1
@ -544,7 +544,7 @@ spec:
caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlGWlRDQ0EwMmdBd0lCQWdJVU5GRW1XcTM3MVpKdGkrMmlSQzk1WmpBV1MxZ3dEUVlKS29aSWh2Y05BUUVMDQpCUUF3UWpFTE1Ba0dBMVVFQmhNQ1dGZ3hGVEFUQmdOVkJBY01ERVJsWm1GMWJIUWdRMmwwZVRFY01Cb0dBMVVFDQpDZ3dUUkdWbVlYVnNkQ0JEYjIxd1lXNTVJRXgwWkRBZUZ3MHlNakEzTVRrd09UTXlNek5hRncweU1qQTRNVGd3DQpPVE15TXpOYU1FSXhDekFKQmdOVkJBWVRBbGhZTVJVd0V3WURWUVFIREF4RVpXWmhkV3gwSUVOcGRIa3hIREFhDQpCZ05WQkFvTUUwUmxabUYxYkhRZ1EyOXRjR0Z1ZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDDQpEd0F3Z2dJS0FvSUNBUUNxVFpxMWZRcC9vYkdlenhES0o3OVB3Ny94azJwellualNzMlkzb1ZYSm5sRmM4YjVlDQpma2ZZQnY2bndscW1keW5PL2phWFBaQmRQSS82aFdOUDBkdVhadEtWU0NCUUpyZzEyOGNXb3F0MGNTN3pLb1VpDQpvcU1tQ0QvRXVBeFFNZjhRZDF2c1gvVllkZ0poVTZBRXJLZEpIaXpFOUJtUkNkTDBGMW1OVW55Rk82UnRtWFZUDQpidkxsTDVYeTc2R0FaQVBLOFB4aVlDa0NtbDdxN0VnTWNiOXlLWldCYmlxQ3VkTXE5TGJLNmdKNzF6YkZnSXV4DQo1L1pXK2JraTB2RlplWk9ZODUxb1psckFUNzJvMDI4NHNTWW9uN0pHZVZkY3NoUnh5R1VpSFpSTzdkaXZVTDVTDQpmM2JmSDFYbWY1ZDQzT0NWTWRuUUV2NWVaOG8zeWVLa3ZrbkZQUGVJMU9BbjdGbDlFRVNNR2dhOGFaSG1URSttDQpsLzlMSmdDYjBnQmtPT0M0WnV4bWh2aERKV1EzWnJCS3pMQlNUZXN0NWlLNVlwcXRWVVk2THRyRW9FelVTK1lsDQpwWndXY2VQWHlHeHM5ZURsR3lNVmQraW15Y3NTU1UvVno2Mmx6MnZCS21NTXBkYldDQWhud0RsRTVqU2dyMjRRDQp0eGNXLys2N3d5KzhuQlI3UXdqVTFITndVRjBzeERWdEwrZ1NHVERnSEVZSlhZelYvT05zMy94TkpoVFNPSkxNDQpoeXNVdyttaGdackdhbUdXcHVIVU1DUitvTWJzMTc1UkcrQjJnUFFHVytPTjJnUTRyOXN2b0ZBNHBBQm8xd1dLDQpRYjRhY3pmeVVscElBOVFoSmFsZEY3S3dPSHVlV3gwRUNrNXg0T2tvVDBvWVp0dzFiR0JjRGtaSmF3SURBUUFCDQpvMU13VVRBZEJnTlZIUTRFRmdRVW90UlNIUm9IWTEyRFZ4R0NCdEhpb1g2ZmVFQXdId1lEVlIwakJCZ3dGb0FVDQpvdFJTSFJvSFkxMkRWeEdDQnRIaW9YNmZlRUF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFOQmdrcWhraUc5dzBCDQpBUXNGQUFPQ0FnRUFSbkpsWWRjMTFHd0VxWnh6RDF2R3BDR2pDN2VWTlQ3aVY1d3IybXlybHdPYi9aUWFEa0xYDQpvVStaOVVXT1VlSXJTdzUydDdmQUpvVVAwSm5iYkMveVIrU1lqUGhvUXNiVHduOTc2ZldBWTduM3FMOXhCd1Y0DQphek41OXNjeUp0dlhMeUtOL2N5ak1ReDRLajBIMFg0bWJ6bzVZNUtzWWtYVU0vOEFPdWZMcEd0S1NGVGgrSEFDDQpab1Q5YnZHS25adnNHd0tYZFF0Wnh0akhaUjVqK3U3ZGtQOTJBT051RFNabS8rWVV4b2tBK09JbzdSR3BwSHNXDQo1ZTdNY0FTVXRtb1FORXd6dVFoVkJaRWQ1OGtKYjUrV0VWbGNzanlXNnRTbzErZ25tTWNqR1BsMWgxR2hVbjV4DQpFY0lWRnBIWXM5YWo1NmpBSjk1MVQvZjhMaWxmTlVnanBLQ0c1bnl0SUt3emxhOHNtdGlPdm1UNEpYbXBwSkI2DQo4bmdHRVluVjUrUTYwWFJ2OEhSSGp1VG9CRHVhaERrVDA2R1JGODU1d09FR2V4bkZpMXZYWUxLVllWb1V2MXRKDQo4dVdUR1pwNllDSVJldlBqbzg5ZytWTlJSaVFYUThJd0dybXE5c0RoVTlqTjA0SjdVL1RvRDFpNHE3VnlsRUc5DQorV1VGNkNLaEdBeTJIaEhwVncyTGFoOS9lUzdZMUZ1YURrWmhPZG1laG1BOCtqdHNZamJadnR5Mm1SWlF0UUZzDQpUU1VUUjREbUR2bVVPRVRmeStpRHdzK2RkWXVNTnJGeVVYV2dkMnpBQU4ydVl1UHFGY2pRcFNPODFzVTJTU3R3DQoxVzAyeUtYOGJEYmZFdjBzbUh3UzliQnFlSGo5NEM1Mjg0YXpsdTBmaUdpTm1OUEM4ckJLRmhBPQ0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
service:
name: webhook-service
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
path: /convert
conversionReviewVersions:
- v1beta1
@ -1291,7 +1291,7 @@ metadata:
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
name: controller
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
{% if metallb_speaker_enabled %}
---
@ -1301,7 +1301,7 @@ metadata:
labels:
app: metallb
name: speaker
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
{% endif %}
---
apiVersion: rbac.authorization.k8s.io/v1
@ -1310,7 +1310,7 @@ metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
rules:
- apiGroups:
- ""
@ -1402,7 +1402,7 @@ metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
rules:
- apiGroups:
- ""
@ -1480,7 +1480,7 @@ kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:controller
name: {{ metallb_namespace }}:controller
rules:
- apiGroups:
- ""
@ -1561,7 +1561,7 @@ kind: ClusterRole
metadata:
labels:
app: metallb
name: metallb-system:speaker
name: {{ metallb_namespace }}:speaker
rules:
- apiGroups:
- ""
@ -1598,7 +1598,7 @@ metadata:
labels:
app: metallb
name: controller
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@ -1606,7 +1606,7 @@ roleRef:
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
---
apiVersion: rbac.authorization.k8s.io/v1
@ -1615,7 +1615,7 @@ metadata:
labels:
app: metallb
name: pod-lister
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@ -1623,7 +1623,7 @@ roleRef:
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
---
apiVersion: rbac.authorization.k8s.io/v1
@ -1631,15 +1631,15 @@ kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:controller
name: {{ metallb_namespace }}:controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:controller
name: {{ metallb_namespace }}:controller
subjects:
- kind: ServiceAccount
name: controller
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
{% if metallb_speaker_enabled %}
---
@ -1648,15 +1648,15 @@ kind: ClusterRoleBinding
metadata:
labels:
app: metallb
name: metallb-system:speaker
name: {{ metallb_namespace }}:speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-system:speaker
name: {{ metallb_namespace }}:speaker
subjects:
- kind: ServiceAccount
name: speaker
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
{% endif %}
---
@ -1664,14 +1664,14 @@ apiVersion: v1
kind: Secret
metadata:
name: webhook-server-cert
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
---
apiVersion: v1
kind: Service
metadata:
name: webhook-service
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
spec:
ports:
- port: 443
@ -1687,7 +1687,7 @@ metadata:
app: metallb
component: controller
name: controller
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
spec:
revisionHistoryLimit: 3
selector:
@ -1782,7 +1782,7 @@ metadata:
app: metallb
component: speaker
name: speaker
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
spec:
selector:
matchLabels:
@ -1888,7 +1888,7 @@ webhooks:
clientConfig:
service:
name: webhook-service
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
path: /validate-metallb-io-v1beta2-bgppeer
failurePolicy: Fail
name: bgppeersvalidationwebhook.metallb.io
@ -1908,7 +1908,7 @@ webhooks:
clientConfig:
service:
name: webhook-service
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
path: /validate-metallb-io-v1beta1-addresspool
failurePolicy: Fail
name: addresspoolvalidationwebhook.metallb.io
@ -1928,7 +1928,7 @@ webhooks:
clientConfig:
service:
name: webhook-service
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
path: /validate-metallb-io-v1beta1-bfdprofile
failurePolicy: Fail
name: bfdprofilevalidationwebhook.metallb.io
@ -1948,7 +1948,7 @@ webhooks:
clientConfig:
service:
name: webhook-service
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
path: /validate-metallb-io-v1beta1-bgpadvertisement
failurePolicy: Fail
name: bgpadvertisementvalidationwebhook.metallb.io
@ -1968,7 +1968,7 @@ webhooks:
clientConfig:
service:
name: webhook-service
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
path: /validate-metallb-io-v1beta1-community
failurePolicy: Fail
name: communityvalidationwebhook.metallb.io
@ -1988,7 +1988,7 @@ webhooks:
clientConfig:
service:
name: webhook-service
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
path: /validate-metallb-io-v1beta1-ipaddresspool
failurePolicy: Fail
name: ipaddresspoolvalidationwebhook.metallb.io
@ -2008,7 +2008,7 @@ webhooks:
clientConfig:
service:
name: webhook-service
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
path: /validate-metallb-io-v1beta1-l2advertisement
failurePolicy: Fail
name: l2advertisementvalidationwebhook.metallb.io


@ -9,7 +9,7 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
namespace: metallb-system
namespace: "{{ metallb_namespace }}"
name: "{{ pool_name }}"
spec:
addresses:


@ -15,7 +15,6 @@ metadata:
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# rename if there are conflicts
name: snapshot-controller-runner
rules:
- apiGroups: [""]
@ -24,9 +23,6 @@ rules:
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
@ -35,13 +31,37 @@ rules:
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
verbs: ["get", "list", "watch", "update", "patch", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
verbs: ["update", "patch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshotcontents/status"]
verbs: ["patch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshots"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["groupsnapshot.storage.k8s.io"]
resources: ["volumegroupsnapshots/status"]
verbs: ["update", "patch"]
# Enable this RBAC rule only when using distributed snapshotting, i.e. when the enable-distributed-snapshotting flag is set to true
# - apiGroups: [""]
# resources: ["nodes"]
# verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
@ -54,7 +74,6 @@ subjects:
namespace: {{ snapshot_controller_namespace }}
roleRef:
kind: ClusterRole
# change the name also here if the ClusterRole gets renamed
name: snapshot-controller-runner
apiGroup: rbac.authorization.k8s.io
@ -62,12 +81,12 @@ roleRef:
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: {{ snapshot_controller_namespace }}
name: snapshot-controller-leaderelection
namespace: {{ snapshot_controller_namespace }}
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding


@ -15,11 +15,12 @@ spec:
replicas: {{ snapshot_controller_replicas }}
selector:
matchLabels:
app: snapshot-controller
# the snapshot controller won't be marked as ready if the v1 CRDs are unavailable
# in #504 the snapshot-controller will exit after around 7.5 seconds if it
# can't find the v1 CRDs so this value should be greater than that
minReadySeconds: 15
app.kubernetes.io/name: snapshot-controller
# The snapshot controller won't be marked as ready if the v1 CRDs are unavailable.
# The flag --retry-crd-interval-max is used to determine how long the controller
# will wait for the CRDs to become available before exiting. The default is 30 seconds
# so minReadySeconds should be set slightly higher than the flag value.
minReadySeconds: 35
strategy:
rollingUpdate:
maxSurge: 0
@ -28,13 +29,13 @@ spec:
template:
metadata:
labels:
app: snapshot-controller
app.kubernetes.io/name: snapshot-controller
spec:
serviceAccount: snapshot-controller
serviceAccountName: snapshot-controller
containers:
- name: snapshot-controller
image: {{ snapshot_controller_image_repo }}:{{ snapshot_controller_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- "--v=5"
- "--leader-election=false"
imagePullPolicy: {{ k8s_image_pull_policy }}
- "--leader-election={{ 'true' if snapshot_controller_replicas > 1 else 'false' }}"


@ -240,3 +240,6 @@ kubeadm_upgrade_auto_cert_renewal: true
kube_apiserver_tracing: false
kube_apiserver_tracing_endpoint: 0.0.0.0:4317
kube_apiserver_tracing_sampling_rate_per_million: 100
# Enable kubeadm file discovery if anonymous access has been removed
kubeadm_use_file_discovery: "{{ remove_anonymous_access }}"


@ -9,7 +9,7 @@
- name: Set fact joined_control_planes
set_fact:
joined_control_planes: "{{ ((kube_control_planes_raw.stdout | from_json)['items']) | default([]) | map(attribute='metadata') | map(attribute='name') | list }}"
delegate_to: item
delegate_to: "{{ item }}"
loop: "{{ groups['kube_control_plane'] }}"
when: kube_control_planes_raw is succeeded
run_once: yes


@ -63,6 +63,26 @@
- kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
- not kube_external_ca_mode
- name: Get kubeconfig for join discovery process
command: "{{ kubectl }} -n kube-public get cm cluster-info -o jsonpath='{.data.kubeconfig}'"
register: kubeconfig_file_discovery
run_once: true
delegate_to: "{{ groups['kube_control_plane'] | first }}"
when:
- kubeadm_use_file_discovery
- kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
- name: Copy discovery kubeconfig
copy:
dest: "{{ kube_config_dir }}/cluster-info-discovery-kubeconfig.yaml"
content: "{{ kubeconfig_file_discovery.stdout }}"
owner: "root"
mode: 0644
when:
- inventory_hostname != first_kube_control_plane
- kubeadm_use_file_discovery
- kubeadm_already_run is not defined or not kubeadm_already_run.stat.exists
- name: Joining control plane node to the cluster.
command: >-
{{ bin_dir }}/kubeadm join


@ -221,12 +221,16 @@
{{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create {{ kubeadm_token }}
changed_when: false
when:
- inventory_hostname == first_kube_control_plane
- inventory_hostname == first_kube_control_plane
- kubeadm_token is defined
- kubeadm_refresh_token
tags:
- kubeadm_token
- name: Remove binding to anonymous user
command: "{{ kubectl }} -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo --ignore-not-found"
when: inventory_hostname == first_kube_control_plane and remove_anonymous_access
- name: Create kubeadm token for joining nodes with 24h expiration (default)
command: "{{ bin_dir }}/kubeadm --kubeconfig {{ kube_config_dir }}/admin.conf token create"
changed_when: false


@ -53,6 +53,10 @@
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
- name: Kubeadm | Remove binding to anonymous user
command: "{{ kubectl }} -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo --ignore-not-found"
when: remove_anonymous_access
- name: Kubeadm | clean kubectl cache to refresh api types
file:
path: "{{ item }}"


@ -1,6 +1,10 @@
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
{% if kubeadm_use_file_discovery %}
file:
kubeConfigPath: {{ kube_config_dir }}/cluster-info-discovery-kubeconfig.yaml
{% else %}
bootstrapToken:
{% if kubeadm_config_api_fqdn is defined %}
apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
@ -9,6 +13,7 @@ discovery:
{% endif %}
token: {{ kubeadm_token }}
unsafeSkipCAVerification: true
{% endif %}
timeout: {{ discovery_timeout }}
tlsBootstrapToken: {{ kubeadm_token }}
controlPlane:


@ -4,6 +4,9 @@
discovery_timeout: 60s
kubeadm_join_timeout: 120s
# Enable kubeadm file discovery if anonymous access has been removed
kubeadm_use_file_discovery: "{{ remove_anonymous_access }}"
# If non-empty, will use this string as identification instead of the actual hostname
kube_override_hostname: >-
{%- if cloud_provider is defined and cloud_provider in ['aws'] -%}


@ -57,6 +57,24 @@
set_fact:
kubeadmConfig_api_version: v1beta3
- name: Get kubeconfig for join discovery process
command: "{{ kubectl }} -n kube-public get cm cluster-info -o jsonpath='{.data.kubeconfig}'"
register: kubeconfig_file_discovery
run_once: true
delegate_to: "{{ groups['kube_control_plane'] | first }}"
when: kubeadm_use_file_discovery
- name: Copy discovery kubeconfig
copy:
dest: "{{ kube_config_dir }}/cluster-info-discovery-kubeconfig.yaml"
content: "{{ kubeconfig_file_discovery.stdout }}"
owner: "root"
mode: 0644
when:
- not is_kube_master
- not kubelet_conf.stat.exists
- kubeadm_use_file_discovery
- name: Create kubeadm client config
template:
src: "kubeadm-client.conf.{{ kubeadmConfig_api_version }}.j2"


@ -2,6 +2,10 @@
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
{% if kubeadm_use_file_discovery %}
file:
kubeConfigPath: {{ kube_config_dir }}/cluster-info-discovery-kubeconfig.yaml
{% else %}
bootstrapToken:
{% if kubeadm_config_api_fqdn is defined %}
apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
@ -14,6 +18,7 @@ discovery:
- sha256:{{ kubeadm_ca_hash.stdout }}
{% else %}
unsafeSkipCAVerification: true
{% endif %}
{% endif %}
timeout: {{ discovery_timeout }}
tlsBootstrapToken: {{ kubeadm_token }}


@ -24,10 +24,11 @@ kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service"
kubelet_systemd_hardening: false
# List of secure IPs for kubelet
kubelet_secure_addresses: >-
{%- for host in groups['kube_control_plane'] -%}
kube_node_addresses: >-
{%- for host in (groups['kube_control_plane'] + groups['kube_node'] + groups['etcd']) | unique -%}
{{ hostvars[host]['ip'] | default(fallback_ips[host]) }}{{ ' ' if not loop.last else '' }}
{%- endfor -%}
kubelet_secure_addresses: "localhost link-local {{ kube_pods_subnet }} {{ kube_node_addresses }}"
# Reserve this space for kube resources
# Set to true to reserve resources for kube daemons
@ -87,6 +88,9 @@ kube_vip_address:
kube_vip_enableServicesElection: false
kube_vip_lb_enable: false
kube_vip_lb_fwdmethod: local
kube_vip_leaseduration: 5
kube_vip_renewdeadline: 3
kube_vip_retryperiod: 1
# Requests for load balancer app
loadbalancer_apiserver_memory_requests: 32M
@ -258,4 +262,4 @@ conntrack_modules:
## Enable distributed tracing for kubelet
kubelet_tracing: false
kubelet_tracing_endpoint: 0.0.0.0:4317
kubelet_tracing_sampling_rate_per_million: 100
kubelet_tracing_sampling_rate_per_million: 100
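
To illustrate the widened default: assuming the stock `kube_pods_subnet` of `10.233.64.0/18` and a hypothetical cluster with a control plane at 10.0.0.10 and nodes at 10.0.0.11-12, `kubelet_secure_addresses` now resolves to something like:
```yaml
kubelet_secure_addresses: "localhost link-local 10.233.64.0/18 10.0.0.10 10.0.0.11 10.0.0.12"
```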


@ -64,7 +64,7 @@ clusterDNS:
kubeReservedCgroup: {{ kube_reserved_cgroups }}
kubeReserved:
{% if is_kube_master | bool %}
cpu: {{ kube_master_cpu_reserved }}
cpu: "{{ kube_master_cpu_reserved }}"
memory: {{ kube_master_memory_reserved }}
{% if kube_master_ephemeral_storage_reserved is defined %}
ephemeral-storage: {{ kube_master_ephemeral_storage_reserved }}
@ -73,7 +73,7 @@ kubeReserved:
pid: "{{ kube_master_pid_reserved }}"
{% endif %}
{% else %}
cpu: {{ kube_cpu_reserved }}
cpu: "{{ kube_cpu_reserved }}"
memory: {{ kube_memory_reserved }}
{% if kube_ephemeral_storage_reserved is defined %}
ephemeral-storage: {{ kube_ephemeral_storage_reserved }}
@ -87,7 +87,7 @@ kubeReserved:
systemReservedCgroup: {{ system_reserved_cgroups }}
systemReserved:
{% if is_kube_master | bool %}
cpu: {{ system_master_cpu_reserved }}
cpu: "{{ system_master_cpu_reserved }}"
memory: {{ system_master_memory_reserved }}
{% if system_master_ephemeral_storage_reserved is defined %}
ephemeral-storage: {{ system_master_ephemeral_storage_reserved }}
@ -96,7 +96,7 @@ systemReserved:
pid: "{{ system_master_pid_reserved }}"
{% endif %}
{% else %}
cpu: {{ system_cpu_reserved }}
cpu: "{{ system_cpu_reserved }}"
memory: {{ system_memory_reserved }}
{% if system_ephemeral_storage_reserved is defined %}
ephemeral-storage: {{ system_ephemeral_storage_reserved }}
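Quoting the cpu fields presumably guarantees the rendered value is always a YAML string: kubeReserved and systemReserved are map[string]string in KubeletConfiguration, so while a suffixed quantity such as 100m already parses as a string, a purely numeric setting (e.g. 1) would otherwise render as an integer and fail to decode. Illustrative rendering:

kubeReserved:
  cpu: "100m"
  memory: 256Mi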

View File

@ -48,11 +48,11 @@ spec:
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "5"
value: {{ kube_vip_leaseduration | string | to_json }}
- name: vip_renewdeadline
value: "3"
value: {{ kube_vip_renewdeadline | string | to_json }}
- name: vip_retryperiod
value: "1"
value: {{ kube_vip_retryperiod | string | to_json }}
{% endif %}
{% if kube_vip_bgp_enabled %}
- name: bgp_enable

View File

@ -6,18 +6,6 @@ epel_enabled: false
# Kubespray sets this to true after clusterDNS is running to apply changes to the host resolv.conf
dns_late: false
common_required_pkgs:
- "{{ (ansible_distribution == 'openSUSE Tumbleweed') | ternary('openssl-1_1', 'openssl') }}"
- curl
- rsync
- socat
- unzip
- e2fsprogs
- xfsprogs
- ebtables
- bash-completion
- tar
# Set to true if your network does not support IPv6
# This may be necessary for pulling Docker images from
# GCE docker repository
@ -98,6 +86,13 @@ ntp_servers:
ntp_restrict:
- "127.0.0.1"
- "::1"
# Specify whether to filter interfaces
ntp_filter_interface: false
# Specify the interfaces
# Only takes effect when ntp_filter_interface is true
# ntp_interfaces:
# - ignore wildcard
# - listen xxx
# The NTP driftfile path
# Only takes effect when ntp_manage_config is true.
ntp_driftfile: /var/lib/ntp/ntp.drift
@ -135,15 +130,9 @@ supported_os_distributions:
# Extending some distributions into the redhat os family
redhat_os_family_extensions:
- "Kylin Linux Advanced Server"
- "openEuler"
- "UnionTech"
- "UniontechOS"
# Extending some distributions into the debian os family
debian_os_family_extensions:
- "UnionTech OS Server 20"
# Sets DNSStubListener=no, useful if you get "0.0.0.0:53: bind: address already in use"
systemd_resolved_disable_stub_listener: "{{ ansible_os_family in ['Flatcar', 'Flatcar Container Linux by Kinvolk'] }}"

View File

@ -0,0 +1,80 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://kubespray.io/internal/os_packages.schema.json",
"title": "Os packages",
"description": "Criteria for selecting packages to install on Kubernetes nodes during installation by Kubespray",
"type": "object",
"patternProperties": {
".*": {
"type": "object",
"additionalProperties": false,
"properties": {
"enabled": {
"description": "Escape hatch to filter packages. The value is expected to be pre-resolved to a boolean by Jinja",
"type": "boolean",
"default": true
},
"groups": {
"description": "Match if the host is in one of these groups. If not specified match any host.",
"type": "array",
"minItems": 1,
"items":{
"type": "string",
"pattern": "^[0-9A-Za-z_]*$"
}
},
"os": {
"type": "object",
"description": "If not specified match any OS. Otherwise, must match by 'families' or 'distributions' to be included.",
"additionalProperties": false,
"minProperties": 1,
"properties": {
"families": {
"description": "Match if ansible_os_family is part of the list.",
"type": "array",
"minItems": 1,
"items": {
"type": "string"
}
},
"distributions": {
"type": "object",
"description": "Match if ansible_distribution match one of defined keys.",
"minProperties": 1,
"patternProperties": {
".*": {
"description": "Match if either the value is the empty hash, or one major_versions/versions/releases contains the corresponding variable ('ansible_distrbution_*')",
"type": "object",
"additionalProperties": false,
"properties": {
"major_versions": {
"type": "array",
"minItems": 1,
"items": {
"type": "string"
}
},
"versions": {
"type": "array",
"minItems": 1,
"items": {
"type": "string"
}
},
"releases": {
"type": "array",
"minItems": 1,
"items": {
"type": "string"
}
}
}
}
}
}
}
}
}
}
}
}
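A hedged reading of the schema: every top-level key of pkgs is a package name, and its value narrows when the package is installed (boolean escape hatch, inventory groups, OS family/distribution criteria). A hypothetical entry that validates against the schema and matches only RedHat-family hosts in the k8s_cluster group:

example-package:
  enabled: true
  groups:
    - k8s_cluster
  os:
    families:
      - RedHat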

View File

@ -1,11 +1,4 @@
---
- name: Force binaries directory for Flatcar Container Linux by Kinvolk
set_fact:
bin_dir: "/opt/bin"
when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
tags:
- facts
- name: Set os_family fact for other redhat-based operating systems
set_fact:
ansible_os_family: "RedHat"
@ -14,34 +7,6 @@
tags:
- facts
- name: Set os_family fact for other debian-based operating systems
set_fact:
ansible_os_family: "Debian"
when: ansible_distribution in debian_os_family_extensions
tags:
- facts
- name: Check if booted with ostree
stat:
path: /run/ostree-booted
get_attributes: no
get_checksum: no
get_mime: no
register: ostree
- name: Set is_fedora_coreos
lineinfile:
path: /etc/os-release
line: "VARIANT_ID=coreos"
state: present
check_mode: yes
register: os_variant_coreos
changed_when: false
- name: Set is_fedora_coreos
set_fact:
is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"
- name: Check resolvconf
command: which resolvconf
register: resolvconf
@ -234,20 +199,6 @@
supersede domain-name-servers {{ (nameservers | d([]) + cloud_resolver | d([])) | unique | join(', ') }};
when: dns_early and not dns_late
- name: Gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower | replace('/', '_') }}.yml"
- "{{ ansible_distribution | lower }}.yml"
- "{{ ansible_os_family | lower }}.yml"
- defaults.yml
paths:
- ../vars
skip: true
- name: Set etcd vars if using kubeadm mode
set_fact:
etcd_cert_dir: "{{ kube_cert_dir }}"

View File

@ -316,3 +316,15 @@
when:
- kube_apiserver_enable_admission_plugins is defined
- kube_apiserver_enable_admission_plugins | length > 0
- name: Verify that the packages list structure is valid
ansible.utils.validate:
criteria: "{{ lookup('file', 'pkgs-schema.json') }}"
data: "{{ pkgs }}"
- name: Verify that the packages list is sorted
vars:
pkgs_lists: "{{ pkgs.keys() | list }}"
assert:
that: "pkgs_lists | sort == pkgs_lists"
fail_msg: "pkgs is not sorted: {{ pkgs_lists | ansible.utils.fact_diff(pkgs_lists | sort) }}"

View File

@ -48,20 +48,6 @@
- ansible_os_family == "RedHat"
- not is_fedora_coreos
- name: Install python3-dnf for latest RedHat versions
command: dnf install -y python3-dnf
register: dnf_task_result
until: dnf_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when:
- ansible_distribution == "Fedora"
- ansible_distribution_major_version | int >= 30
- not is_fedora_coreos
changed_when: False
tags:
- bootstrap-os
- name: Install epel-release on RHEL derivatives
package:
name: epel-release
@ -73,27 +59,28 @@
tags:
- bootstrap-os
- name: Update common_required_pkgs with ipvsadm when kube_proxy_mode is ipvs
set_fact:
common_required_pkgs: "{{ common_required_pkgs | default([]) + ['ipvsadm', 'ipset'] }}"
when: kube_proxy_mode == 'ipvs'
- name: Install packages requirements
vars:
# The json_query for selecting package names is split for readability
# see files/pkgs-schema.json for the structure of `pkgs`
# and the matching semantics
full_query: "[? value | (enabled == null || enabled) && ( {{ filters_os }} ) && ( {{ filters_groups }} ) ].key"
filters_groups: "groups | @ == null || [? contains(`{{ group_names }}`, @)]"
filters_os: "os == null || (os | ( {{ filters_family }} ) || ( {{ filters_distro }} ))"
dquote: !unsafe '"'
# necessary to work around Ansible escaping
filters_distro: "distributions.{{ dquote }}{{ ansible_distribution }}{{ dquote }} |
@ == `{}` ||
contains(not_null(major_versions, `[]`), '{{ ansible_distribution_major_version }}') ||
contains(not_null(versions, `[]`), '{{ ansible_distribution_version }}') ||
contains(not_null(releases, `[]`), '{{ ansible_distribution_release }}')"
filters_family: "families && contains(families, '{{ ansible_os_family }}')"
package:
name: "{{ required_pkgs | default([]) | union(common_required_pkgs | default([])) }}"
name: "{{ pkgs | dict2items | to_json|from_json | community.general.json_query(full_query) }}"
state: present
register: pkgs_task_result
until: pkgs_task_result is succeeded
retries: "{{ pkg_install_retries }}"
delay: "{{ retry_stagger | random + 3 }}"
when: not (ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_fedora_coreos)
tags:
- bootstrap-os
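To make the selection concrete, here is a hypothetical evaluation of the query on a Debian 12 host in the k8s_cluster group with kube_proxy_mode: ipvs (abridged):

- apparmor         # os.families contains Debian
- bash-completion  # no criteria: matches every host
- gnupg            # Debian major_versions includes "12"
- ipvsadm          # enabled resolves to true under ipvs
- mergerfs         # Debian major_versions includes "12"

Entries such as container-selinux (RedHat family) or libselinux-python (Amazon only) are filtered out on this host.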
- name: Install ipvsadm for ClearLinux
package:
name: ipvsadm
state: present
when:
- ansible_os_family in ["ClearLinux"]
- kube_proxy_mode == 'ipvs'

View File

@ -35,6 +35,13 @@ restrict -6 default kod notrap nomodify nopeer noquery limited
restrict {{ item }}
{% endfor %}
# Needed for filtering interfaces
{% if ntp_filter_interface %}
{% for item in ntp_interfaces %}
interface {{ item }}
{% endfor %}
{% endif %}
# Needed for adding pool entries
restrict source notrap nomodify noquery
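Assuming ntp_filter_interface: true and ntp_interfaces set as in the commented defaults (interface names are illustrative), the new interface block emits:

interface ignore wildcard
interface listen eth0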

View File

@ -1,7 +0,0 @@
---
required_pkgs:
- libselinux-python
- device-mapper-libs
- nss
- conntrack-tools
- libseccomp

View File

@ -1,8 +0,0 @@
---
required_pkgs:
- "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}"
- device-mapper-libs
- nss
- conntrack
- container-selinux
- libseccomp

View File

@ -1,10 +0,0 @@
---
required_pkgs:
- python3-apt
- gnupg
- apt-transport-https
- software-properties-common
- conntrack
- iptables
- apparmor
- libseccomp2

View File

@ -1,11 +0,0 @@
---
required_pkgs:
- python3-apt
- gnupg
- apt-transport-https
- software-properties-common
- conntrack
- iptables
- apparmor
- libseccomp2
- mergerfs

View File

@ -1,9 +0,0 @@
---
required_pkgs:
- python-apt
- aufs-tools
- apt-transport-https
- software-properties-common
- conntrack
- apparmor
- libseccomp2

View File

@ -1,8 +0,0 @@
---
required_pkgs:
- iptables
- libselinux-python3
- device-mapper-libs
- conntrack
- container-selinux
- libseccomp

View File

@ -0,0 +1,106 @@
---
pkgs:
apparmor: &debian_family_base
os:
families:
- Debian
apt-transport-https: *debian_family_base
aufs-tools: &deb_10
groups:
- k8s_cluster
os:
distributions:
Debian:
major_versions:
- "10"
bash-completion: {}
conntrack: &deb_redhat
groups:
- k8s_cluster
os:
families:
- Debian
- RedHat
conntrack-tools:
groups:
- k8s_cluster
os:
families:
- Suse
distributions:
Amazon: {}
container-selinux: &redhat_family
groups:
- k8s_cluster
os:
families:
- RedHat
curl: {}
device-mapper:
groups:
- k8s_cluster
os:
families:
- Suse
device-mapper-libs: *redhat_family
e2fsprogs: {}
ebtables: {}
gnupg: &debian
groups:
- k8s_cluster
os:
distributions:
Debian:
major_versions:
- "11"
- "12"
ipset:
enabled: "{{ kube_proxy_mode != 'ipvs' }}"
groups:
- k8s_cluster
iptables: *deb_redhat
ipvsadm:
enabled: "{{ kube_proxy_mode == 'ipvs' }}"
groups:
- k8s_cluster
libseccomp: *redhat_family
libseccomp2:
groups:
- k8s_cluster
os:
families:
- Suse
- Debian
libselinux-python: # TODO: Handle redhat_family + major < 8
os:
distributions:
Amazon: {}
libselinux-python3:
os:
distributions:
Fedora: {}
mergerfs:
os:
distributions:
Debian:
major_versions:
- "12"
nss: *redhat_family
openssl: {}
python-apt: *deb_10
# TODO: not for debian 10
python3-apt: *debian_family_base
python3-libselinux:
os:
distributions:
RedHat: &major_redhat_like
major_versions:
- "8"
- "9"
CentOS: *major_redhat_like
rsync: {}
socat: {}
software-properties-common: *debian_family_base
tar: {}
unzip: {}
xfsprogs: {}

View File

@ -1,8 +0,0 @@
---
required_pkgs:
- "{{ ((ansible_distribution_major_version | int) < 8) | ternary('libselinux-python', 'python3-libselinux') }}"
- device-mapper-libs
- nss
- conntrack
- container-selinux
- libseccomp

View File

@ -1,5 +0,0 @@
---
required_pkgs:
- device-mapper
- conntrack-tools
- libseccomp2

View File

@ -1,8 +0,0 @@
---
required_pkgs:
- python3-apt
- apt-transport-https
- software-properties-common
- conntrack
- apparmor
- libseccomp2

File diff suppressed because it is too large

View File

@ -75,13 +75,13 @@ image_arch: "{{ host_architecture | default('amd64') }}"
# Versions
kubeadm_version: "{{ kube_version }}"
crun_version: 1.8.5
crun_version: 1.14.4
runc_version: v1.1.12
kata_containers_version: 3.1.3
youki_version: 0.1.0
gvisor_version: 20230807
containerd_version: 1.7.13
cri_dockerd_version: 0.3.9
gvisor_version: 20240305
containerd_version: 1.7.16
cri_dockerd_version: 0.3.11
# this is relevant when container_manager == 'docker'
docker_containerd_version: 1.6.28
@ -101,7 +101,7 @@ github_image_repo: "ghcr.io"
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: "v3.27.2"
calico_version: "v3.27.3"
calico_ctl_version: "{{ calico_version }}"
calico_cni_version: "{{ calico_version }}"
calico_flexvol_version: "{{ calico_version }}"
@ -116,8 +116,8 @@ flannel_cni_version: "v1.1.2"
cni_version: "v1.3.0"
weave_version: 2.8.1
cilium_version: "v1.13.4"
cilium_cli_version: "v0.15.0"
cilium_version: "v1.15.4"
cilium_cli_version: "v0.16.0"
cilium_enable_hubble: false
kube_ovn_version: "v1.11.5"
@ -127,7 +127,7 @@ multus_version: "v3.8"
helm_version: "v3.14.2"
nerdctl_version: "1.7.4"
krew_version: "v0.4.4"
skopeo_version: "v1.13.2"
skopeo_version: "v1.15.0"
# Get kubernetes major version (i.e. 1.17.4 => 1.17)
kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0-9]+', 'v\\1.\\2') }}"
@ -139,9 +139,9 @@ pod_infra_supported_versions:
pod_infra_version: "{{ pod_infra_supported_versions[kube_major_version] }}"
etcd_supported_versions:
v1.29: "v3.5.10"
v1.28: "v3.5.10"
v1.27: "v3.5.10"
v1.29: "v3.5.12"
v1.28: "v3.5.12"
v1.27: "v3.5.12"
etcd_version: "{{ etcd_supported_versions[kube_major_version] }}"
crictl_supported_versions:
@ -152,8 +152,8 @@ crictl_version: "{{ crictl_supported_versions[kube_major_version] }}"
crio_supported_versions:
v1.29: v1.29.1
v1.28: v1.28.1
v1.27: v1.27.1
v1.28: v1.28.4
v1.27: v1.27.4
crio_version: "{{ crio_supported_versions[kube_major_version] }}"
# Scheduler plugins don't build for K8s 1.28 yet
@ -163,33 +163,38 @@ scheduler_plugins_supported_versions:
v1.27: v0.27.8
scheduler_plugins_version: "{{ scheduler_plugins_supported_versions[kube_major_version] }}"
yq_version: "v4.35.2"
yq_version: "v4.42.1"
github_url: https://github.com
dl_k8s_io_url: https://dl.k8s.io
storage_googleapis_url: https://storage.googleapis.com
get_helm_url: https://get.helm.sh
# Download URLs
kubelet_download_url: "https://dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
kubectl_download_url: "https://dl.k8s.io/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
kubeadm_download_url: "https://dl.k8s.io/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
etcd_download_url: "https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
cni_download_url: "https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
calicoctl_download_url: "https://github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
calico_crds_download_url: "https://github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
ciliumcli_download_url: "https://github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
crictl_download_url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
crio_download_url: "https://storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.{{ crio_version }}.tar.gz"
helm_download_url: "https://get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
runc_download_url: "https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
crun_download_url: "https://github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
youki_download_url: "https://github.com/containers/youki/releases/download/v{{ youki_version }}/youki_{{ youki_version | regex_replace('\\.', '_') }}_linux.tar.gz"
kata_containers_download_url: "https://github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"
kubelet_download_url: "{{ dl_k8s_io_url }}/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"
kubectl_download_url: "{{ dl_k8s_io_url }}/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
kubeadm_download_url: "{{ dl_k8s_io_url }}/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
etcd_download_url: "{{ github_url }}/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
cni_download_url: "{{ github_url }}/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
calicoctl_download_url: "{{ github_url }}/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
calico_crds_download_url: "{{ github_url }}/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
ciliumcli_download_url: "{{ github_url }}/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
crictl_download_url: "{{ github_url }}/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
crio_download_url: "{{ storage_googleapis_url }}/cri-o/artifacts/cri-o.{{ image_arch }}.{{ crio_version }}.tar.gz"
helm_download_url: "{{ get_helm_url }}/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
runc_download_url: "{{ github_url }}/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
crun_download_url: "{{ github_url }}/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
youki_download_url: "{{ github_url }}/containers/youki/releases/download/v{{ youki_version }}/youki_{{ youki_version | regex_replace('\\.', '_') }}_linux.tar.gz"
kata_containers_download_url: "{{ github_url }}/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"
# gVisor only supports amd64 and uses x86_64 in the download link
gvisor_runsc_download_url: "https://storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc"
gvisor_containerd_shim_runsc_download_url: "https://storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
nerdctl_download_url: "https://github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
krew_download_url: "https://github.com/kubernetes-sigs/krew/releases/download/{{ krew_version }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz"
containerd_download_url: "https://github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
cri_dockerd_download_url: "https://github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"
skopeo_download_url: "https://github.com/lework/skopeo-binary/releases/download/{{ skopeo_version }}/skopeo-linux-{{ image_arch }}"
yq_download_url: "https://github.com/mikefarah/yq/releases/download/{{ yq_version }}/yq_linux_{{ image_arch }}"
gvisor_runsc_download_url: "{{ storage_googleapis_url }}/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc"
gvisor_containerd_shim_runsc_download_url: "{{ storage_googleapis_url }}/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
nerdctl_download_url: "{{ github_url }}/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
krew_download_url: "{{ github_url }}/kubernetes-sigs/krew/releases/download/{{ krew_version }}/krew-{{ host_os }}_{{ image_arch }}.tar.gz"
containerd_download_url: "{{ github_url }}/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
cri_dockerd_download_url: "{{ github_url }}/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"
skopeo_download_url: "{{ github_url }}/lework/skopeo-binary/releases/download/{{ skopeo_version }}/skopeo-linux-{{ image_arch }}"
yq_download_url: "{{ github_url }}/mikefarah/yq/releases/download/{{ yq_version }}/yq_linux_{{ image_arch }}"
etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch][etcd_version] }}"
cni_binary_checksum: "{{ cni_binary_checksums[image_arch][cni_version] }}"
@ -276,6 +281,8 @@ kube_router_image_repo: "{{ docker_image_repo }}/cloudnativelabs/kube-router"
kube_router_image_tag: "{{ kube_router_version }}"
multus_image_repo: "{{ github_image_repo }}/k8snetworkplumbingwg/multus-cni"
multus_image_tag: "{{ multus_version }}"
external_openstack_cloud_controller_image_repo: "registry.k8s.io/provider-os/openstack-cloud-controller-manager"
external_openstack_cloud_controller_image_tag: "v1.28.2"
kube_vip_image_repo: "{{ github_image_repo }}/kube-vip/kube-vip"
kube_vip_image_tag: v0.5.12
@ -326,7 +333,9 @@ local_path_provisioner_image_repo: "{{ docker_image_repo }}/rancher/local-path-p
local_path_provisioner_image_tag: "{{ local_path_provisioner_version }}"
ingress_nginx_version: "v1.9.6"
ingress_nginx_controller_image_repo: "{{ kube_image_repo }}/ingress-nginx/controller"
ingress_nginx_opentelemetry_image_repo: "{{ kube_image_repo }}/ingress-nginx/opentelemetry"
ingress_nginx_controller_image_tag: "{{ ingress_nginx_version }}"
ingress_nginx_opentelemetry_image_tag: "v20230721-3e2062ee5"
ingress_nginx_kube_webhook_certgen_image_repo: "{{ kube_image_repo }}/ingress-nginx/kube-webhook-certgen"
ingress_nginx_kube_webhook_certgen_image_tag: "v20231011-8b53cabe0"
alb_ingress_image_repo: "{{ docker_image_repo }}/amazon/aws-alb-ingress-controller"
@ -353,9 +362,9 @@ csi_livenessprobe_image_repo: "{{ kube_image_repo }}/sig-storage/livenessprobe"
csi_livenessprobe_image_tag: "v2.5.0"
snapshot_controller_supported_versions:
v1.29: "v6.3.3"
v1.28: "v4.2.1"
v1.27: "v4.2.1"
v1.29: "v7.0.2"
v1.28: "v7.0.2"
v1.27: "v7.0.2"
snapshot_controller_image_repo: "{{ kube_image_repo }}/sig-storage/snapshot-controller"
snapshot_controller_image_tag: "{{ snapshot_controller_supported_versions[kube_major_version] }}"
@ -410,7 +419,7 @@ downloads:
tag: "{{ netcheck_server_image_tag }}"
sha256: "{{ netcheck_server_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
netcheck_agent:
enabled: "{{ deploy_netchecker }}"
@ -419,7 +428,7 @@ downloads:
tag: "{{ netcheck_agent_image_tag }}"
sha256: "{{ netcheck_agent_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
etcd:
container: "{{ etcd_deployment_type != 'host' }}"
@ -437,7 +446,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- etcd
- etcd
cni:
enabled: true
@ -450,7 +459,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
kubeadm:
enabled: true
@ -463,7 +472,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
kubelet:
enabled: true
@ -476,7 +485,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
kubectl:
enabled: true
@ -489,7 +498,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube_control_plane
- kube_control_plane
crictl:
file: true
@ -502,7 +511,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
crio:
file: true
@ -515,7 +524,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
cri_dockerd:
file: true
@ -526,11 +535,11 @@ downloads:
url: "{{ cri_dockerd_download_url }}"
unarchive: true
unarchive_extra_opts:
- --strip=1
- --strip=1
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
crun:
file: true
@ -543,7 +552,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
youki:
file: true
@ -556,7 +565,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
runc:
file: true
@ -569,7 +578,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
kata_containers:
enabled: "{{ kata_containers_enabled }}"
@ -582,7 +591,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
containerd:
enabled: "{{ container_manager == 'containerd' }}"
@ -595,7 +604,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
gvisor_runsc:
enabled: "{{ gvisor_enabled }}"
@ -608,7 +617,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
gvisor_containerd_shim:
enabled: "{{ gvisor_enabled }}"
@ -621,7 +630,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
nerdctl:
file: true
@ -634,7 +643,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
skopeo:
file: true
@ -647,7 +656,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube_control_plane
- kube_control_plane
cilium:
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
@ -656,7 +665,7 @@ downloads:
tag: "{{ cilium_image_tag }}"
sha256: "{{ cilium_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
cilium_operator:
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
@ -665,7 +674,7 @@ downloads:
tag: "{{ cilium_operator_image_tag }}"
sha256: "{{ cilium_operator_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
cilium_hubble_relay:
enabled: "{{ cilium_enable_hubble }}"
@ -674,7 +683,7 @@ downloads:
tag: "{{ cilium_hubble_relay_image_tag }}"
sha256: "{{ cilium_hubble_relay_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
cilium_hubble_certgen:
enabled: "{{ cilium_enable_hubble }}"
@ -683,7 +692,7 @@ downloads:
tag: "{{ cilium_hubble_certgen_image_tag }}"
sha256: "{{ cilium_hubble_certgen_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
cilium_hubble_ui:
enabled: "{{ cilium_enable_hubble }}"
@ -692,7 +701,7 @@ downloads:
tag: "{{ cilium_hubble_ui_image_tag }}"
sha256: "{{ cilium_hubble_ui_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
cilium_hubble_ui_backend:
enabled: "{{ cilium_enable_hubble }}"
@ -701,7 +710,7 @@ downloads:
tag: "{{ cilium_hubble_ui_backend_image_tag }}"
sha256: "{{ cilium_hubble_ui_backend_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
cilium_hubble_envoy:
enabled: "{{ cilium_enable_hubble }}"
@ -710,7 +719,7 @@ downloads:
tag: "{{ cilium_hubble_envoy_image_tag }}"
sha256: "{{ cilium_hubble_envoy_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
ciliumcli:
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
@ -723,7 +732,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
multus:
enabled: "{{ kube_network_plugin_multus }}"
@ -732,7 +741,7 @@ downloads:
tag: "{{ multus_image_tag }}"
sha256: "{{ multus_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
flannel:
enabled: "{{ kube_network_plugin == 'flannel' }}"
@ -741,7 +750,7 @@ downloads:
tag: "{{ flannel_image_tag }}"
sha256: "{{ flannel_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
flannel_init:
enabled: "{{ kube_network_plugin == 'flannel' }}"
@ -750,7 +759,7 @@ downloads:
tag: "{{ flannel_init_image_tag }}"
sha256: "{{ flannel_init_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
calicoctl:
enabled: "{{ kube_network_plugin == 'calico' }}"
@ -763,7 +772,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- k8s_cluster
- k8s_cluster
calico_node:
enabled: "{{ kube_network_plugin == 'calico' }}"
@ -772,7 +781,7 @@ downloads:
tag: "{{ calico_node_image_tag }}"
sha256: "{{ calico_node_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
calico_cni:
enabled: "{{ kube_network_plugin == 'calico' }}"
@ -781,7 +790,7 @@ downloads:
tag: "{{ calico_cni_image_tag }}"
sha256: "{{ calico_cni_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
calico_flexvol:
enabled: "{{ kube_network_plugin == 'calico' }}"
@ -790,7 +799,7 @@ downloads:
tag: "{{ calico_flexvol_image_tag }}"
sha256: "{{ calico_flexvol_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
calico_policy:
enabled: "{{ enable_network_policy and kube_network_plugin in ['calico'] }}"
@ -799,7 +808,7 @@ downloads:
tag: "{{ calico_policy_image_tag }}"
sha256: "{{ calico_policy_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
calico_typha:
enabled: "{{ typha_enabled }}"
@ -808,7 +817,7 @@ downloads:
tag: "{{ calico_typha_image_tag }}"
sha256: "{{ calico_typha_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
calico_apiserver:
enabled: "{{ calico_apiserver_enabled }}"
@ -817,7 +826,7 @@ downloads:
tag: "{{ calico_apiserver_image_tag }}"
sha256: "{{ calico_apiserver_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
calico_crds:
file: true
@ -828,13 +837,13 @@ downloads:
url: "{{ calico_crds_download_url }}"
unarchive: true
unarchive_extra_opts:
- "{{ '--strip=6' if (calico_version is version('v3.22.3', '<')) else '--strip=3' }}"
- "--wildcards"
- "{{ '*/_includes/charts/calico/crds/kdd/' if (calico_version is version('v3.22.3', '<')) else '*/libcalico-go/config/crd/' }}"
- "{{ '--strip=6' if (calico_version is version('v3.22.3', '<')) else '--strip=3' }}"
- "--wildcards"
- "{{ '*/_includes/charts/calico/crds/kdd/' if (calico_version is version('v3.22.3', '<')) else '*/libcalico-go/config/crd/' }}"
owner: "root"
mode: "0755"
groups:
- kube_control_plane
- kube_control_plane
weave_kube:
enabled: "{{ kube_network_plugin == 'weave' }}"
@ -843,7 +852,7 @@ downloads:
tag: "{{ weave_kube_image_tag }}"
sha256: "{{ weave_kube_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
weave_npc:
enabled: "{{ kube_network_plugin == 'weave' }}"
@ -852,7 +861,7 @@ downloads:
tag: "{{ weave_npc_image_tag }}"
sha256: "{{ weave_npc_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
kube_ovn:
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
@ -861,7 +870,7 @@ downloads:
tag: "{{ kube_ovn_container_image_tag }}"
sha256: "{{ kube_ovn_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
kube_router:
enabled: "{{ kube_network_plugin == 'kube-router' }}"
@ -870,7 +879,7 @@ downloads:
tag: "{{ kube_router_image_tag }}"
sha256: "{{ kube_router_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
pod_infra:
enabled: true
@ -879,7 +888,7 @@ downloads:
tag: "{{ pod_infra_image_tag }}"
sha256: "{{ pod_infra_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
kube-vip:
enabled: "{{ kube_vip_enabled }}"
@ -888,7 +897,7 @@ downloads:
tag: "{{ kube_vip_image_tag }}"
sha256: "{{ kube_vip_digest_checksum | default(None) }}"
groups:
- kube_control_plane
- kube_control_plane
nginx:
enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}"
@ -897,7 +906,7 @@ downloads:
tag: "{{ nginx_image_tag }}"
sha256: "{{ nginx_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
haproxy:
enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' }}"
@ -906,7 +915,7 @@ downloads:
tag: "{{ haproxy_image_tag }}"
sha256: "{{ haproxy_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
coredns:
enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
@ -915,7 +924,7 @@ downloads:
tag: "{{ coredns_image_tag }}"
sha256: "{{ coredns_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
nodelocaldns:
enabled: "{{ enable_nodelocaldns }}"
@ -924,7 +933,7 @@ downloads:
tag: "{{ nodelocaldns_image_tag }}"
sha256: "{{ nodelocaldns_digest_checksum | default(None) }}"
groups:
- k8s_cluster
- k8s_cluster
dnsautoscaler:
enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
@ -933,7 +942,7 @@ downloads:
tag: "{{ dnsautoscaler_image_tag }}"
sha256: "{{ dnsautoscaler_digest_checksum | default(None) }}"
groups:
- kube_control_plane
- kube_control_plane
helm:
enabled: "{{ helm_enabled }}"
@ -946,7 +955,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube_control_plane
- kube_control_plane
krew:
enabled: "{{ krew_enabled }}"
@ -959,7 +968,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube_control_plane
- kube_control_plane
registry:
enabled: "{{ registry_enabled }}"
@ -968,7 +977,7 @@ downloads:
tag: "{{ registry_image_tag }}"
sha256: "{{ registry_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
metrics_server:
enabled: "{{ metrics_server_enabled }}"
@ -977,7 +986,7 @@ downloads:
tag: "{{ metrics_server_image_tag }}"
sha256: "{{ metrics_server_digest_checksum | default(None) }}"
groups:
- kube_control_plane
- kube_control_plane
local_volume_provisioner:
enabled: "{{ local_volume_provisioner_enabled }}"
@ -986,7 +995,7 @@ downloads:
tag: "{{ local_volume_provisioner_image_tag }}"
sha256: "{{ local_volume_provisioner_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
cephfs_provisioner:
enabled: "{{ cephfs_provisioner_enabled }}"
@ -995,7 +1004,7 @@ downloads:
tag: "{{ cephfs_provisioner_image_tag }}"
sha256: "{{ cephfs_provisioner_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
rbd_provisioner:
enabled: "{{ rbd_provisioner_enabled }}"
@ -1004,7 +1013,7 @@ downloads:
tag: "{{ rbd_provisioner_image_tag }}"
sha256: "{{ rbd_provisioner_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
local_path_provisioner:
enabled: "{{ local_path_provisioner_enabled }}"
@ -1013,7 +1022,7 @@ downloads:
tag: "{{ local_path_provisioner_image_tag }}"
sha256: "{{ local_path_provisioner_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
ingress_nginx_controller:
enabled: "{{ ingress_nginx_enabled }}"
@ -1022,7 +1031,7 @@ downloads:
tag: "{{ ingress_nginx_controller_image_tag }}"
sha256: "{{ ingress_nginx_controller_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
ingress_alb_controller:
enabled: "{{ ingress_alb_enabled }}"
@ -1031,7 +1040,7 @@ downloads:
tag: "{{ alb_ingress_image_tag }}"
sha256: "{{ ingress_alb_controller_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
cert_manager_controller:
enabled: "{{ cert_manager_enabled }}"
@ -1040,7 +1049,7 @@ downloads:
tag: "{{ cert_manager_controller_image_tag }}"
sha256: "{{ cert_manager_controller_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
cert_manager_cainjector:
enabled: "{{ cert_manager_enabled }}"
@ -1049,7 +1058,7 @@ downloads:
tag: "{{ cert_manager_cainjector_image_tag }}"
sha256: "{{ cert_manager_cainjector_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
cert_manager_webhook:
enabled: "{{ cert_manager_enabled }}"
@ -1058,7 +1067,7 @@ downloads:
tag: "{{ cert_manager_webhook_image_tag }}"
sha256: "{{ cert_manager_webhook_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
csi_attacher:
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
@ -1067,7 +1076,7 @@ downloads:
tag: "{{ csi_attacher_image_tag }}"
sha256: "{{ csi_attacher_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
csi_provisioner:
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
@ -1076,7 +1085,7 @@ downloads:
tag: "{{ csi_provisioner_image_tag }}"
sha256: "{{ csi_provisioner_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
csi_snapshotter:
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
@ -1085,7 +1094,7 @@ downloads:
tag: "{{ csi_snapshotter_image_tag }}"
sha256: "{{ csi_snapshotter_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
snapshot_controller:
enabled: "{{ csi_snapshot_controller_enabled }}"
@ -1094,7 +1103,7 @@ downloads:
tag: "{{ snapshot_controller_image_tag }}"
sha256: "{{ snapshot_controller_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
csi_resizer:
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
@ -1103,7 +1112,7 @@ downloads:
tag: "{{ csi_resizer_image_tag }}"
sha256: "{{ csi_resizer_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
csi_node_driver_registrar:
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
@ -1112,7 +1121,7 @@ downloads:
tag: "{{ csi_node_driver_registrar_image_tag }}"
sha256: "{{ csi_node_driver_registrar_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
cinder_csi_plugin:
enabled: "{{ cinder_csi_enabled }}"
@ -1121,7 +1130,7 @@ downloads:
tag: "{{ cinder_csi_plugin_image_tag }}"
sha256: "{{ cinder_csi_plugin_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
aws_ebs_csi_plugin:
enabled: "{{ aws_ebs_csi_enabled }}"
@ -1130,7 +1139,7 @@ downloads:
tag: "{{ aws_ebs_csi_plugin_image_tag }}"
sha256: "{{ aws_ebs_csi_plugin_digest_checksum | default(None) }}"
groups:
- kube_node
- kube_node
dashboard:
enabled: "{{ dashboard_enabled }}"
@ -1139,7 +1148,7 @@ downloads:
tag: "{{ dashboard_image_tag }}"
sha256: "{{ dashboard_digest_checksum | default(None) }}"
groups:
- kube_control_plane
- kube_control_plane
dashboard_metrics_scrapper:
enabled: "{{ dashboard_enabled }}"
@ -1148,7 +1157,7 @@ downloads:
tag: "{{ dashboard_metrics_scraper_tag }}"
sha256: "{{ dashboard_digest_checksum | default(None) }}"
groups:
- kube_control_plane
- kube_control_plane
metallb_speaker:
enabled: "{{ metallb_speaker_enabled }}"
@ -1157,7 +1166,7 @@ downloads:
tag: "{{ metallb_version }}"
sha256: "{{ metallb_speaker_digest_checksum | default(None) }}"
groups:
- kube_control_plane
- kube_control_plane
metallb_controller:
enabled: "{{ metallb_enabled }}"
@ -1166,7 +1175,7 @@ downloads:
tag: "{{ metallb_version }}"
sha256: "{{ metallb_controller_digest_checksum | default(None) }}"
groups:
- kube_control_plane
- kube_control_plane
yq:
enabled: "{{ argocd_enabled }}"
@ -1179,7 +1188,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube_control_plane
- kube_control_plane
download_defaults:
container: false

View File

@ -6,6 +6,8 @@ ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='s
# selinux state
preinstall_selinux_state: permissive
# Setting this value to false will fail
# For details, read this comment https://github.com/kubernetes-sigs/kubespray/pull/11016#issuecomment-2004985001
kube_api_anonymous_auth: true
# Default value, but will be set to true automatically if detected
@ -16,7 +18,7 @@ kubelet_fail_swap_on: true
kubelet_swap_behavior: LimitedSwap
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.29.2
kube_version: v1.29.4
## The minimum version working
kube_version_min_required: v1.27.0
@ -50,6 +52,9 @@ kubeadm_join_phases_skip_default: []
kubeadm_join_phases_skip: >-
{{ kubeadm_join_phases_skip_default }}
# Set to true to remove the role binding to anonymous users created by kubeadm
remove_anonymous_access: false
# A string slice of values which specify the addresses to use for NodePorts.
# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
# The default empty string slice ([]) means to use all local addresses.

View File

@ -3,7 +3,7 @@
# do not run gather facts when bootstrap-os in roles
when: >
ansible_play_role_names |
intersect(['bootstrap-os', 'kubernetes-sigs.kubespray.bootstrap-os']) |
intersect(['bootstrap-os', 'kubernetes_sigs.kubespray.bootstrap-os']) |
length == 0
tags:
- always

View File

@ -82,11 +82,12 @@
Minimum version is {{ calico_min_version_required }} supported by the previous kubespray release.
But current version is {{ calico_version_on_server.stdout }}.
- name: "Check that cluster_id is set if calico_rr enabled"
- name: "Check that cluster_id is set and a valid IPv4 address if calico_rr enabled"
assert:
that:
- cluster_id is defined
msg: "A unique cluster_id is required if using calico_rr"
- cluster_id is ansible.utils.ipv4
msg: "A unique cluster_id is required if using calico_rr, and it must be a valid IPv4 address"
when:
- peer_with_calico_rr
- inventory_hostname == groups['kube_control_plane'][0]
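For reference, an inventory value (illustrative) that satisfies the strengthened assertion:

cluster_id: "244.0.0.1"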

View File

@ -23,6 +23,38 @@
when:
- inventory_hostname == groups['kube_control_plane'][0]
- name: Calico | Get node for per node peering
command:
cmd: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }}"
register: output_get_node
when:
- inventory_hostname in groups['k8s_cluster']
- local_as is defined
- groups['calico_rr'] | default([]) | length == 0
delegate_to: "{{ groups['kube_control_plane'][0] }}"
- name: Calico | Patch node asNumber for per node peering
command:
cmd: |-
{{ bin_dir }}/calicoctl.sh patch node "{{ inventory_hostname }}" --patch '{{ patch is string | ternary(patch, patch | to_json) }}'
vars:
patch: >
{"spec": {
"bgp": {
"asNumber": "{{ local_as }}"
},
"orchRefs": [{"nodeName": "{{ inventory_hostname }}", "orchestrator": "k8s"}]
}}
register: output
retries: 0
until: output.rc == 0
delay: "{{ retry_stagger | random + 3 }}"
when:
- inventory_hostname in groups['k8s_cluster']
- local_as is defined
- groups['calico_rr'] | default([]) | length == 0
- output_get_node.rc == 0
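Concretely, with a hypothetical local_as: 64512 on node1, the patch task above runs roughly:

calicoctl.sh patch node "node1" --patch '{"spec": {"bgp": {"asNumber": "64512"}, "orchRefs": [{"nodeName": "node1", "orchestrator": "k8s"}]}}'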
- name: Calico | Configure node asNumber for per node peering
command:
cmd: "{{ bin_dir }}/calicoctl.sh apply -f -"
@ -48,6 +80,7 @@
- inventory_hostname in groups['k8s_cluster']
- local_as is defined
- groups['calico_rr'] | default([]) | length == 0
- output_get_node.rc != 0
- name: Calico | Configure peering with router(s) at node scope
command:
@ -64,6 +97,9 @@
"asNumber": "{{ item.as }}",
"node": "{{ inventory_hostname }}",
"peerIP": "{{ item.router_id }}",
{% if calico_version is version('v3.26.0', '>=') and (item.filters | default([]) | length > 0) %}
"filters": {{ item.filters }},
{% endif %}
"sourceAddress": "{{ item.sourceaddress | default('UseNodeIP') }}"
}}
register: output

View File

@ -157,6 +157,7 @@ rules:
- networksets
- bgpconfigurations
- bgppeers
- bgpfilters
- felixconfigurations
- kubecontrollersconfigurations
- ippools

View File

@ -16,6 +16,11 @@ rules:
- pods/status
verbs:
- patch
- apiGroups: [""]
resources:
- nodes/status
verbs:
- update
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities

View File

@ -38,7 +38,7 @@ spec:
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
initContainers:
{% if calico_datastore == "kdd" %}
{% if calico_datastore == "kdd" and not calico_ipam_host_local %}
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
@ -310,6 +310,10 @@ spec:
value: "{{ calico_node_ignorelooserpf }}"
- name: CALICO_MANAGE_CNI
value: "true"
{% if calico_ipam_host_local %}
- name: USE_POD_CIDR
value: "true"
{% endif %}
{% if calico_node_extra_envs is defined %}
{% for key in calico_node_extra_envs %}
- name: {{ key }}
@ -428,7 +432,7 @@ spec:
hostPath:
path: /run/xtables.lock
type: FileOrCreate
{% if calico_datastore == "kdd" %}
{% if calico_datastore == "kdd" and not calico_ipam_host_local %}
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.

View File

@ -136,11 +136,10 @@ spec:
name: cacert
readOnly: true
{% endif %}
# Needed for version >=3.7 when the 'host-local' ipam is used
# Should never happen given templates/cni-calico.conflist.j2
# Configure route aggregation based on pod CIDR.
# - name: USE_POD_CIDR
# value: "true"
{% if calico_ipam_host_local %}
- name: USE_POD_CIDR
value: "true"
{% endif %}
livenessProbe:
httpGet:
path: /liveness
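Both manifests key off the same flag, so opting into the host-local IPAM (and the USE_POD_CIDR route aggregation it requires) is a single inventory toggle; a sketch:

calico_ipam_host_local: true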

Some files were not shown because too many files have changed in this diff