Merge pull request #10643 from VannTen/cleanup/k8s_node_templates
Refactor kubernetes/node templates
commit 03a055c383
@@ -296,8 +296,8 @@ node_taints:
 
 For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments.
 
-Extra flags for the kubelet can be specified using these variables,
-in the form of dicts of key-value pairs of configuration parameters that will be inserted into the kubelet YAML config file. The `kubelet_node_config_extra_args` apply kubelet settings only to nodes and not control planes. Example:
+Extra flags for the kubelet can be specified using these variables, in the form of dicts of key-value pairs of
+configuration parameters that will be inserted into the kubelet YAML config file. Example:
 
 ```yml
 kubelet_config_extra_args:
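Note (not part of the diff): a minimal sketch of what the documented `kubelet_config_extra_args` dict can look like in an inventory; the key shown is only an illustrative KubeletConfiguration field, not a Kubespray default.

```yml
# Hypothetical group_vars entry; keys are merged into the kubelet YAML config file.
kubelet_config_extra_args:
  evictionMaxPodGracePeriod: 90  # illustrative KubeletConfiguration field
```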
@@ -312,14 +312,10 @@ kubelet_config_extra_args:
 The possible vars are:
 
 * *kubelet_config_extra_args*
-* *kubelet_node_config_extra_args*
 
 Previously, the same parameters could be passed as flags to kubelet binary with the following vars:
 
 * *kubelet_custom_flags*
-* *kubelet_node_custom_flags*
 
-The `kubelet_node_custom_flags` apply kubelet settings only to nodes and not control planes. Example:
-
 ```yml
 kubelet_custom_flags:
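Note (not part of the diff): the older flag-based form documented above takes a list of raw CLI options; the values below are illustrative kubelet flags, not values taken from this PR.

```yml
# Hypothetical group_vars entry; each item is appended verbatim to the kubelet command line.
kubelet_custom_flags:
  - "--image-gc-high-threshold=90"
  - "--image-gc-low-threshold=80"
```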
@@ -36,31 +36,20 @@ kubelet_secure_addresses: "localhost link-local {{ kube_pods_subnet }} {{ kube_n
 # Reserve this space for kube resources
 # Whether to run kubelet and container-engine daemons in a dedicated cgroup. (Not required for resource reservations).
 kube_reserved: false
-kube_reserved_cgroups_for_service_slice: kube.slice
 kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}"
-kube_memory_reserved: 256Mi
-kube_cpu_reserved: 100m
-# kube_ephemeral_storage_reserved: 2Gi
-# kube_pid_reserved: "1000"
-# Reservation for control plane hosts
-kube_master_memory_reserved: 512Mi
-kube_master_cpu_reserved: 200m
-# kube_master_ephemeral_storage_reserved: 2Gi
-# kube_master_pid_reserved: "1000"
+kube_memory_reserved: "256Mi"
+kube_cpu_reserved: "100m"
+kube_ephemeral_storage_reserved: "500Mi"
+kube_pid_reserved: "1000"
 
 # Set to true to reserve resources for system daemons
 system_reserved: false
 system_reserved_cgroups_for_service_slice: system.slice
 system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}"
-system_memory_reserved: 512Mi
-system_cpu_reserved: 500m
-# system_ephemeral_storage_reserved: 2Gi
-# system_pid_reserved: "1000"
-# Reservation for control plane hosts
-system_master_memory_reserved: 256Mi
-system_master_cpu_reserved: 250m
-# system_master_ephemeral_storage_reserved: 2Gi
-# system_master_pid_reserved: "1000"
+system_memory_reserved: "512Mi"
+system_cpu_reserved: "500m"
+system_ephemeral_storage_reserved: "500Mi"
+system_pid_reserved: 1000
 
 ## Eviction Thresholds to avoid system OOMs
 # https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds
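Note (not part of the diff): the defaults above follow the `<scope>_<resource>_reserved` naming pattern that the refactored template in a later hunk looks up, and the control-plane-specific `*_master_*` variants are dropped. A sketch of an inventory override, with illustrative values only:

```yml
# Hypothetical group_vars override; names follow the <scope>_<resource>_reserved pattern.
kube_reserved: true              # also emits kubeReservedCgroup in the rendered kubelet config
kube_memory_reserved: "512Mi"
kube_cpu_reserved: "200m"
system_reserved: true            # also emits systemReservedCgroup
system_memory_reserved: "1Gi"
system_cpu_reserved: "500m"
```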
@@ -136,9 +125,6 @@ kubelet_config_extra_args_cgroupfs:
   systemCgroups: /system.slice
   cgroupRoot: /
 
-## Support parameters to be passed to kubelet via kubelet-config.yaml only on nodes, not control plane nodes
-kubelet_node_config_extra_args: {}
-
 # Maximum number of container log files that can be present for a container.
 kubelet_logfiles_max_nr: 5
 
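Note (not part of the diff): with `kubelet_node_config_extra_args` removed, one possible migration path — an assumption based on ordinary Ansible group_vars scoping, not something this PR prescribes — is to set the generic variable only for worker hosts:

```yml
# Hypothetical file: group_vars/kube_node.yml (illustrative only)
kubelet_config_extra_args:
  maxPods: 150  # example KubeletConfiguration field applied to hosts in the kube_node group
```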
@@ -148,9 +134,6 @@ kubelet_logfiles_max_size: 10Mi
 ## Support custom flags to be passed to kubelet
 kubelet_custom_flags: []
 
-## Support custom flags to be passed to kubelet only on nodes, not control plane nodes
-kubelet_node_custom_flags: []
-
 # If non-empty, will use this string as identification instead of the actual hostname
 kube_override_hostname: >-
   {%- if cloud_provider is defined and cloud_provider in ['aws'] -%}
@@ -61,56 +61,16 @@ clusterDNS:
 - {{ dns_address }}
 {% endfor %}
 {# Node reserved CPU/memory #}
-{% if kube_reserved | bool %}
-kubeReservedCgroup: {{ kube_reserved_cgroups }}
+{% for scope in "kube", "system" %}
+{% if lookup('ansible.builtin.vars', scope + "_reserved") | bool %}
+{{ scope }}ReservedCgroup: {{ lookup('ansible.builtin.vars', scope + '_reserved_cgroups') }}
 {% endif %}
-kubeReserved:
-{% if 'kube_control_plane' in group_names %}
-  cpu: "{{ kube_master_cpu_reserved }}"
-  memory: {{ kube_master_memory_reserved }}
-{% if kube_master_ephemeral_storage_reserved is defined %}
-  ephemeral-storage: {{ kube_master_ephemeral_storage_reserved }}
-{% endif %}
-{% if kube_master_pid_reserved is defined %}
-  pid: "{{ kube_master_pid_reserved }}"
-{% endif %}
-{% else %}
-  cpu: "{{ kube_cpu_reserved }}"
-  memory: {{ kube_memory_reserved }}
-{% if kube_ephemeral_storage_reserved is defined %}
-  ephemeral-storage: {{ kube_ephemeral_storage_reserved }}
-{% endif %}
-{% if kube_pid_reserved is defined %}
-  pid: "{{ kube_pid_reserved }}"
-{% endif %}
-{% endif %}
-{% if system_reserved | bool %}
-systemReservedCgroup: {{ system_reserved_cgroups }}
-systemReserved:
-{% if 'kube_control_plane' in group_names %}
-  cpu: "{{ system_master_cpu_reserved }}"
-  memory: {{ system_master_memory_reserved }}
-{% if system_master_ephemeral_storage_reserved is defined %}
-  ephemeral-storage: {{ system_master_ephemeral_storage_reserved }}
-{% endif %}
-{% if system_master_pid_reserved is defined %}
-  pid: "{{ system_master_pid_reserved }}"
-{% endif %}
-{% else %}
-  cpu: "{{ system_cpu_reserved }}"
-  memory: {{ system_memory_reserved }}
-{% if system_ephemeral_storage_reserved is defined %}
-  ephemeral-storage: {{ system_ephemeral_storage_reserved }}
-{% endif %}
-{% if system_pid_reserved is defined %}
-  pid: "{{ system_pid_reserved }}"
-{% endif %}
-{% endif %}
-{% endif %}
-{% if ('kube_control_plane' in group_names) and (eviction_hard_control_plane is defined) and eviction_hard_control_plane %}
-evictionHard:
-{{ eviction_hard_control_plane | to_nice_yaml(indent=2) | indent(2) }}
-{% elif ('kube_control_plane' not in group_names) and (eviction_hard is defined) and eviction_hard %}
+{{ scope }}Reserved:
+{% for resource in "cpu", "memory", "ephemeral-storage", "pid" %}
+  {{ resource }}: "{{ lookup('ansible.builtin.vars', scope + '_' ~ (resource | replace('-', '_')) + '_reserved') }}"
+{% endfor %}
+{% endfor %}
+{% if eviction_hard is defined and eviction_hard %}
 evictionHard:
 {{ eviction_hard | to_nice_yaml(indent=2) | indent(2) }}
 {% endif %}
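Note (not part of the diff): rendered against the defaults from the earlier hunk (with `kube_reserved` and `system_reserved` left false, so no `*ReservedCgroup` keys), the refactored loop above would emit roughly the following fragment — a sketch, not captured output:

```yml
kubeReserved:
  cpu: "100m"
  memory: "256Mi"
  ephemeral-storage: "500Mi"
  pid: "1000"
systemReserved:
  cpu: "500m"
  memory: "512Mi"
  ephemeral-storage: "500Mi"
  pid: "1000"
```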
@@ -118,9 +78,6 @@ resolvConf: "{{ kube_resolv_conf }}"
 {% if kubelet_config_extra_args %}
 {{ kubelet_config_extra_args | to_nice_yaml(indent=2) }}
 {% endif %}
-{% if inventory_hostname in groups['kube_node'] and kubelet_node_config_extra_args %}
-{{ kubelet_node_config_extra_args | to_nice_yaml(indent=2) }}
-{% endif %}
 {% if kubelet_feature_gates or kube_feature_gates %}
 featureGates:
 {% for feature in (kubelet_feature_gates | default(kube_feature_gates, true)) %}
@@ -14,7 +14,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 --runtime-cgroups={{ kubelet_runtime_cgroups }} \
 {% endset %}
 
-KUBELET_ARGS="{{ kubelet_args_base }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube_node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
+KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_custom_flags | join(' ') }}"
 {% if kubelet_flexvolumes_plugins_dir is defined %}
 KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
 {% endif %}
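Note (not part of the diff): the `join(' ')` form assumes `kubelet_custom_flags` is a list (the old template also accepted a plain string and appended node-only flags). A sketch with illustrative flag values:

```yml
# Hypothetical group_vars entry
kubelet_custom_flags:
  - "--max-pods=150"
  - "--serialize-image-pulls=false"
# kubelet.env then contains, roughly:
#   KUBELET_ARGS="<kubelet_args_base> --max-pods=150 --serialize-image-pulls=false"
```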
@@ -29,6 +29,9 @@ kube_proxy_mode: ipvs
 ## The timeout for init first control-plane
 kubeadm_init_timeout: 300s
 
+# TODO: remove this
+kube_reserved_cgroups_for_service_slice: kube.slice
+
 ## List of kubeadm init phases that should be skipped during control plane setup
 ## By default 'addon/coredns' is skipped
 ## 'addon/kube-proxy' gets skipped for some network plugins
@@ -22,3 +22,6 @@ containerd_registries_mirrors:
 - host: http://172.19.16.11:5000
   capabilities: ["pull", "resolve", "push"]
   skip_verify: true
+
+kube_reserved: true
+system_reserved: true