### Upstream source https://github.com/kubernetes/release/blob/master/debian/xenial/kubeadm/channel/stable/etc/systemd/system/kubelet.service.d/
### All upstream values should be present in this file

# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address={{ kubelet_bind_address }} --node-ip={{ kubelet_address }}"
# The port for the info server to serve on
# KUBELET_PORT="--port=10250"
{% if kube_override_hostname|default('') %}
# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% endif %}
{# Base kubelet args #}
{% set kubelet_args_base -%}
{# start kubeadm specific settings #}
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \
--kubeconfig={{ kube_config_dir }}/kubelet.conf \
{% if kube_version is version('v1.8', '<') %}
--require-kubeconfig \
{% endif %}
{% if kubelet_authentication_token_webhook %}
--authentication-token-webhook \
{% endif %}
{% if kubelet_authorization_mode_webhook %}
--authorization-mode=Webhook \
{% endif %}
--enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} \
--client-ca-file={{ kube_cert_dir }}/ca.crt \
{% if kubelet_rotate_certificates %}
--rotate-certificates \
{% endif %}
--pod-manifest-path={{ kube_manifest_dir }} \
{% if kube_version is version('v1.12.0', '<') %}
--cadvisor-port={{ kube_cadvisor_port }} \
{% endif %}
{# end kubeadm specific settings #}
--pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
--node-status-update-frequency={{ kubelet_status_update_frequency }} \
--cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
--max-pods={{ kubelet_max_pods }} \
{% if container_manager == 'docker' and kube_version is version('v1.12.0', '<') %}
--docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
{% endif %}
{% if container_manager == 'crio' %}
--container-runtime=remote \
--container-runtime-endpoint=/var/run/crio/crio.sock \
{% endif %}
--anonymous-auth=false \
--read-only-port={{ kube_read_only_port }} \
{% if kube_version is version('v1.8', '<') %}
--experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
{% else %}
--fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
{% endif %}
{% if dynamic_kubelet_configuration %}
--dynamic-config-dir={{ dynamic_kubelet_configuration_dir }} \
{% endif %}
--runtime-cgroups={{ kubelet_runtime_cgroups }} --kubelet-cgroups={{ kubelet_kubelet_cgroups }} \
{% endset %}

{# Node reserved CPU/memory #}
{% if is_kube_master|bool %}
{% set kube_reserved %}--kube-reserved cpu={{ kube_master_cpu_reserved }},memory={{ kube_master_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
{% else %}
{% set kube_reserved %}--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
{% endif %}

{# DNS settings for kubelet #}
{% if dns_mode == 'coredns' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
{% elif dns_mode == 'coredns_dual' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %}
{% elif dns_mode == 'manual' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ manual_dns_server }}{% endset %}
{% else %}
{% set kubelet_args_cluster_dns %}{% endset %}
{% endif %}
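{# When nodelocaldns is enabled, its address replaces the cluster DNS servers chosen above #}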
{% if enable_nodelocaldns %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ nodelocaldns_ip }}{% endset %}
{% endif %}
{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}

{# Kubelet node labels #}
{% set role_node_labels = [] %}
{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
{% if inventory_hostname in nvidia_gpu_nodes %}
{% set dummy = role_node_labels.append('nvidia.com/gpu=true') %}
{% endif %}
{% endif %}

{% set inventory_node_labels = [] %}
{% if node_labels is defined %}
{% if node_labels is mapping %}
{% for labelname, labelvalue in node_labels.items() %}
{% set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %}
{% endfor %}
{% else %}
{% for label in node_labels.split(",") %}
{% set dummy = inventory_node_labels.append(label) %}
{% endfor %}
{% endif %}
{% endif %}
{% set all_node_labels = role_node_labels + inventory_node_labels %}

{# Kubelet node taints for gpu #}
{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
{% if inventory_hostname in nvidia_gpu_nodes and node_taints is defined %}
{% set dummy = node_taints.append('nvidia.com/gpu=:NoSchedule') %}
{% elif inventory_hostname in nvidia_gpu_nodes and node_taints is not defined %}
{% set node_taints = [] %}
{% set dummy = node_taints.append('nvidia.com/gpu=:NoSchedule') %}
{% endif %}
{% endif %}

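{# Assemble the final kubelet argument string from the fragments defined above #}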
KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kube_reserved }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %}--node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube-node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
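{# Network plugin flags: the CNI-based plugins listed below share the same CNI directories; "cloud" falls back to kubenet #}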
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "cni", "flannel", "weave", "contiv", "cilium", "kube-router"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
{% endif %}
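{# Directory where flexVolume driver plugins are installed #}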
KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"
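{# Cloud provider integration: both in-tree providers and "external" point kubelet at the generated cloud_config #}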
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
{% elif cloud_provider is defined and cloud_provider in ["external"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider=external --cloud-config={{ kube_config_dir }}/cloud_config"
{% else %}
KUBELET_CLOUDPROVIDER=""
{% endif %}

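# Prefer binaries installed under bin_dir over the system defaults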
PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin