# kubespray/roles/kubespray-defaults/defaults/main.yaml
---
## Required for bootstrap-os/preinstall/download roles and setting facts
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Use ProxyCommand if a bastion host is in group 'all'
# This obsoletes editing the ansible.cfg file depending on bastion existence
ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ hostvars['bastion']['ansible_user'] }}@{{ hostvars['bastion']['ansible_host'] }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
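## Example (hypothetical values): the ProxyCommand above takes effect once a
## bastion is declared in the inventory, e.g. in inventory/mycluster/hosts.ini:
##   [bastion]
##   bastion ansible_host=203.0.113.10 ansible_user=ubuntu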
kube_api_anonymous_auth: false
# Default value, but will be set to true automatically if detected
is_atomic: false
# Optionally disable swap
disable_swap: true
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.11.2
## Kube-proxy mode. One of ['iptables', 'ipvs']
kube_proxy_mode: iptables
# Kube-proxy nodeport addresses:
# CIDR to bind nodeport services to. Sets the --nodeport-addresses flag in the kube-proxy manifest
kube_proxy_nodeport_addresses: false
# kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24
# Set to true to allow pre-checks to fail and continue deployment
ignore_assert_errors: false
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
docker_bin_dir: /usr/bin
etcd_data_dir: /var/lib/etcd
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# DNS configuration.
# Kubernetes cluster name, also used as the DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns, manual or none
dns_mode: kubedns
# Should be set to a cluster IP if using a custom cluster DNS
# manual_dns_server: 10.x.x.x
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy the netchecker app to verify DNS resolution as an HTTP service
deploy_netchecker: false
# IP address of the Kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
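## With the default kube_service_addresses of 10.233.0.0/18, the expressions
## above evaluate to:
##   dnsmasq_dns_server:      10.233.0.2
##   skydns_server:           10.233.0.3
##   skydns_server_secondary: 10.233.0.4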
dns_domain: "{{ cluster_name }}"
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing these values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
# Choose network plugin (cilium, calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico
# Determines if calico-rr group exists
peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
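## Example (hypothetical host name): route reflectors are declared via a
## 'calico-rr' inventory group, e.g. in inventory/mycluster/hosts.ini:
##   [calico-rr]
##   rr0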
# Set to false to disable calico-upgrade
calico_upgrade_enabled: true
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 64 nodes with 254 pods per node.
# Example: Up to 256 nodes, 100 pods per node (/16 network):
# - kube_service_addresses: 10.233.0.0/17
# - kube_pods_subnet: 10.233.128.0/17
# - kube_network_node_prefix: 25
# Example: Up to 4096 nodes, 100 pods per node (/12 network):
# - kube_service_addresses: 10.192.0.0/13
# - kube_pods_subnet: 10.200.0.0/13
# - kube_network_node_prefix: 25
kube_network_node_prefix: 24
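## Worked example for the defaults above: a /18 pods subnet split into /24
## per-node blocks gives 2^(24-18) = 64 nodes, each with 2^(32-24) - 2 = 254
## usable pod addresses.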
# The virtual cluster IP, real host IPs and ports the API Server will be
# listening on.
# NOTE: loadbalancer_apiserver_localhost somewhat alters the final API endpoint
# access IP value (automatically evaluated below)
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
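## With the default kube_service_addresses of 10.233.0.0/18 this evaluates to 10.233.0.1.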
kube_apiserver_bind_address: 0.0.0.0
# https
kube_apiserver_port: 6443
# http
kube_apiserver_insecure_bind_address: 127.0.0.1
kube_apiserver_insecure_port: 8080
# Aggregator
kube_api_aggregator_routing: false
# Container runtime
container_manager: docker
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
#docker_storage_options: -s overlay2
## Enable docker_container_storage_setup; it will configure the devicemapper driver on CentOS 7 or RedHat 7.
docker_container_storage_setup: false
## A disk path must be defined for docker_container_storage_setup_devs,
## otherwise docker-storage-setup will be executed incorrectly.
#docker_container_storage_setup_devs: /dev/vdb
## When set to false and there are more than 3 nameservers, only the first 3 will be used.
docker_dns_servers_strict: false
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## Used to set the docker daemon iptables option ("true" or "false")
docker_iptables_enabled: "false"
# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
## An obvious use case is allowing insecure-registry access to self-hosted registries.
## Entries can be an IP address or a domain name,
## e.g. 172.19.16.11 or mirror.registry.io
#insecure_registries:
# - mirror.registry.io
# - 172.19.16.11
## Add other registries, e.g. a Chinese registry mirror.
#registry_mirrors:
# - https://registry.docker-cn.com
# - https://mirror.aliyuncs.com
## If non-empty, will override the default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which controls whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for the system default
#docker_mount_flags:
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
docker_options: >-
  {%- if insecure_registries is defined -%}
  {{ insecure_registries | map('regex_replace', '^(.*)$', '--insecure-registry=\1' ) | list | join(' ') }}
  {%- endif %}
  {% if registry_mirrors is defined -%}
  {{ registry_mirrors | map('regex_replace', '^(.*)$', '--registry-mirror=\1' ) | list | join(' ') }}
  {%- endif %}
  --graph={{ docker_daemon_graph }} {{ docker_log_opts }}
  {%- if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
  --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current
  --default-runtime=docker-runc --exec-opt native.cgroupdriver=systemd
  --userland-proxy-path=/usr/libexec/docker/docker-proxy-current --signature-verification=false
  {%- endif -%}
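## Example rendering (hypothetical values): with insecure_registries set to
## ['172.19.16.11'] and registry_mirrors undefined, docker_options renders
## roughly as:
##   --insecure-registry=172.19.16.11 --graph=/var/lib/docker --log-opt max-size=50m --log-opt max-file=5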
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: docker
cert_management: script
vault_deployment_type: docker
helm_deployment_type: host
# Enable kubeadm deployment (experimental)
kubeadm_enabled: false
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
kubectl_localhost: false
# Define credentials_dir here so it can be overridden
credentials_dir: "{{ inventory_dir }}/credentials"
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true
# Addons which can be enabled
efk_enabled: false
helm_enabled: false
registry_enabled: false
enable_network_policy: true
local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }}"
persistent_volumes_enabled: false
cephfs_provisioner_enabled: false
ingress_nginx_enabled: false
cert_manager_enabled: false
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
openstack_lbaas_enabled: false
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
# openstack_lbaas_use_octavia: False
# openstack_lbaas_method: "ROUND_ROBIN"
# openstack_lbaas_provider: "haproxy"
openstack_lbaas_create_monitor: "yes"
openstack_lbaas_monitor_delay: "1m"
openstack_lbaas_monitor_timeout: "30s"
openstack_lbaas_monitor_max_retries: "3"
## List of authorization modes that must be configured for
## the k8s cluster. Only 'AlwaysAllow', 'AlwaysDeny', 'Node' and
## 'RBAC' modes are tested. Order is important.
authorization_modes: ['Node', 'RBAC']
rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet's HTTPS endpoint
kubelet_authentication_token_webhook: true
# When enabled, access to the kubelet API requires authorization by delegation to the API server
kubelet_authorization_mode_webhook: false
## List of key=value pairs that describe feature gates for
## the k8s cluster.
kube_feature_gates:
- "PersistentLocalVolumes={{ local_volume_provisioner_enabled | string }}"
- "VolumeScheduling={{ local_volume_provisioner_enabled | string }}"
- "MountPropagation={{ local_volume_provisioner_enabled | string }}"
# Vault data dirs.
vault_base_dir: /etc/vault
vault_cert_dir: "{{ vault_base_dir }}/ssl"
vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
# Local volume provisioner dirs
local_volume_provisioner_base_dir: /mnt/disks
local_volume_provisioner_mount_dir: /mnt/disks
## Running on top of OpenStack VMs with Cinder enabled may lead to unschedulable pods due to the NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
# Weave's network password for encryption;
# if null, no network encryption is used.
# You can use --extra-vars to pass the password on the command line
weave_password: EnterPasswordHere
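## Example (hypothetical password): pass the value at run time, e.g.
##   ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -e weave_password=MySecretPassword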
# Weave uses consensus mode by default
# Enabling seed mode allows hosts to be dynamically added or removed
# https://www.weave.works/docs/net/latest/ipam/
weave_mode_seed: false
# These two variables are automatically changed by the weave role in group_vars/k8s-cluster.yml.
# Do not manually change these values
weave_seed: uninitialized
weave_peers: uninitialized
## Set no_proxy to all assigned cluster IPs and hostnames
no_proxy: >-
  {%- if http_proxy is defined or https_proxy is defined %}
  {%- if loadbalancer_apiserver is defined -%}
  {{ apiserver_loadbalancer_domain_name| default('') }},
  {{ loadbalancer_apiserver.address | default('') }},
  {%- endif -%}
  {%- for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
  {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }},
  {%- if (item != hostvars[item]['ansible_hostname']) -%}
  {{ hostvars[item]['ansible_hostname'] }},
  {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
  {%- endif -%}
  {{ item }},{{ item }}.{{ dns_domain }},
  {%- endfor -%}
  127.0.0.1,localhost
  {%- endif %}
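## Example rendering (hypothetical values): with http_proxy set and a single
## host 'node1' whose access_ip is 10.0.0.10, no_proxy evaluates to roughly:
##   10.0.0.10,node1,node1.cluster.local,127.0.0.1,localhost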
proxy_env:
  http_proxy: "{{ http_proxy| default ('') }}"
  https_proxy: "{{ https_proxy| default ('') }}"
  no_proxy: "{{ no_proxy| default ('') }}"
ssl_ca_dirs: >-
  [
  {% if ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -%}
  '/usr/share/ca-certificates',
  {% elif ansible_os_family == 'RedHat' -%}
  '/etc/pki/tls',
  '/etc/pki/ca-trust',
  {% elif ansible_os_family == 'Debian' -%}
  '/usr/share/ca-certificates',
  {% endif -%}
  ]
# Vars for pointing to kubernetes api endpoints
is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
kube_apiserver_count: "{{ groups['kube-master'] | length }}"
kube_apiserver_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
kube_apiserver_access_address: "{{ access_ip | default(kube_apiserver_address) }}"
first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
loadbalancer_apiserver_localhost: "{{ loadbalancer_apiserver is not defined }}"
# applied only if an external loadbalancer_apiserver is defined, otherwise ignored
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
kube_apiserver_endpoint: |-
  {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
  https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
  {%- elif is_kube_master -%}
  https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }}
  {%- else -%}
  {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
  https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
  {%- else -%}
  https://{{ first_kube_master }}:{{ kube_apiserver_port }}
  {%- endif -%}
  {%- endif %}
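## Example: on a non-master node with no external loadbalancer_apiserver
## defined, loadbalancer_apiserver_localhost evaluates to true, so this
## renders as https://localhost:6443 (served by the local nginx proxy).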
kube_apiserver_insecure_endpoint: >-
  http://{{ kube_apiserver_insecure_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_insecure_port }}
kube_apiserver_client_cert: |-
  {% if kubeadm_enabled -%}
  {{ kube_cert_dir }}/ca.crt
  {%- else -%}
  {{ kube_cert_dir }}/apiserver.pem
  {%- endif %}
kube_apiserver_client_key: |-
  {% if kubeadm_enabled -%}
  {{ kube_cert_dir }}/ca.key
  {%- else -%}
  {{ kube_cert_dir }}/apiserver-key.pem
  {%- endif %}
# Set to true to deploy etcd-events cluster
etcd_events_cluster_enabled: false
# Vars for pointing to etcd endpoints
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}"
etcd_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_events_access_address: "{{ access_ip | default(etcd_address) }}"
etcd_peer_url: "https://{{ etcd_access_address }}:2380"
etcd_client_url: "https://{{ etcd_access_address }}:2379"
etcd_events_peer_url: "https://{{ etcd_events_access_address }}:2382"
etcd_events_client_url: "https://{{ etcd_events_access_address }}:2381"
etcd_access_addresses: |-
  {% for item in groups['etcd'] -%}
  https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}
  {%- endfor %}
etcd_events_access_addresses: |-
  {% for item in groups['etcd'] -%}
  https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2381{% if not loop.last %},{% endif %}
  {%- endfor %}
# Users should set etcd_member_name in inventory/mycluster/hosts.ini
etcd_member_name: |-
  {% for host in groups['etcd'] %}
  {% if inventory_hostname == host %}{{ hostvars[host].etcd_member_name | default("etcd" + loop.index|string) }}{% endif %}
  {% endfor %}
etcd_peer_addresses: |-
  {% for item in groups['etcd'] -%}
  {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
  {%- endfor %}
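## Example rendering (hypothetical two-member cluster at 10.0.0.1/10.0.0.2,
## no explicit etcd_member_name set in the inventory):
##   etcd1=https://10.0.0.1:2380,etcd2=https://10.0.0.2:2380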
etcd_events_peer_addresses: |-
  {% for item in groups['etcd'] -%}
  {{ hostvars[item].etcd_member_name | default("etcd" + loop.index|string) }}-events=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2382{% if not loop.last %},{% endif %}
  {%- endfor %}
podsecuritypolicy_enabled: false
etcd_heartbeat_interval: "250"
etcd_election_timeout: "5000"
etcd_snapshot_count: "10000"
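## Note: etcd's tuning guide suggests an election timeout of roughly 10x the
## heartbeat interval; the defaults above (5000ms vs 250ms) give a 20x margin.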