---
cilium_min_version_required: "1.10"
# Log-level
cilium_debug: false

cilium_mtu: ""
cilium_enable_ipv4: true
cilium_enable_ipv6: false

# Cilium agent health port
cilium_agent_health_port: "{%- if cilium_version | regex_replace('v') is version('1.11.6', '>=') -%}9879 {%- else -%} 9876 {%- endif -%}"

# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
#   These can be queried with:
#     `kubectl get ciliumid`
# - "kvstore" stores identities in an etcd kvstore.
# - In order to support External Workloads, "crd" is required
#   - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
# - KVStore operations are only required when cilium-operator is running with any of the below options:
#   - --synchronize-k8s-services
#   - --synchronize-k8s-nodes
#   - --identity-allocation-mode=kvstore
#   - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
cilium_identity_allocation_mode: kvstore

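# Illustrative override (not a default): clusters that need External Workloads
# support could switch to CRD-backed identities, e.g. in group_vars:
# cilium_identity_allocation_mode: crd
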
# Etcd SSL dirs
cilium_cert_dir: /etc/cilium/certs
kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem

# Limits for apps
cilium_memory_limit: 500M
cilium_cpu_limit: 500m
cilium_memory_requests: 64M
cilium_cpu_requests: 100m

# Overlay Network Mode
cilium_tunnel_mode: vxlan
# Optional features
cilium_enable_prometheus: false
# Enable if you want to make use of hostPort mappings
cilium_enable_portmap: false
# Monitor aggregation level (none/low/medium/maximum)
cilium_monitor_aggregation: medium
# Kube Proxy Replacement mode (strict/partial)
cilium_kube_proxy_replacement: partial

# If upgrading from Cilium < 1.5, you may want to override some of these options
# to prevent service disruptions. See also:
# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
cilium_preallocate_bpf_maps: false

# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
cilium_tofqdns_enable_poller: false

# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
cilium_enable_legacy_services: false

# Deploy cilium even if kube_network_plugin is not cilium.
# This makes it possible to deploy Cilium alongside another CNI plugin in order to replace kube-proxy.
cilium_deploy_additionally: false

# Auto direct node routes can be used to advertise pod routes in your cluster
# without any tunnelling (with `cilium_tunnel_mode` set to `disabled`).
# This works only if you have L2 connectivity between all your nodes.
# You will also have to specify the variable `cilium_native_routing_cidr` to
# make this work. Please refer to the Cilium documentation for more
# information about this kind of setup.
cilium_auto_direct_node_routes: false

# Allows explicitly specifying the IPv4 CIDR for native routing.
# When specified, Cilium assumes networking for this CIDR is preconfigured and
# hands traffic destined for that range to the Linux network stack without
# applying any SNAT.
# Generally speaking, specifying a native routing CIDR implies that Cilium can
# depend on the underlying networking stack to route packets to their
# destination. To offer a concrete example, if Cilium is configured to use
# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
# the user must configure the routes to reach pods, either manually or by
# setting the auto-direct-node-routes flag.
cilium_native_routing_cidr: ""

# Allows explicitly specifying the IPv6 CIDR for native routing.
cilium_native_routing_cidr_ipv6: ""

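# Illustrative native-routing setup (not a default); the values below are
# examples only and assume L2 connectivity between all nodes, as described above:
# cilium_tunnel_mode: disabled
# cilium_auto_direct_node_routes: true
# cilium_native_routing_cidr: 10.233.64.0/18
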
# Enable transparent network encryption.
cilium_encryption_enabled: false

# Encryption method. Can be either ipsec or wireguard.
# Only effective when `cilium_encryption_enabled` is set to true.
cilium_encryption_type: "ipsec"

# Enable encryption for pure node to node traffic.
# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
cilium_ipsec_node_encryption: false

# If your kernel or distribution does not support WireGuard, the Cilium agent can be configured to fall back on a user-space implementation.
# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard,
# it will fall back to the wireguard-go user-space implementation of WireGuard.
# This option is only effective when `cilium_encryption_type` is set to `wireguard`.
cilium_wireguard_userspace_fallback: false

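# Illustrative override (not a default): enable transparent encryption with
# WireGuard, allowing a fallback to wireguard-go on kernels without native support:
# cilium_encryption_enabled: true
# cilium_encryption_type: wireguard
# cilium_wireguard_userspace_fallback: true
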
# Enable Bandwidth Manager
# Cilium's bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.
# Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies.
# If such policies select a Pod at egress, bandwidth enforcement is disabled for that Pod.
# Bandwidth Manager requires a v5.1.x or more recent Linux kernel.
cilium_enable_bandwidth_manager: false

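# Example (illustrative): once the bandwidth manager is enabled, a Pod can be
# rate limited at egress via the annotation mentioned above, e.g.:
# metadata:
#   annotations:
#     kubernetes.io/egress-bandwidth: "10M"
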
# IP Masquerade Agent
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
cilium_ip_masq_agent_enable: false

### Packets sent from a pod to a destination that belongs to any CIDR in the nonMasqueradeCIDRs list are not masqueraded
cilium_non_masquerade_cidrs:
  - 10.0.0.0/8
  - 172.16.0.0/12
  - 192.168.0.0/16
  - 100.64.0.0/10
  - 192.0.0.0/24
  - 192.0.2.0/24
  - 192.88.99.0/24
  - 198.18.0.0/15
  - 198.51.100.0/24
  - 203.0.113.0/24
  - 240.0.0.0/4
### Indicates whether to masquerade traffic to the link local prefix.
### If masqLinkLocal is not set or is set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
cilium_masq_link_local: false
### The interval at which the agent attempts to reload its config from disk
cilium_ip_masq_resync_interval: 60s

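# Illustrative override (not a default): enable the IP masquerade agent and
# only exempt the cluster's own private ranges from masquerading:
# cilium_ip_masq_agent_enable: true
# cilium_non_masquerade_cidrs:
#   - 10.0.0.0/8
#   - 192.168.0.0/16
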
# Hubble
### Enable Hubble without installing the Hubble Relay/UI components
cilium_enable_hubble: false
### Enable Hubble Metrics
cilium_enable_hubble_metrics: false
### Metrics to enable if cilium_enable_hubble_metrics: true, e.g.:
cilium_hubble_metrics: {}
# - dns
# - drop
# - tcp
# - flow
# - icmp
# - http
### Enable Hubble install (Hubble Relay and UI)
cilium_hubble_install: false
### Automatically generate certificates if cilium_hubble_install: true
cilium_hubble_tls_generate: false

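# Illustrative override (not a default): enable Hubble with a metrics set and
# deploy the Hubble components with auto-generated TLS certificates, following
# the commented metric names listed above:
# cilium_enable_hubble: true
# cilium_enable_hubble_metrics: true
# cilium_hubble_metrics:
#   - dns
#   - drop
#   - tcp
#   - flow
# cilium_hubble_install: true
# cilium_hubble_tls_generate: true
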
# The default IP address management mode is "Cluster Scope".
# https://docs.cilium.io/en/stable/concepts/networking/ipam/
cilium_ipam_mode: cluster-pool

# Cluster Pod CIDRs use the kube_pods_subnet value by default.
# If your node network is in the same range you will lose connectivity to other nodes.
# Defaults to kube_pods_subnet if not set.
# cilium_pool_cidr: 10.233.64.0/18

# When cilium_enable_ipv6 is used, you need to set the IPv6 value. Defaults to kube_pods_subnet_ipv6 if not set.
# cilium_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112

# When Cilium IPAM runs in "Cluster Scope" mode, it pre-allocates a segment of the
# cluster Pod CIDR to each node and allocates Pod IPs from that segment.
# cilium_pool_mask_size specifies the size of the per-node allocation (node.ipam.podCIDRs).
# Defaults to kube_network_node_prefix if not set.
# cilium_pool_mask_size: "24"

# cilium_pool_mask_size_ipv6 specifies the size allocated to node.ipam.podCIDRs from the cluster Pod IPv6 CIDR.
# Defaults to kube_network_node_prefix_ipv6 if not set.
# cilium_pool_mask_size_ipv6: "120"

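# Illustrative dual-stack cluster-pool override (not a default), combining the
# commented examples above:
# cilium_pool_cidr: 10.233.64.0/18
# cilium_pool_mask_size: "24"
# cilium_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
# cilium_pool_mask_size_ipv6: "120"
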
# Extra arguments for the Cilium agent
cilium_agent_custom_args: []

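# Illustrative override (not a default): extra arguments are passed as a list of
# strings; the flag name below is a placeholder only, not a real cilium-agent flag:
# cilium_agent_custom_args:
#   - "--example-flag=value"  # hypothetical flag, for illustration only
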
# For adding and mounting extra volumes to the cilium agent
cilium_agent_extra_volumes: []
cilium_agent_extra_volume_mounts: []

cilium_agent_extra_env_vars: []

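# Illustrative override (not a default), assuming these lists are rendered as
# standard Kubernetes volume, volumeMount and env entries on the agent pod;
# the names and paths below are examples only:
# cilium_agent_extra_volumes:
#   - name: custom-certs
#     hostPath:
#       path: /etc/example/certs
#       type: DirectoryOrCreate
# cilium_agent_extra_volume_mounts:
#   - name: custom-certs
#     mountPath: /certs
#     readOnly: true
# cilium_agent_extra_env_vars:
#   - name: EXAMPLE_ENV
#     value: "example"
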
cilium_operator_replicas: 2

# The address on which the Cilium operator serves its health check API
cilium_operator_api_serve_addr: "127.0.0.1:9234"

## A dictionary of extra config variables to add to cilium-config, formatted like:
##  cilium_config_extra_vars:
##    var1: "value1"
##    var2: "value2"
cilium_config_extra_vars: {}

# For adding and mounting extra volumes to the cilium operator
cilium_operator_extra_volumes: []
cilium_operator_extra_volume_mounts: []

# Extra arguments for the Cilium Operator
cilium_operator_custom_args: []

# Name of the cluster. Only relevant when building a mesh of clusters.
cilium_cluster_name: default

# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime.
# Available for Cilium v1.10 and up.
cilium_cni_exclusive: true

# Configure the log file for CNI logging with a retention policy of 7 days.
# Disable CNI file logging by explicitly setting this field to an empty string.
# Available for Cilium v1.12 and up.
cilium_cni_log_file: "/var/run/cilium/cilium-cni.log"

# -- Configure cgroup related configuration
# -- Enable auto mount of cgroup2 filesystem.
# When `cilium_cgroup_auto_mount` is enabled, the cgroup2 filesystem is mounted at
# the `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod.
# If `cilium_cgroup_auto_mount` is disabled, users are expected to have mounted the
# cgroup2 filesystem at the `cilium_cgroup_host_root` path on the host, and that
# path is then mounted inside the cilium agent pod at the same location.
# Available for Cilium v1.11 and up
cilium_cgroup_auto_mount: true
# -- Configure cgroup root where cgroup2 filesystem is mounted on the host
cilium_cgroup_host_root: "/run/cilium/cgroupv2"

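# Illustrative override (not a default): skip auto-mounting when cgroup2 is
# already mounted on the host, e.g. at the systemd unified-hierarchy location:
# cilium_cgroup_auto_mount: false
# cilium_cgroup_host_root: "/sys/fs/cgroup"
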
# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
cilium_bpf_map_dynamic_size_ratio: "0.0025"

# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
cilium_enable_ipv4_masquerade: true
# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
cilium_enable_ipv6_masquerade: true

# -- Enable native IP masquerade support in eBPF
cilium_enable_bpf_masquerade: false

# -- Configure whether direct routing mode should route traffic via
# host stack (true) or directly and more efficiently out of BPF (false) if
# the kernel supports it. The latter has the implication that it will also
# bypass netfilter in the host namespace.
cilium_enable_host_legacy_routing: true

# -- Enable use of the remote node identity.
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
cilium_enable_remote_node_identity: true

# -- Enable the use of well-known identities.
cilium_enable_well_known_identities: false

# The monitor aggregation flags determine which TCP flags, upon their
# first observation, cause monitor notifications to be generated.
#
# Only effective when monitor aggregation is set to "medium" or higher.
cilium_monitor_aggregation_flags: "all"

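# Illustrative override (not a default), assuming the agent accepts a
# comma-separated list of TCP flag names here instead of "all":
# cilium_monitor_aggregation_flags: "syn,fin,rst"
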
cilium_enable_bpf_clock_probe: true

# -- Whether to disable CNP (CiliumNetworkPolicy) status updates.
cilium_disable_cnp_status_updates: true

# Configure how long to wait for the Cilium DaemonSet to be ready again
cilium_rolling_restart_wait_retries_count: 30
cilium_rolling_restart_wait_retries_delay_seconds: 10

# Cilium changed the default metrics exporter ports in 1.12
cilium_agent_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9962', '9090') }}"
cilium_operator_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9963', '6942') }}"
cilium_hubble_scrape_port: "{{ cilium_version | regex_replace('v') is version('1.12', '>=') | ternary('9965', '9091') }}"

# Cilium certgen args used to generate certificates for Hubble mTLS
cilium_certgen_args:
  cilium-namespace: kube-system
  ca-reuse-secret: true
  ca-secret-name: hubble-ca-secret
  ca-generate: true
  ca-validity-duration: 94608000s
  hubble-server-cert-generate: true
  hubble-server-cert-common-name: '*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io'
  hubble-server-cert-validity-duration: 94608000s
  hubble-server-cert-secret-name: hubble-server-certs
  hubble-relay-client-cert-generate: true
  hubble-relay-client-cert-common-name: '*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io'
  hubble-relay-client-cert-validity-duration: 94608000s
  hubble-relay-client-cert-secret-name: hubble-relay-client-certs
  hubble-relay-server-cert-generate: false