Merge pull request #829 from bogdando/opts

Rework group/role vars
Antoine Legrand 2017-02-23 10:39:43 +01:00 committed by GitHub
commit 403fea39f7
17 changed files with 263 additions and 201 deletions

View File

@@ -0,0 +1 @@
../../inventory/group_vars

View File

@@ -1 +0,0 @@
../../../../inventory/group_vars/all.yml

View File

@@ -67,7 +67,9 @@ Group vars and overriding variables precedence
 ----------------------------------------------
 The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
+Optional variables are located in ``inventory/group_vars/all.yml``.
+Mandatory variables that are common for at least one role (or a node group) can be found in
+``inventory/group_vars/k8s-cluster.yml``.
 There are also role vars for docker, rkt, kubernetes preinstall and master roles.
 According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
 those cannot be overridden from the group vars. In order to override, one should use
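The hunk above ends mid-sentence, but the precedence point stands on its own: per the linked Ansible docs, extra vars (`-e`) outrank both group vars and role vars, so they are the safe way to override the role-level defaults this PR introduces. A minimal sketch -- the file name `my_overrides.yml` is illustrative, the variable names come from this PR:

```yaml
# my_overrides.yml -- apply with e.g.: ansible-playbook cluster.yml -e @my_overrides.yml
# Extra vars sit at the top of Ansible's variable precedence, above the
# role vars that now carry defaults such as kube_version and etcd_multiaccess.
kube_version: v1.5.3
etcd_multiaccess: false
```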

View File

@@ -1,176 +1,60 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
+#etcd_multiaccess: true
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
+## External LB example config
+## apiserver_loadbalancer_domain_name: "elb.some.domain"
+#loadbalancer_apiserver:
+#  address: 1.2.3.4
+#  port: 1234
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (443)
+#nginx_kube_apiserver_port: 8443
-## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.5.3
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
-# Where the binaries will be downloaded.
-# Note: ensure that you have enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
-# Uncomment this line for CoreOS only.
-# Directory where python binary is installed
-# ansible_python_interpreter: "/opt/bin/python"
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning: enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+#peer_with_router: false
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changeable...
-kube_cert_group: kube-cert
-# Cluster Loglevel configuration
-kube_log_level: 2
-# Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previous versions, anonymous auth was
-# not implemented. As the new flag defaults to true, we have to explicitly disable it. Change this line if you want the
-# 1.5 default behavior. The flag is actually only added if the used kubernetes version is >= 1.5
-kube_api_anonymous_auth: false
-#
-# For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-# for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-# processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-# modules.
-#
-kubelet_load_modules: false
-# Users to create for basic auth in Kubernetes API via HTTP
-kube_api_pwd: "changeme"
-kube_users:
-  kube:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-  root:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Deploy netchecker app to verify DNS resolution as an HTTP service
-deploy_netchecker: false
-# For some environments, each node has a publicly accessible
-# address and an address it should bind services to. These are
-# really inventory level variables, but described here for consistency.
-#
-# When advertising access, the access_ip will be used, but will defer to
-# ip and then the default ansible ip when unspecified.
-#
-# When binding to restrict access, the ip variable will be used, but will
-# defer to the default ansible ip when unspecified.
-#
-# The ip variable is used for specific address binding, e.g. listen address
-# for etcd. This is used to help with environments like Vagrant or multi-nic
-# systems where one address should be preferred over another.
-# ip: 10.2.2.2
-#
-# The access_ip variable is used to define how other nodes should access
-# the node. This is used in flannel to allow other flannel nodes to see
-# this node for example. The access_ip is really useful in AWS and Google
-# environments where the nodes are accessed remotely by the "public" ip,
-# but don't know about that address themselves.
-# access_ip: 1.1.1.1
-# Etcd access modes:
-# Enable multiaccess to configure clients to access all of the etcd members directly
-# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-# This may be the case if clients support and loadbalance multiple etcd servers natively.
-etcd_multiaccess: true
-# Assume there are no internal loadbalancers for apiservers and listen on
-# kube_apiserver_port (default 443)
-loadbalancer_apiserver_localhost: true
-# Choose network plugin (calico, canal, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: calico
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-# Internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-# Internal network total size (optional). This is the prefix of the
-# entire network. Must be unused in your environment.
-# kube_network_prefix: 18
-# Internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-# With calico it is possible to distribute routes with border routers of the datacenter.
-peer_with_router: false
-# Warning: enabling router peering will disable calico's default behavior ('node mesh').
-# The subnets of each node will be distributed by the datacenter router
-# API Server service IP address in Kubernetes internal network.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-# The port the API Server will be listening on.
-kube_apiserver_port: 443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-# Local loadbalancer should use this port instead - defaults to kube_apiserver_port
-nginx_kube_apiserver_port: "{{ kube_apiserver_port }}"
-# Internal DNS configuration.
-# Kubernetes can create and maintain its own DNS server to resolve service names
-# into appropriate IP addresses. It's highly advisable to run such a DNS server,
-# as it greatly simplifies configuration of your applications - you can use
-# service names instead of magic environment variables.
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: dnsmasq_kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
 ## Upstream dns servers used by dnsmasq
 #upstream_dns_servers:
 #  - 8.8.8.8
 #  - 8.8.4.4
-dns_domain: "{{ cluster_name }}"
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
-# IP address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-# There are some changes specific to the cloud providers
-# for instance we need to encapsulate packets with some network plugins
-# If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
-# When openstack is used make sure to source in the openstack credentials
-# like you would do when using nova-client before starting the playbook.
-# When azure is used, you need to also set the following variables.
-# cloud_provider:
-# see docs/azure.md for details on how to get these values
 #azure_tenant_id:
 #azure_subscription_id:
 #azure_aad_client_id:
@@ -182,46 +66,25 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
 #azure_vnet_name:
 #azure_route_table_name:
 ## Set these proxy values in order to update docker daemon to use proxies
-# http_proxy: ""
-# https_proxy: ""
-# no_proxy: ""
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false"
-docker_bin_dir: "/usr/bin"
+#http_proxy: ""
+#https_proxy: ""
+#no_proxy: ""
 ## Uncomment this if you want to force overlay/overlay2 as docker storage driver
 ## Please note that overlay2 is only supported on newer kernels
 #docker_storage_options: -s overlay2
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-# Default packages to install within the cluster
-kpm_packages: []
+## Default packages to install within the cluster, e.g.:
+#kpm_packages:
 # - name: kube-system/grafana
-# Settings for containerized control plane (etcd/kubelet)
-rkt_version: 1.21.0
-etcd_deployment_type: docker
-kubelet_deployment_type: docker
-vault_deployment_type: docker
-efk_enabled: false
 ## Certificate Management
 ## This setting determines whether certs are generated via scripts or whether a
 ## cluster of Hashicorp's Vault is started to issue certificates (using etcd
 ## as a backend). Options are "script" or "vault"
-cert_management: script
-# Please specify true if you want to perform a kernel upgrade
-kernel_upgrade: false
+#cert_management: script
+## Please specify true if you want to perform a kernel upgrade
+#kernel_upgrade: false
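The net effect of this hunk pair: `inventory/group_vars/all.yml` shrinks from mandatory settings to commented, opt-in samples. A hedged sketch of re-enabling one of them, using the exact sample values from the new file above (the domain and address are placeholders from the sample, not real endpoints):

```yaml
# inventory/group_vars/all.yml -- external LB block, uncommented from the sample
apiserver_loadbalancer_domain_name: "elb.some.domain"
loadbalancer_apiserver:
  address: 1.2.3.4
  port: 1234
```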

View File

@@ -0,0 +1,33 @@
## Required for bootstrap-os/preinstall/download roles and setting facts
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
docker_bin_dir: /usr/bin
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
kube_service_addresses: 10.233.0.0/18
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolution as an HTTP service
deploy_netchecker: false
# IP address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
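The `skydns_server`/`dns_server` defaults above lean on the `ipaddr` filter (which needs the python `netaddr` library). A quick illustrative playbook -- not part of this PR -- showing what the filter chain evaluates to for the default service network:

```yaml
# check_ipaddr.yml (hypothetical) -- run with: ansible-playbook check_ipaddr.yml
- hosts: localhost
  gather_facts: false
  vars:
    kube_service_addresses: 10.233.0.0/18
  tasks:
    # ipaddr('net') validates the CIDR, ipaddr(3) selects the third host address
    # within it (10.233.0.3/18), and ipaddr('address') strips the prefix length.
    - debug:
        msg: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"  # -> 10.233.0.3
```

The same chain with `ipaddr(2)` yields 10.233.0.2 for `dns_server`, and with `ipaddr(1)` yields 10.233.0.1 for `kube_apiserver_ip`.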

View File

@@ -0,0 +1,38 @@
## Required for bootstrap-os/preinstall/download roles and setting facts
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
docker_bin_dir: /usr/bin
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# Settings for containerized control plane (etcd/secrets)
etcd_deployment_type: docker
cert_management: script
vault_deployment_type: docker
kube_service_addresses: 10.233.0.0/18
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolution as an HTTP service
deploy_netchecker: false
# IP address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"

View File

@@ -0,0 +1,113 @@
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# the kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.5.3
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
kube:
pass: "{{kube_api_pwd}}"
role: admin
root:
pass: "{{kube_api_pwd}}"
role: admin
# Choose network plugin (calico, canal, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# Internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24
# API Server service IP address in Kubernetes internal network.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
# The port the API Server will be listening on.
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolution as an HTTP service
deploy_netchecker: false
# IP address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false"
docker_bin_dir: "/usr/bin"
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: docker
cert_management: script
vault_deployment_type: docker
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Monitoring apps for k8s
efk_enabled: false
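One caveat on the node-size comment above: the arithmetic for the defaults actually set here comes out smaller than the "4096 nodes" it claims. A hedged capacity check, not part of the PR:

```yaml
# Capacity math for the defaults above (illustrative):
#   kube_pods_subnet 10.233.64.0/18 carved into kube_network_node_prefix /24 blocks
#   node subnets:  2^(24 - 18)     = 64
#   pods per node: 2^(32 - 24) - 2 = 254
# Room for 4096 nodes at /24 per node would need a /12 pods subnet instead.
kube_pods_subnet: 10.233.64.0/18
kube_network_node_prefix: 24
```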

View File

@@ -0,0 +1,9 @@
# Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previous versions, anonymous auth was
# not implemented. As the new flag defaults to true, we have to explicitly disable it. Change this line if you want the
# 1.5 default behavior. The flag is actually only added if the used kubernetes version is >= 1.5
kube_api_anonymous_auth: false
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
kube_version: v1.5.3

View File

@ -1,4 +1,6 @@
--- ---
kube_cert_group: kube-cert
addusers: addusers:
etcd: etcd:
name: etcd name: etcd

View File

@@ -18,6 +18,7 @@ download_localhost: False
 download_always_pull: False
 # Versions
+kube_version: v1.5.3
 etcd_version: v3.0.6
 #TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download

View File

@@ -4,7 +4,7 @@
     name: "kpm"
     state: "present"
     version: "0.16.1"
-  when: kpm_packages | length > 0
+  when: kpm_packages|default([])|length > 0
 - name: manage kubernetes applications
   kpm:
@@ -14,7 +14,7 @@
     version: "{{item.version | default(omit)}}"
     variables: "{{item.variables | default(omit)}}"
     name: "{{item.name}}"
-  with_items: "{{kpm_packages}}"
+  with_items: "{{kpm_packages|default([])}}"
   register: kpmresults
   environment:
     PATH: "{{ ansible_env.PATH }}:{{ bin_dir }}"
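With `kpm_packages` now absent by default, both the install guard and the loop fall back to an empty list, so these tasks become no-ops until the variable is defined in group vars. A minimal sketch based on the commented sample in `all.yml` above; `version` and `variables` are optional per item and drop out via `default(omit)`:

```yaml
# inventory/group_vars/all.yml (illustrative)
kpm_packages:
  - name: kube-system/grafana   # package name, from the commented sample above
    # version: and variables: keys may be added per item; when absent, the task
    # passes them through default(omit) so kpm falls back to its own defaults.
```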

View File

@@ -16,7 +16,7 @@ stream {
   }
   server {
-    listen 127.0.0.1:{{ nginx_kube_apiserver_port }};
+    listen 127.0.0.1:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }};
     proxy_pass kube_apiserver;
     proxy_timeout 10m;
     proxy_connect_timeout 1s;

View File

@@ -17,6 +17,8 @@ common_required_pkgs:
 # GCE docker repository
 disable_ipv6_dns: false
+kube_cert_group: kube-cert
+kube_config_dir: /etc/kubernetes
 # For the openstack integration kubelet will need credentials to access
 # openstack apis like nova and cinder. Per default these values will be
@@ -27,9 +29,6 @@ openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
 openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
 openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}"
-# All clients access each node individually, instead of using a load balancer.
-etcd_multiaccess: true
 # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
 # for hostnet pods and infra needs
 resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf

View File

@@ -20,8 +20,8 @@
 - set_fact:
     kube_apiserver_endpoint: |-
-      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
-      https://localhost:{{ nginx_kube_apiserver_port }}
+      {% if not is_kube_master and loadbalancer_apiserver_localhost|default(false) -%}
+      https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
       {%- elif is_kube_master and loadbalancer_apiserver is not defined -%}
       http://127.0.0.1:{{ kube_apiserver_insecure_port }}
       {%- else -%}
@@ -57,7 +57,7 @@
       {%- endfor %}
 - set_fact:
-    etcd_access_endpoint: "{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
+    etcd_access_endpoint: "{% if etcd_multiaccess|default(true) %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
 - set_fact:
     etcd_member_name: |-
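The `|default(true)` is what preserves behavior now that `etcd_multiaccess` is no longer a hard-coded group var: leaving it unset keeps direct access to every etcd member, while setting it to false in group vars opts into the single proxied endpoint. An illustrative summary (the endpoint strings are examples, not values from this PR):

```yaml
# Outcomes of the etcd_access_endpoint fact above (illustrative):
#   etcd_multiaccess unset  -> default(true) -> etcd_access_addresses,
#                              e.g. "http://etcd1:2379,http://etcd2:2379"
#   etcd_multiaccess: false -> etcd_endpoint, the single proxied endpoint
etcd_multiaccess: false   # opt-out, set in inventory/group_vars/all.yml
```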

View File

@@ -0,0 +1,2 @@
---
kube_cert_group: kube-cert

View File

@@ -223,7 +223,7 @@
        "apiVersion": "v1",
        "metadata": {"node": "{{ inventory_hostname }}",
                     "scope": "node",
-                    "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"]) }}"}
+                    "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(hostvars[item]["ansible_default_ipv4.address"]) }}"}
        }'
        | {{ bin_dir }}/calicoctl create --skip-exists -f -
   with_items: "{{ groups['calico-rr'] | default([]) }}"
@@ -245,7 +245,7 @@
         peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster'])
 - name: Calico (old) | Configure peering with route reflectors
-  shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ hostvars[item]['calico_rr_ip']|default(hostvars[item]['ip']) }} as {{ local_as | default(global_as_num) }}"
+  shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ hostvars[item]['calico_rr_ip']|default(hostvars[item]['ip'])|default(hostvars[item]['ansible_default_ipv4.address']) }} as {{ local_as | default(global_as_num) }}"
   with_items: "{{ groups['calico-rr'] | default([]) }}"
   when: (legacy_calicoctl and
          peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster']
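The extra `|default(...)` gives `peerIP` a third fallback when neither `calico_rr_ip` nor `ip` is set for the host. Worth flagging: `hostvars[item]["ansible_default_ipv4.address"]` looks up a single flat key named literally `ansible_default_ipv4.address`, while the gathered fact is nested; the conventional nested spelling would be something like the sketch below (my assumption, not what this commit ships):

```yaml
# Hypothetical nested-fact spelling of the third fallback (illustrative):
peer_ip: >-
  {{ hostvars[item]['calico_rr_ip']
     | default(hostvars[item]['ip'])
     | default(hostvars[item]['ansible_default_ipv4']['address']) }}
```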

View File

@@ -1,6 +1,6 @@
 ---
-rkt_version: 1.12.0
+rkt_version: 1.21.0
 rkt_pkg_version: "{{ rkt_version }}-1"
 rkt_download_src: https://github.com/coreos/rkt
 rkt_download_url: "{{ rkt_download_src }}/releases/download/v{{ rkt_version }}"