Rename ansible groups to use _ instead of - (#7552)
* rename ansible groups to use _ instead of -:
  k8s-cluster -> k8s_cluster
  kube-node -> kube_node
  calico-rr -> calico_rr
  no-floating -> no_floating
  Note: the kube-node and k8s-cluster groups in the upgrade CI need clean-up after v2.16 is tagged
* ensure old groups are mapped to the new ones
parent d26191373a
commit 360aff4a57
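The backwards-compatibility mapping mentioned in the commit message ("ensure old groups are mapped to the new ones") is handled by the new `legacy_groups.yml` playbook imported from `cluster.yml` below. Its contents are not shown in this diff; the snippet that follows is only a minimal sketch of the idea, using the same `group_by` pattern as the block removed from `cluster.yml`, and the play names are illustrative assumptions rather than the literal file.

```yaml
# Hypothetical sketch of a legacy-group mapping playbook (not the literal
# legacy_groups.yml from this PR): each play targets an old-style group and
# adds its hosts to the corresponding new underscore-style group.
- name: Add kube-node hosts to kube_node
  hosts: kube-node
  gather_facts: false
  tasks:
    - name: Add nodes to kube_node group
      group_by:
        key: 'kube_node'

- name: Add k8s-cluster hosts to k8s_cluster
  hosts: k8s-cluster
  gather_facts: false
  tasks:
    - name: Add nodes to k8s_cluster group
      group_by:
        key: 'k8s_cluster'

- name: Add calico-rr hosts to calico_rr
  hosts: calico-rr
  gather_facts: false
  tasks:
    - name: Add nodes to calico_rr group
      group_by:
        key: 'calico_rr'
```

This mirrors the block removed from `cluster.yml` in the diff below, which used `group_by` to map the old `kube-master` group onto `kube_control_plane`.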
@@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
 # Review and change parameters under ``inventory/mycluster/group_vars``
 cat inventory/mycluster/group_vars/all/all.yml
-cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
+cat inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml

 # Deploy Kubespray with Ansible Playbook - run the playbook as root
 # The option `--become` is required, as for example writing SSL keys in /etc/,
@@ -254,8 +254,8 @@ Vagrant.configure("2") do |config|
       ansible.groups = {
         "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
         "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
-        "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
-        "k8s-cluster:children" => ["kube_control_plane", "kube-node"],
+        "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
+        "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
       }
     end
   end
cluster.yml
@@ -2,14 +2,8 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml

-- name: Add kube-master nodes to kube_control_plane
-  # This is for old inventory which contains kube-master instead of kube_control_plane
-  hosts: kube-master
-  gather_facts: false
-  tasks:
-    - name: add nodes to kube_control_plane group
-      group_by:
-        key: 'kube_control_plane'
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml

 - hosts: bastion[0]
   gather_facts: False

@@ -18,7 +12,7 @@
     - { role: kubespray-defaults }
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

-- hosts: k8s-cluster:etcd
+- hosts: k8s_cluster:etcd
   strategy: linear
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false

@@ -31,7 +25,7 @@
   tags: always
   import_playbook: facts.yml

-- hosts: k8s-cluster:etcd
+- hosts: k8s_cluster:etcd
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -54,7 +48,7 @@
         etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
       when: not etcd_kubeadm_enabled| default(false)

-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -67,7 +61,7 @@
         etcd_events_cluster_setup: false
       when: not etcd_kubeadm_enabled| default(false)

-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -85,7 +79,7 @@
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -95,7 +89,7 @@
     - { role: network_plugin, tags: network }
     - { role: kubernetes/node-label, tags: node-label }

-- hosts: calico-rr
+- hosts: calico_rr
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -131,7 +125,7 @@
     - { role: kubespray-defaults }
     - { role: kubernetes-apps, tags: apps }

-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"
@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
    hosts['_meta'] = { 'hostvars': {} }

    ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
-   for group in ["kube_control_plane", "kube-node", "etcd"]:
+   for group in ["kube_control_plane", "kube_node", "etcd"]:
      hosts[group] = []
      tag_key = "kubespray-role"
      tag_value = ["*"+group+"*"]

@@ -70,7 +70,7 @@ class SearchEC2Tags(object):
        hosts[group].append(dns_name)
        hosts['_meta']['hostvars'][dns_name] = ansible_host

-   hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']}
+   hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
    print(json.dumps(hosts, sort_keys=True, indent=2))

 SearchEC2Tags()
@@ -21,13 +21,13 @@
 {% endif %}
 {% endfor %}

-[kube-node]
+[kube_node]
 {% for vm in vm_list %}
-{% if 'kube-node' in vm.tags.roles %}
+{% if 'kube_node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
@@ -21,14 +21,14 @@
 {% endif %}
 {% endfor %}

-[kube-node]
+[kube_node]
 {% for vm in vm_roles_list %}
-{% if 'kube-node' in vm.tags.roles %}
+{% if 'kube_node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane

@@ -61,7 +61,7 @@
         "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
       ],
       "tags": {
-        "roles": "kube-node"
+        "roles": "kube_node"
       },
       "apiVersion": "{{apiVersion}}",
       "properties": {

@@ -112,4 +112,4 @@
   } {% if not loop.last %},{% endif %}
   {% endfor %}
   ]
 }
@@ -46,7 +46,7 @@ test_distro() {
   pass_or_fail "$prefix: netcheck" || return 1
 }

-NODES=($(egrep ^kube-node hosts))
+NODES=($(egrep ^kube_node hosts))
 NETCHECKER_HOST=localhost

 : ${OUTPUT_DIR:=./out}
@@ -44,8 +44,8 @@ import re
 import subprocess
 import sys

-ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster',
-         'calico-rr']
+ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
+         'calico_rr']
 PROTECTED_NAMES = ROLES
 AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
                       'load']

@@ -269,7 +269,7 @@ class KubesprayInventory(object):

     def purge_invalid_hosts(self, hostnames, protected_names=[]):
         for role in self.yaml_config['all']['children']:
-            if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
+            if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
                 all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
                 for host in all_hosts.keys():
                     if host not in hostnames and host not in protected_names:

@@ -290,7 +290,7 @@ class KubesprayInventory(object):
             if self.yaml_config['all']['hosts'] is None:
                 self.yaml_config['all']['hosts'] = {host: None}
             self.yaml_config['all']['hosts'][host] = opts
-        elif group != 'k8s-cluster:children':
+        elif group != 'k8s_cluster:children':
             if self.yaml_config['all']['children'][group]['hosts'] is None:
                 self.yaml_config['all']['children'][group]['hosts'] = {
                     host: None}

@@ -307,37 +307,37 @@ class KubesprayInventory(object):

     def set_k8s_cluster(self):
         k8s_cluster = {'children': {'kube_control_plane': None,
-                                    'kube-node': None}}
-        self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
+                                    'kube_node': None}}
+        self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster

     def set_calico_rr(self, hosts):
         for host in hosts:
             if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
-                self.debug("Not adding {0} to calico-rr group because it "
+                self.debug("Not adding {0} to calico_rr group because it "
                            "conflicts with kube_control_plane "
                            "group".format(host))
                 continue
-            if host in self.yaml_config['all']['children']['kube-node']:
-                self.debug("Not adding {0} to calico-rr group because it "
-                           "conflicts with kube-node group".format(host))
+            if host in self.yaml_config['all']['children']['kube_node']:
+                self.debug("Not adding {0} to calico_rr group because it "
+                           "conflicts with kube_node group".format(host))
                 continue
-            self.add_host_to_group('calico-rr', host)
+            self.add_host_to_group('calico_rr', host)

     def set_kube_node(self, hosts):
         for host in hosts:
             if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
                 if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube-node group because of "
+                    self.debug("Not adding {0} to kube_node group because of "
                                "scale deployment and host is in etcd "
                                "group.".format(host))
                     continue
             if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
                 if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube-node group because of "
+                    self.debug("Not adding {0} to kube_node group because of "
                                "scale deployment and host is in "
                                "kube_control_plane group.".format(host))
                     continue
-            self.add_host_to_group('kube-node', host)
+            self.add_host_to_group('kube_node', host)

     def set_etcd(self, hosts):
         for host in hosts:
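For reference, with these changes `inventory.py` now emits the underscore-style group names. The snippet below is a rough sketch of the kind of `hosts.yaml` the builder produces after this PR, assuming a single node named `node1` with a placeholder address; the exact host options and ordering may differ.

```yaml
# Approximate shape of contrib/inventory_builder output after the rename
# (node name and IP are placeholders, not taken from this diff).
all:
  hosts:
    node1:
      ansible_host: 10.0.0.1
      ip: 10.0.0.1
      access_ip: 10.0.0.1
  children:
    kube_control_plane:
      hosts:
        node1:
    kube_node:
      hosts:
        node1:
    etcd:
      hosts:
        node1:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}
```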
@@ -241,8 +241,8 @@ class TestInventory(unittest.TestCase):
             self.inv.yaml_config['all']['hosts'].get(host), opt)

     def test_set_k8s_cluster(self):
-        group = 'k8s-cluster'
-        expected_hosts = ['kube-node', 'kube_control_plane']
+        group = 'k8s_cluster'
+        expected_hosts = ['kube_node', 'kube_control_plane']

         self.inv.set_k8s_cluster()
         for host in expected_hosts:

@@ -251,7 +251,7 @@ class TestInventory(unittest.TestCase):
             self.inv.yaml_config['all']['children'][group]['children'])

     def test_set_kube_node(self):
-        group = 'kube-node'
+        group = 'kube_node'
         host = 'node1'

         self.inv.set_kube_node([host])

@@ -280,7 +280,7 @@ class TestInventory(unittest.TestCase):
         for h in range(3):
             self.assertFalse(
                 list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube_node']['hosts'])

     def test_scale_scenario_two(self):
         num_nodes = 500

@@ -296,7 +296,7 @@ class TestInventory(unittest.TestCase):
         for h in range(5):
             self.assertFalse(
                 list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube_node']['hosts'])

     def test_range2ips_range(self):
         changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
@@ -15,7 +15,7 @@
   roles:
     - { role: glusterfs/server }

-- hosts: k8s-cluster
+- hosts: k8s_cluster
   roles:
     - { role: glusterfs/client }


@@ -23,15 +23,15 @@
 # node2
 # node3

-# [kube-node]
+# [kube_node]
 # node2
 # node3
 # node4
 # node5
 # node6

-# [k8s-cluster:children]
-# kube-node
+# [k8s_cluster:children]
+# kube_node
 # kube_control_plane

 # [gfs-cluster]
@@ -3,7 +3,7 @@ all:
    heketi_admin_key: "11elfeinhundertundelf"
    heketi_user_key: "!!einseinseins"
  children:
-   k8s-cluster:
+   k8s_cluster:
      vars:
        kubelet_fail_swap_on: false
      children:

@@ -13,7 +13,7 @@ all:
        etcd:
          hosts:
            node2:
-       kube-node:
+       kube_node:
          hosts: &kube_nodes
            node1:
            node2:
@@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1
 %doc %{_docdir}/%{name}/inventory/sample/hosts.ini
 %config %{_sysconfdir}/%{name}/ansible.cfg
 %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
-%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
+%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml
 %license %{_docdir}/%{name}/LICENSE
 %{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
 %{_datarootdir}/%{name}/roles/
@@ -11,7 +11,7 @@ ${public_ip_address_bastion}
 ${list_master}


-[kube-node]
+[kube_node]
 ${list_node}


@@ -19,10 +19,10 @@ ${list_node}
 ${list_etcd}


-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane


-[k8s-cluster:vars]
+[k8s_cluster:vars]
 ${elb_api_fqdn}
@@ -11,9 +11,9 @@ supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]
 [etcd]
 ${list_master}

-[kube-node]
+[kube_node]
 ${list_worker}

-[k8s-cluster:children]
+[k8s_cluster:children]
 kube_control_plane
-kube-node
+kube_node
@@ -65,12 +65,12 @@ for name in "${MASTER_NAMES[@]}"; do
 done

 echo ""
-echo "[kube-node]"
+echo "[kube_node]"
 for name in "${WORKER_NAMES[@]}"; do
   echo "${name}"
 done

 echo ""
-echo "[k8s-cluster:children]"
+echo "[k8s_cluster:children]"
 echo "kube_control_plane"
-echo "kube-node"
+echo "kube_node"
@@ -263,8 +263,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
 |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
 | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
-|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
-|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
+|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube_node` for tainting them as nodes, empty by default. |
+|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
 |`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
 |`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
 |`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |

@@ -421,7 +421,7 @@ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
 ```

 if you chose to create a bastion host, this script will create
-`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
+`contrib/terraform/openstack/k8s_cluster.yml` with an ssh command for Ansible to
 be able to access your machines tunneling through the bastion's IP address. If
 you want to manually handle the ssh tunneling to these machines, please delete
 or move that file. If you want to use this, just leave it there, as ansible will

@@ -546,7 +546,7 @@ bin_dir: /opt/bin
 cloud_provider: openstack
 ```

-Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml`:
+Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`:

 - Set variable **kube_network_plugin** to your desired networking plugin.
   - **flannel** works out-of-the-box
@@ -204,7 +204,7 @@ resource "openstack_compute_instance_v2" "bastion" {
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no_floating.yml"
   }
 }


@@ -245,13 +245,13 @@ resource "openstack_compute_instance_v2" "k8s_master" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
   }
 }


@@ -292,13 +292,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
   }
 }


@@ -337,7 +337,7 @@ resource "openstack_compute_instance_v2" "etcd" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "etcd,no-floating"
+    kubespray_groups = "etcd,no_floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

@@ -379,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,no-floating"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

@@ -421,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,no-floating"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

@@ -462,13 +462,13 @@ resource "openstack_compute_instance_v2" "k8s_node" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

   provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no_floating.yml"
   }
 }


@@ -507,7 +507,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

@@ -548,13 +548,13 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {

   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

   provisioner "local-exec" {
-    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no-floating.yml%{else}true%{endif}"
+    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no_floating.yml%{else}true%{endif}"
   }
 }


@@ -593,7 +593,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {

   metadata = {
     ssh_user = var.ssh_user_gfs
-    kubespray_groups = "gfs-cluster,network-storage,no-floating"
+    kubespray_groups = "gfs-cluster,network-storage,no_floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }
@@ -177,12 +177,12 @@ variable "external_net" {
 }

 variable "supplementary_master_groups" {
-  description = "supplementary kubespray ansible groups for masters, such kube-node"
+  description = "supplementary kubespray ansible groups for masters, such kube_node"
   default = ""
 }

 variable "supplementary_node_groups" {
-  description = "supplementary kubespray ansible groups for worker nodes, such as kube-ingress"
+  description = "supplementary kubespray ansible groups for worker nodes, such as kube_ingress"
   default = ""
 }

@@ -108,7 +108,7 @@ While the defaults in variables.tf will successfully deploy a cluster, it is rec
 Kubespray will pull down a Kubernetes configuration file to access this cluster by enabling the
 `kubeconfig_localhost: true` in the Kubespray configuration.

-Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml` and comment back in the following line and change from `false` to `true`:
+Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml` and comment back in the following line and change from `false` to `true`:
 `\# kubeconfig_localhost: false`
 becomes:
 `kubeconfig_localhost: true`
@@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" {
   operating_system = var.operating_system
   billing_cycle = var.billing_cycle
   project_id = var.packet_project_id
-  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane", "etcd", "kube-node"]
+  tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
 }

 resource "packet_device" "k8s_master_no_etcd" {

@@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" {
   operating_system = var.operating_system
   billing_cycle = var.billing_cycle
   project_id = var.packet_project_id
-  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane"]
+  tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
 }

 resource "packet_device" "k8s_etcd" {

@@ -58,6 +58,6 @@ resource "packet_device" "k8s_node" {
   operating_system = var.operating_system
   billing_cycle = var.billing_cycle
   project_id = var.packet_project_id
-  tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-node"]
+  tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
 }

@@ -9,9 +9,9 @@ ${list_master}
 [etcd]
 ${list_master}

-[kube-node]
+[kube_node]
 ${list_worker}

-[k8s-cluster:children]
+[k8s_cluster:children]
 kube_control_plane
-kube-node
+kube_node
@@ -9,9 +9,9 @@ ${list_master}
 [etcd]
 ${list_master}

-[kube-node]
+[kube_node]
 ${list_worker}

-[k8s-cluster:children]
+[k8s_cluster:children]
 kube_control_plane
-kube-node
+kube_node
@@ -4,28 +4,28 @@

 The inventory is composed of 3 groups:

-* **kube-node** : list of kubernetes nodes where the pods will run.
+* **kube_node** : list of kubernetes nodes where the pods will run.
 * **kube_control_plane** : list of servers where kubernetes control plane components (apiserver, scheduler, controller) will run.
 * **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose.

-Note: do not modify the children of _k8s-cluster_, like putting
-the _etcd_ group into the _k8s-cluster_, unless you are certain
+Note: do not modify the children of _k8s_cluster_, like putting
+the _etcd_ group into the _k8s_cluster_, unless you are certain
 to do that and you have it fully contained in the latter:

 ```ShellSession
-k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd
+k8s_cluster ⊂ etcd => kube_node ∩ etcd = etcd
 ```

-When _kube-node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads.
+When _kube_node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads.
 If you want it a standalone, make sure those groups do not intersect.
 If you want the server to act both as control-plane and node, the server must be defined
-on both groups _kube_control_plane_ and _kube-node_. If you want a standalone and
+on both groups _kube_control_plane_ and _kube_node_. If you want a standalone and
 unschedulable master, the server must be defined only in the _kube_control_plane_ and
-not _kube-node_.
+not _kube_node_.

 There are also two special groups:

-* **calico-rr** : explained for [advanced Calico networking cases](calico.md)
+* **calico_rr** : explained for [advanced Calico networking cases](calico.md)
 * **bastion** : configure a bastion host if your nodes are not directly reachable

 Below is a complete inventory example:

@@ -49,15 +49,15 @@ node1
 node2
 node3

-[kube-node]
+[kube_node]
 node2
 node3
 node4
 node5
 node6

-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
 ```

@@ -66,7 +66,7 @@ kube_control_plane
 The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
 Optional variables are located in the `inventory/sample/group_vars/all.yml`.
 Mandatory variables that are common for at least one role (or a node group) can be found in the
-`inventory/sample/group_vars/k8s-cluster.yml`.
+`inventory/sample/group_vars/k8s_cluster.yml`.
 There are also role vars for docker, kubernetes preinstall and master roles.
 According to the [ansible docs](https://docs.ansible.com/ansible/latest/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
 those cannot be overridden from the group vars. In order to override, one should use

@@ -79,7 +79,7 @@ Layer | Comment
 ------|--------
 **role defaults** | provides best UX to override things for Kubespray deployments
 inventory vars | Unused
-**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
+**inventory group_vars** | Expects users to use ``all.yml``,``k8s_cluster.yml`` etc. to override things
 inventory host_vars | Unused
 playbook group_vars | Unused
 playbook host_vars | Unused
@@ -8,7 +8,7 @@ To set the number of replicas for the AWS CSI controller, you can change `aws_eb

 Make sure to add a role, for your EC2 instances hosting Kubernetes, that allows it to do the actions necessary to request a volume and attach it: [AWS CSI Policy](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/example-iam-policy.json)

-If you want to deploy the AWS EBS storage class used with the CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
+If you want to deploy the AWS EBS storage class used with the CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`.

 You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over AWS EC2 with EBS CSI Driver enabled.

@@ -33,16 +33,16 @@ This will produce an inventory that is passed into Ansible that looks like the f
   "etcd": [
     "ip-172-31-3-xxx.us-east-2.compute.internal"
   ],
-  "k8s-cluster": {
+  "k8s_cluster": {
     "children": [
       "kube_control_plane",
-      "kube-node"
+      "kube_node"
     ]
   },
   "kube_control_plane": [
     "ip-172-31-3-xxx.us-east-2.compute.internal"
   ],
-  "kube-node": [
+  "kube_node": [
     "ip-172-31-8-xxx.us-east-2.compute.internal"
   ]
 }

@@ -51,7 +51,7 @@ This will produce an inventory that is passed into Ansible that looks like the f
 Guide:

 - Create instances in AWS as needed.
-- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube-node`. You can also share roles like `kube_control_plane, etcd`
+- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube_node`. You can also share roles like `kube_control_plane, etcd`
 - Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
 - Set the following AWS credentials and info as environment variables in your terminal:

@@ -8,7 +8,7 @@ To deploy Azure Disk CSI driver, uncomment the `azure_csi_enabled` option in `gr

 ## Azure Disk CSI Storage Class

-If you want to deploy the Azure Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
+If you want to deploy the Azure Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`.

 ## Parameters

@@ -61,7 +61,7 @@ calico_network_backend: none
 ### Optional : Define the default pool CIDRs

 By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool, and `kube_pods_subnet_ipv6` for IPv6.
-In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet` and `kube_pods_subnet_ipv6` ), it starts with the default IP Pools of which IP range CIDRs can by defined in group_vars (k8s-cluster/k8s-net-calico.yml):
+In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet` and `kube_pods_subnet_ipv6` ), it starts with the default IP Pools of which IP range CIDRs can by defined in group_vars (k8s_cluster/k8s-net-calico.yml):

 ```ShellSession
 calico_pool_cidr: 10.233.64.0/20

@@ -88,14 +88,14 @@ In order to define peers on a per node basis, the `peers` variable must be defin
 NB: Ansible's `hash_behaviour` is by default set to "replace", thus defining both global and per node peers would end up with having only per node peers. If having both global and per node peers defined was meant to happen, global peers would have to be defined in hostvars for each host (as well as per node peers)

 Since calico 3.4, Calico supports advertising Kubernetes service cluster IPs over BGP, just as it advertises pod IPs.
-This can be enabled by setting the following variable as follow in group_vars (k8s-cluster/k8s-net-calico.yml)
+This can be enabled by setting the following variable as follow in group_vars (k8s_cluster/k8s-net-calico.yml)

 ```yml
 calico_advertise_cluster_ips: true
 ```

 Since calico 3.10, Calico supports advertising Kubernetes service ExternalIPs over BGP in addition to cluster IPs advertising.
-This can be enabled by setting the following variable in group_vars (k8s-cluster/k8s-net-calico.yml)
+This can be enabled by setting the following variable in group_vars (k8s_cluster/k8s-net-calico.yml)

 ```yml
 calico_advertise_service_external_ips:

@@ -121,9 +121,9 @@ recommended here:

 You need to edit your inventory and add:

-* `calico-rr` group with nodes in it. `calico-rr` can be combined with
-  `kube-node` and/or `kube_control_plane`. `calico-rr` group also must be a child
-  group of `k8s-cluster` group.
+* `calico_rr` group with nodes in it. `calico_rr` can be combined with
+  `kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
+  group of `k8s_cluster` group.
 * `cluster_id` by route reflector node/group (see details
   [here](https://hub.docker.com/r/calico/routereflector/))

@@ -147,18 +147,18 @@ node2
 node3
 node4

-[kube-node]
+[kube_node]
 node2
 node3
 node4
 node5

-[k8s-cluster:children]
-kube-node
+[k8s_cluster:children]
+kube_node
 kube_control_plane
-calico-rr
+calico_rr

-[calico-rr]
+[calico_rr]
 rr0
 rr1

@@ -10,7 +10,7 @@ You need to source the OpenStack credentials you use to deploy your machines tha

 Make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack. Otherwise [cinder](https://docs.openstack.org/cinder/latest/) won't work as expected.

-If you want to deploy the cinder provisioner used with Cinder CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
+If you want to deploy the cinder provisioner used with Cinder CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`.

 You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over OpenStack with Cinder CSI Driver enabled.

@@ -5,7 +5,7 @@ Kubespray supports basic functionality for using containerd as the default conta

 _To use the containerd container runtime set the following variables:_

-## k8s-cluster.yml
+## k8s_cluster.yml

 ```yaml
 container_manager: containerd
@@ -16,7 +16,7 @@ skip_downloads: false
 etcd_kubeadm_enabled: true
 ```

-## k8s-cluster/k8s-cluster.yml
+## k8s_cluster/k8s_cluster.yml

 ```yaml
 container_manager: crio

@@ -52,7 +52,7 @@ This parameter controls not just the number of processes but also the amount of
 (since a thread is technically a process with shared memory). See [cri-o#1921]

 In order to increase the default `pids_limit` for cri-o based deployments you need to set the `crio_pids_limit`
-for your `k8s-cluster` ansible group or per node depending on the use case.
+for your `k8s_cluster` ansible group or per node depending on the use case.

 ```yaml
 crio_pids_limit: 4096
@@ -6,7 +6,7 @@ To deploy GCP Persistent Disk CSI driver, uncomment the `gcp_pd_csi_enabled` opt

 ## GCP Persistent Disk Storage Class

-If you want to deploy the GCP Persistent Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
+If you want to deploy the GCP Persistent Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`.

 ## GCP credentials

@@ -79,7 +79,7 @@ var in inventory.
 By default, Kubespray configures kube_control_plane hosts with insecure access to
 kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
 because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
-generated will point to localhost (on kube_control_planes) and kube-node hosts will
+generated will point to localhost (on kube_control_planes) and kube_node hosts will
 connect either to a localhost nginx proxy or to a loadbalancer if configured.
 More details on this process are in the [HA guide](/docs/ha-mode.md).

@@ -81,7 +81,7 @@ loadbalancer_apiserver:
   port on the VIP address)

 This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
-into the `/etc/hosts` file of all servers in the `k8s-cluster` group and wired
+into the `/etc/hosts` file of all servers in the `k8s_cluster` group and wired
 into the generated self-signed TLS/SSL certificates as well. Note that
 the HAProxy service should as well be HA and requires a VIP management, which
 is out of scope of this doc.
@@ -52,10 +52,10 @@ You could rename *all.yml* config to something else, i.e. *kubespray.yml* and cr
 ```ini
 ...
 #Kargo groups:
-[kube-node:children]
+[kube_node:children]
 kubenode

-[k8s-cluster:children]
+[k8s_cluster:children]
 kubernetes

 [etcd:children]
@ -10,7 +10,7 @@ _Qemu_ is the only hypervisor supported by Kubespray.
|
||||||
|
|
||||||
To use Kata Containers, set the following variables:
|
To use Kata Containers, set the following variables:
|
||||||
|
|
||||||
**k8s-cluster.yml**:
|
**k8s_cluster.yml**:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
container_manager: containerd
|
container_manager: containerd
|
||||||
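# continuation sketch (the hunk above is truncated): the flag below is assumed
# from the kata_containers download entry later in this change, not shown here
kata_containers_enabled: true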
|
|
|
@ -12,7 +12,7 @@ kernel version 3.10.0-862 has a nat related bug that will affect ovs function, p
|
||||||
|
|
||||||
## How to use it
|
## How to use it
|
||||||
|
|
||||||
Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml`
|
Enable kube-ovn in `group_vars/k8s_cluster/k8s_cluster.yml`
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
...
|
...
|
||||||
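# sketch of the relevant setting (the hunk above is truncated; value assumed
# from the kube_ovn download entry later in this change)
kube_network_plugin: kube-ovn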
|
|
|
@ -37,9 +37,9 @@ For a large scaled deployments, consider the following configuration changes:
|
||||||
* Tune network prefix sizes. Those are ``kube_network_node_prefix``,
|
* Tune network prefix sizes. Those are ``kube_network_node_prefix``,
|
||||||
``kube_service_addresses`` and ``kube_pods_subnet``.
|
``kube_service_addresses`` and ``kube_pods_subnet``.
|
||||||
|
|
||||||
* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
|
* Add calico_rr nodes if you are deploying with Calico or Canal. Nodes recover
|
||||||
from host/network interruption much quicker with calico-rr. Note that
|
from host/network interruption much quicker with calico_rr. Note that
|
||||||
calico-rr role must be on a host without kube_control_plane or kube-node role (but
|
calico_rr role must be on a host without kube_control_plane or kube_node role (but
|
||||||
etcd role is okay).
|
etcd role is okay).
|
||||||
|
|
||||||
* Check out the
|
* Check out the
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
|
|
||||||
## How to use it
|
## How to use it
|
||||||
|
|
||||||
* Enable macvlan in `group_vars/k8s-cluster/k8s-cluster.yml`
|
* Enable macvlan in `group_vars/k8s_cluster/k8s_cluster.yml`
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
...
|
...
|
||||||
|
@ -10,7 +10,7 @@ kube_network_plugin: macvlan
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
* Adjust the `macvlan_interface` in `group_vars/k8s-cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file:
|
* Adjust the `macvlan_interface` in `group_vars/k8s_cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file:
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
all:
|
all:
|
||||||
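#   hosts:
#     node1:
#       macvlan_interface: eth1   # per-host sketch; the interface name is an assumed example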
|
@ -34,7 +34,7 @@ add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml`
|
||||||
|
|
||||||
The nodelocal dns IP is not reachable.
|
The nodelocal dns IP is not reachable.
|
||||||
|
|
||||||
Disable it in `sample/group_vars/k8s-cluster/k8s-cluster.yml`
|
Disable it in `sample/group_vars/k8s_cluster/k8s_cluster.yml`
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
enable_nodelocaldns: false
|
enable_nodelocaldns: false
|
||||||
|
|
|
@ -17,7 +17,7 @@ Modify the order of your master list by pushing your first entry to any other po
|
||||||
node-1:
|
node-1:
|
||||||
node-2:
|
node-2:
|
||||||
node-3:
|
node-3:
|
||||||
kube-node:
|
kube_node:
|
||||||
hosts:
|
hosts:
|
||||||
node-1:
|
node-1:
|
||||||
node-2:
|
node-2:
|
||||||
|
@ -38,7 +38,7 @@ change your inventory to:
|
||||||
node-2:
|
node-2:
|
||||||
node-3:
|
node-3:
|
||||||
node-1:
|
node-1:
|
||||||
kube-node:
|
kube_node:
|
||||||
hosts:
|
hosts:
|
||||||
node-2:
|
node-2:
|
||||||
node-3:
|
node-3:
|
||||||
|
|
|
@ -4,7 +4,7 @@ Intro to [ovn4nfv-k8s-plugin](https://github.com/opnfv/ovn4nfv-k8s-plugin)
|
||||||
|
|
||||||
## How to use it
|
## How to use it
|
||||||
|
|
||||||
* Enable ovn4nfv in `group_vars/k8s-cluster/k8s-cluster.yml`
|
* Enable ovn4nfv in `group_vars/k8s_cluster/k8s_cluster.yml`
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
...
|
...
|
||||||
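# sketch of the relevant setting (the hunk above is truncated; value assumed
# from the ovn4nfv download entry later in this change)
kube_network_plugin: ovn4nfv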
|
|
|
@ -225,7 +225,7 @@ worker-0, worker-1 and worker-2 are worker nodes. Also update the `ip` to the re
|
||||||
remove the `access_ip`.
|
remove the `access_ip`.
|
||||||
|
|
||||||
The main configuration for the cluster is stored in
|
The main configuration for the cluster is stored in
|
||||||
`inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml`. In this file we
|
`inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml`. In this file we
|
||||||
will update the `supplementary_addresses_in_ssl_keys` with a list of the IP
|
will update the `supplementary_addresses_in_ssl_keys` with a list of the IP
|
||||||
addresses of the controller nodes. In that way we can access the
|
addresses of the controller nodes. In that way we can access the
|
||||||
kubernetes API server as an administrator from outside the VPC network. You
|
kubernetes API server as an administrator from outside the VPC network. You
|
||||||
|
@ -234,7 +234,7 @@ The main configuration for the cluster is stored in
|
||||||
|
|
||||||
Kubespray also makes it easy to enable popular kubernetes add-ons. You can
|
Kubespray also makes it easy to enable popular kubernetes add-ons. You can
|
||||||
modify the
|
modify the
|
||||||
list of add-ons in `inventory/mycluster/group_vars/k8s-cluster/addons.yml`.
|
list of add-ons in `inventory/mycluster/group_vars/k8s_cluster/addons.yml`.
|
||||||
Let's enable the metrics server as this is a crucial monitoring element for
|
Let's enable the metrics server as this is a crucial monitoring element for
|
||||||
the kubernetes cluster: just change 'false' to 'true' for
|
the kubernetes cluster: just change 'false' to 'true' for
|
||||||
`metrics_server_enabled`.
|
`metrics_server_enabled`.
|
||||||
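Concretely, the change in `inventory/mycluster/group_vars/k8s_cluster/addons.yml` amounts to flipping one boolean (a minimal sketch):

```yaml
metrics_server_enabled: true
```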
|
|
|
@ -2,11 +2,11 @@
|
||||||
|
|
||||||
There are four node layout types: `default`, `separate`, `ha`, and `scale`.
|
There are four node layout types: `default`, `separate`, `ha`, and `scale`.
|
||||||
|
|
||||||
`default` is a non-HA two-node setup with one separate `kube-node`
|
`default` is a non-HA two-node setup with one separate `kube_node`
|
||||||
and the `etcd` group merged with the `kube_control_plane`.
|
and the `etcd` group merged with the `kube_control_plane`.
|
||||||
|
|
||||||
`separate` layout is when there is only one node of each type, which includes
|
`separate` layout is when there is only one node of each type, which includes
|
||||||
a kube_control_plane, kube-node, and etcd cluster member.
|
a kube_control_plane, kube_node, and etcd cluster member.
|
||||||
|
|
||||||
`ha` layout consists of two etcd nodes, two masters and a single worker node,
|
`ha` layout consists of two etcd nodes, two masters and a single worker node,
|
||||||
with role intersection.
|
with role intersection.
|
||||||
|
|
|
@ -68,9 +68,9 @@ If you want to manually control the upgrade procedure, you can use the variables
|
||||||
|
|
||||||
For instance, if you're on v2.6.0, then check out v2.7.0, run the upgrade, check out the next tag, and run the next upgrade, etc.
|
For instance, if you're on v2.6.0, then check out v2.7.0, run the upgrade, check out the next tag, and run the next upgrade, etc.
|
||||||
|
|
||||||
Assuming you don't explicitly define a kubernetes version in your k8s-cluster.yml, you simply check out the next tag and run the upgrade-cluster.yml playbook
|
Assuming you don't explicitly define a kubernetes version in your k8s_cluster.yml, you simply check out the next tag and run the upgrade-cluster.yml playbook
|
||||||
|
|
||||||
* If you do define kubernetes version in your inventory (e.g. group_vars/k8s-cluster.yml) then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=v1.11.3`
|
* If you do define kubernetes version in your inventory (e.g. group_vars/k8s_cluster.yml) then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=v1.11.3`
|
||||||
|
|
||||||
Otherwise, the upgrade will leave your cluster at the same k8s version defined in your inventory vars.
|
Otherwise, the upgrade will leave your cluster at the same k8s version defined in your inventory vars.
|
||||||
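A minimal sketch of the tag-by-tag flow described above, reusing the inventory path from the command a few lines up (the tags are illustrative):

```ShellSession
git checkout v2.7.0
ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
git checkout v2.8.0
ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml
```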
|
|
||||||
|
@ -232,7 +232,7 @@ Previous HEAD position was 6f97687d Release 2.8 robust san handling (#4478)
|
||||||
HEAD is now at a4e65c7c Upgrade to Ansible >2.7.0 (#4471)
|
HEAD is now at a4e65c7c Upgrade to Ansible >2.7.0 (#4471)
|
||||||
```
|
```
|
||||||
|
|
||||||
:warning: IMPORTANT: Some of the variable formats changed in the k8s-cluster.yml between 2.8.5 and 2.9.0 :warning:
|
:warning: IMPORTANT: Some of the variable formats changed in the k8s_cluster.yml between 2.8.5 and 2.9.0 :warning:
|
||||||
|
|
||||||
If you do not keep your inventory copy up to date, **your upgrade will fail** and your first master will be left non-functional until fixed and re-run.
|
If you do not keep your inventory copy up to date, **your upgrade will fail** and your first master will be left non-functional until fixed and re-run.
|
||||||
|
|
||||||
|
|
|
@ -38,7 +38,7 @@ Some variables of note include:
|
||||||
and access_ip are undefined
|
and access_ip are undefined
|
||||||
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
|
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
|
||||||
address instead of localhost for kube_control_planes and kube_control_plane[0] for
|
address instead of localhost for kube_control_planes and kube_control_plane[0] for
|
||||||
kube-nodes. See more details in the
|
kube_nodes. See more details in the
|
||||||
[HA guide](/docs/ha-mode.md).
|
[HA guide](/docs/ha-mode.md).
|
||||||
* *loadbalancer_apiserver_localhost* - makes all hosts connect to
|
* *loadbalancer_apiserver_localhost* - makes all hosts connect to
|
||||||
the apiserver's internally load-balanced endpoint. Mutually exclusive with the
|
the apiserver's internally load-balanced endpoint. Mutually exclusive with the
|
||||||
|
@ -59,14 +59,14 @@ following default cluster parameters:
|
||||||
* *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not
|
* *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not
|
||||||
overlap with kube_service_addresses.
|
overlap with kube_service_addresses.
|
||||||
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
|
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
|
||||||
bits in kube_pods_subnet dictate how many kube-nodes can be in the cluster. Setting this > 25 will
|
bits in kube_pods_subnet dictate how many kube_nodes can be in the cluster. Setting this > 25 will
|
||||||
raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly
|
raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly
|
||||||
(assertion not applicable to calico which doesn't use this as a hard limit, see
|
(assertion not applicable to calico which doesn't use this as a hard limit, see
|
||||||
[Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes).
|
[Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes).
|
||||||
* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.
|
* *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services.
|
||||||
* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
|
* *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``.
|
||||||
* *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``.
|
* *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``.
|
||||||
* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. Remaining bits in ``kube_pods_subnet_ipv6`` dictate how many kube-nodes can be in the cluster.
|
* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. Remaining bits in ``kube_pods_subnet_ipv6`` dictate how many kube_nodes can be in the cluster.
|
||||||
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
|
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
|
||||||
* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
|
* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
|
||||||
* *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/)
|
* *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/)
|
||||||
|
|
|
@ -11,7 +11,7 @@ Weave encryption is supported for all communication
|
||||||
* To use Weave encryption, specify a strong password (if no password, no encryption)
|
* To use Weave encryption, specify a strong password (if no password, no encryption)
|
||||||
|
|
||||||
```ShellSession
|
```ShellSession
|
||||||
# In file ./inventory/sample/group_vars/k8s-cluster.yml
|
# In file ./inventory/sample/group_vars/k8s_cluster.yml
|
||||||
weave_password: EnterPasswordHere
|
weave_password: EnterPasswordHere
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
- hosts: kube-node:kube_control_plane
|
- hosts: kube_node:kube_control_plane
|
||||||
tasks:
|
tasks:
|
||||||
- name: Remove old cloud provider config
|
- name: Remove old cloud provider config
|
||||||
file:
|
file:
|
||||||
|
|
|
@ -16,7 +16,7 @@
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s_cluster:etcd:calico_rr
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
gather_facts: false
|
gather_facts: false
|
||||||
vars:
|
vars:
|
||||||
|
@ -27,7 +27,7 @@
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: bootstrap-os, tags: bootstrap-os}
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s_cluster:etcd:calico_rr
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kubespray-defaults}
|
- { role: kubespray-defaults}
|
||||||
|
@ -47,7 +47,7 @@
|
||||||
- { role: upgrade/post-upgrade, tags: post-upgrade }
|
- { role: upgrade/post-upgrade, tags: post-upgrade }
|
||||||
|
|
||||||
- name: Finally handle worker upgrades, based on given batch size
|
- name: Finally handle worker upgrades, based on given batch size
|
||||||
hosts: kube-node:!kube_control_plane
|
hosts: kube_node:!kube_control_plane
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
serial: "{{ serial | default('20%') }}"
|
serial: "{{ serial | default('20%') }}"
|
||||||
roles:
|
roles:
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
- name: Gather facts
|
- name: Gather facts
|
||||||
hosts: k8s-cluster:etcd:calico-rr
|
hosts: k8s_cluster:etcd:calico_rr
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
tasks:
|
tasks:
|
||||||
- name: Gather minimal facts
|
- name: Gather minimal facts
|
||||||
|
|
|
@ -6,9 +6,9 @@ node1
|
||||||
[etcd]
|
[etcd]
|
||||||
node1
|
node1
|
||||||
|
|
||||||
[kube-node]
|
[kube_node]
|
||||||
node1
|
node1
|
||||||
|
|
||||||
[k8s-cluster:children]
|
[k8s_cluster:children]
|
||||||
kube-node
|
kube_node
|
||||||
kube_control_plane
|
kube_control_plane
|
||||||
|
|
|
@ -23,16 +23,16 @@
|
||||||
# node2
|
# node2
|
||||||
# node3
|
# node3
|
||||||
|
|
||||||
[kube-node]
|
[kube_node]
|
||||||
# node2
|
# node2
|
||||||
# node3
|
# node3
|
||||||
# node4
|
# node4
|
||||||
# node5
|
# node5
|
||||||
# node6
|
# node6
|
||||||
|
|
||||||
[calico-rr]
|
[calico_rr]
|
||||||
|
|
||||||
[k8s-cluster:children]
|
[k8s_cluster:children]
|
||||||
kube_control_plane
|
kube_control_plane
|
||||||
kube-node
|
kube_node
|
||||||
calico-rr
|
calico_rr
|
||||||
|
|
|
@ -0,0 +1,42 @@
|
||||||
|
---
|
||||||
|
# This is an inventory compatibility playbook to ensure we keep compatibility with old style group names
|
||||||
|
|
||||||
|
- name: Add kube-master nodes to kube_control_plane
|
||||||
|
hosts: kube-master
|
||||||
|
gather_facts: false
|
||||||
|
tasks:
|
||||||
|
- name: add nodes to kube_control_plane group
|
||||||
|
group_by:
|
||||||
|
key: 'kube_control_plane'
|
||||||
|
|
||||||
|
- name: Add kube-node nodes to kube_node
|
||||||
|
hosts: kube-node
|
||||||
|
gather_facts: false
|
||||||
|
tasks:
|
||||||
|
- name: add nodes to kube_node group
|
||||||
|
group_by:
|
||||||
|
key: 'kube_node'
|
||||||
|
|
||||||
|
- name: Add k8s-cluster nodes to k8s_cluster
|
||||||
|
hosts: k8s-cluster
|
||||||
|
gather_facts: false
|
||||||
|
tasks:
|
||||||
|
- name: add nodes to k8s_cluster group
|
||||||
|
group_by:
|
||||||
|
key: 'k8s_cluster'
|
||||||
|
|
||||||
|
- name: Add calico-rr nodes to calico_rr
|
||||||
|
hosts: calico-rr
|
||||||
|
gather_facts: false
|
||||||
|
tasks:
|
||||||
|
- name: add nodes to calico_rr group
|
||||||
|
group_by:
|
||||||
|
key: 'calico_rr'
|
||||||
|
|
||||||
|
- name: Add no-floating nodes to no_floating
|
||||||
|
hosts: no-floating
|
||||||
|
gather_facts: false
|
||||||
|
tasks:
|
||||||
|
- name: add nodes to no_floating group
|
||||||
|
group_by:
|
||||||
|
key: 'no_floating'
|
|
@ -2,14 +2,8 @@
|
||||||
- name: Check ansible version
|
- name: Check ansible version
|
||||||
import_playbook: ansible_version.yml
|
import_playbook: ansible_version.yml
|
||||||
|
|
||||||
- name: Add kube-master nodes to kube_control_plane
|
- name: Ensure compatibility with old groups
|
||||||
# This is for old inventory which contains kube-master instead of kube_control_plane
|
import_playbook: legacy_groups.yml
|
||||||
hosts: kube-master
|
|
||||||
gather_facts: false
|
|
||||||
tasks:
|
|
||||||
- name: add nodes to kube_control_plane group
|
|
||||||
group_by:
|
|
||||||
key: 'kube_control_plane'
|
|
||||||
|
|
||||||
- hosts: bastion[0]
|
- hosts: bastion[0]
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
|
|
|
@ -2,16 +2,10 @@
|
||||||
- name: Check ansible version
|
- name: Check ansible version
|
||||||
import_playbook: ansible_version.yml
|
import_playbook: ansible_version.yml
|
||||||
|
|
||||||
- name: Add kube-master nodes to kube_control_plane
|
- name: Ensure compatibility with old groups
|
||||||
# This is for old inventory which contains kube-master instead of kube_control_plane
|
import_playbook: legacy_groups.yml
|
||||||
hosts: kube-master
|
|
||||||
gather_facts: false
|
|
||||||
tasks:
|
|
||||||
- name: add nodes to kube_control_plane group
|
|
||||||
group_by:
|
|
||||||
key: 'kube_control_plane'
|
|
||||||
|
|
||||||
- hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
|
- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
|
||||||
gather_facts: no
|
gather_facts: no
|
||||||
environment: "{{ proxy_disable_env }}"
|
environment: "{{ proxy_disable_env }}"
|
||||||
vars_prompt:
|
vars_prompt:
|
||||||
|
@ -34,7 +28,7 @@
|
||||||
- { role: bootstrap-os, tags: bootstrap-os }
|
- { role: bootstrap-os, tags: bootstrap-os }
|
||||||
- { role: remove-node/pre-remove, tags: pre-remove }
|
- { role: remove-node/pre-remove, tags: pre-remove }
|
||||||
|
|
||||||
- hosts: "{{ node | default('kube-node') }}"
|
- hosts: "{{ node | default('kube_node') }}"
|
||||||
gather_facts: no
|
gather_facts: no
|
||||||
environment: "{{ proxy_disable_env }}"
|
environment: "{{ proxy_disable_env }}"
|
||||||
roles:
|
roles:
|
||||||
|
|
12
reset.yml
|
@ -2,14 +2,8 @@
|
||||||
- name: Check ansible version
|
- name: Check ansible version
|
||||||
import_playbook: ansible_version.yml
|
import_playbook: ansible_version.yml
|
||||||
|
|
||||||
- name: Add kube-master nodes to kube_control_plane
|
- name: Ensure compatibility with old groups
|
||||||
# This is for old inventory which contains kube-master instead of kube_control_plane
|
import_playbook: legacy_groups.yml
|
||||||
hosts: kube-master
|
|
||||||
gather_facts: false
|
|
||||||
tasks:
|
|
||||||
- name: add nodes to kube_control_plane group
|
|
||||||
group_by:
|
|
||||||
key: 'kube_control_plane'
|
|
||||||
|
|
||||||
- hosts: bastion[0]
|
- hosts: bastion[0]
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
|
@ -21,7 +15,7 @@
|
||||||
- name: Gather facts
|
- name: Gather facts
|
||||||
import_playbook: facts.yml
|
import_playbook: facts.yml
|
||||||
|
|
||||||
- hosts: etcd:k8s-cluster:calico-rr
|
- hosts: etcd:k8s_cluster:calico_rr
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
vars_prompt:
|
vars_prompt:
|
||||||
name: "reset_confirmation"
|
name: "reset_confirmation"
|
||||||
|
|
|
@ -548,7 +548,7 @@ downloads:
|
||||||
tag: "{{ netcheck_server_image_tag }}"
|
tag: "{{ netcheck_server_image_tag }}"
|
||||||
sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
|
sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
netcheck_agent:
|
netcheck_agent:
|
||||||
enabled: "{{ deploy_netchecker }}"
|
enabled: "{{ deploy_netchecker }}"
|
||||||
|
@ -557,7 +557,7 @@ downloads:
|
||||||
tag: "{{ netcheck_agent_image_tag }}"
|
tag: "{{ netcheck_agent_image_tag }}"
|
||||||
sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
|
sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
etcd:
|
etcd:
|
||||||
container: "{{ etcd_deployment_type != 'host' }}"
|
container: "{{ etcd_deployment_type != 'host' }}"
|
||||||
|
@ -588,7 +588,7 @@ downloads:
|
||||||
owner: "root"
|
owner: "root"
|
||||||
mode: "0755"
|
mode: "0755"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
kubeadm:
|
kubeadm:
|
||||||
enabled: true
|
enabled: true
|
||||||
|
@ -601,7 +601,7 @@ downloads:
|
||||||
owner: "root"
|
owner: "root"
|
||||||
mode: "0755"
|
mode: "0755"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
kubelet:
|
kubelet:
|
||||||
enabled: true
|
enabled: true
|
||||||
|
@ -614,7 +614,7 @@ downloads:
|
||||||
owner: "root"
|
owner: "root"
|
||||||
mode: "0755"
|
mode: "0755"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
kubectl:
|
kubectl:
|
||||||
enabled: true
|
enabled: true
|
||||||
|
@ -640,7 +640,7 @@ downloads:
|
||||||
owner: "root"
|
owner: "root"
|
||||||
mode: "0755"
|
mode: "0755"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
crun:
|
crun:
|
||||||
file: true
|
file: true
|
||||||
|
@ -653,7 +653,7 @@ downloads:
|
||||||
owner: "root"
|
owner: "root"
|
||||||
mode: "0755"
|
mode: "0755"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
kata_containers:
|
kata_containers:
|
||||||
enabled: "{{ kata_containers_enabled }}"
|
enabled: "{{ kata_containers_enabled }}"
|
||||||
|
@ -666,7 +666,7 @@ downloads:
|
||||||
owner: "root"
|
owner: "root"
|
||||||
mode: "0755"
|
mode: "0755"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
nerdctl:
|
nerdctl:
|
||||||
file: true
|
file: true
|
||||||
|
@ -679,7 +679,7 @@ downloads:
|
||||||
owner: "root"
|
owner: "root"
|
||||||
mode: "0755"
|
mode: "0755"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
cilium:
|
cilium:
|
||||||
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
|
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
|
||||||
|
@ -688,7 +688,7 @@ downloads:
|
||||||
tag: "{{ cilium_image_tag }}"
|
tag: "{{ cilium_image_tag }}"
|
||||||
sha256: "{{ cilium_digest_checksum|default(None) }}"
|
sha256: "{{ cilium_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
cilium_init:
|
cilium_init:
|
||||||
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
|
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
|
||||||
|
@ -697,7 +697,7 @@ downloads:
|
||||||
tag: "{{ cilium_init_image_tag }}"
|
tag: "{{ cilium_init_image_tag }}"
|
||||||
sha256: "{{ cilium_init_digest_checksum|default(None) }}"
|
sha256: "{{ cilium_init_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
cilium_operator:
|
cilium_operator:
|
||||||
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
|
enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}"
|
||||||
|
@ -706,7 +706,7 @@ downloads:
|
||||||
tag: "{{ cilium_operator_image_tag }}"
|
tag: "{{ cilium_operator_image_tag }}"
|
||||||
sha256: "{{ cilium_operator_digest_checksum|default(None) }}"
|
sha256: "{{ cilium_operator_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
multus:
|
multus:
|
||||||
enabled: "{{ kube_network_plugin_multus }}"
|
enabled: "{{ kube_network_plugin_multus }}"
|
||||||
|
@ -715,7 +715,7 @@ downloads:
|
||||||
tag: "{{ multus_image_tag }}"
|
tag: "{{ multus_image_tag }}"
|
||||||
sha256: "{{ multus_digest_checksum|default(None) }}"
|
sha256: "{{ multus_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
flannel:
|
flannel:
|
||||||
enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
|
enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
|
||||||
|
@ -724,7 +724,7 @@ downloads:
|
||||||
tag: "{{ flannel_image_tag }}"
|
tag: "{{ flannel_image_tag }}"
|
||||||
sha256: "{{ flannel_digest_checksum|default(None) }}"
|
sha256: "{{ flannel_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
calicoctl:
|
calicoctl:
|
||||||
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
|
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
|
||||||
|
@ -737,7 +737,7 @@ downloads:
|
||||||
owner: "root"
|
owner: "root"
|
||||||
mode: "0755"
|
mode: "0755"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
calico_node:
|
calico_node:
|
||||||
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
|
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
|
||||||
|
@ -746,7 +746,7 @@ downloads:
|
||||||
tag: "{{ calico_node_image_tag }}"
|
tag: "{{ calico_node_image_tag }}"
|
||||||
sha256: "{{ calico_node_digest_checksum|default(None) }}"
|
sha256: "{{ calico_node_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
calico_cni:
|
calico_cni:
|
||||||
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
|
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
|
||||||
|
@ -755,7 +755,7 @@ downloads:
|
||||||
tag: "{{ calico_cni_image_tag }}"
|
tag: "{{ calico_cni_image_tag }}"
|
||||||
sha256: "{{ calico_cni_digest_checksum|default(None) }}"
|
sha256: "{{ calico_cni_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
calico_policy:
|
calico_policy:
|
||||||
enabled: "{{ enable_network_policy and kube_network_plugin in ['calico', 'canal'] }}"
|
enabled: "{{ enable_network_policy and kube_network_plugin in ['calico', 'canal'] }}"
|
||||||
|
@ -764,7 +764,7 @@ downloads:
|
||||||
tag: "{{ calico_policy_image_tag }}"
|
tag: "{{ calico_policy_image_tag }}"
|
||||||
sha256: "{{ calico_policy_digest_checksum|default(None) }}"
|
sha256: "{{ calico_policy_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
calico_typha:
|
calico_typha:
|
||||||
enabled: "{{ typha_enabled }}"
|
enabled: "{{ typha_enabled }}"
|
||||||
|
@ -773,7 +773,7 @@ downloads:
|
||||||
tag: "{{ calico_typha_image_tag }}"
|
tag: "{{ calico_typha_image_tag }}"
|
||||||
sha256: "{{ calico_typha_digest_checksum|default(None) }}"
|
sha256: "{{ calico_typha_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
calico_crds:
|
calico_crds:
|
||||||
file: true
|
file: true
|
||||||
|
@ -799,7 +799,7 @@ downloads:
|
||||||
tag: "{{ weave_kube_image_tag }}"
|
tag: "{{ weave_kube_image_tag }}"
|
||||||
sha256: "{{ weave_kube_digest_checksum|default(None) }}"
|
sha256: "{{ weave_kube_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
weave_npc:
|
weave_npc:
|
||||||
enabled: "{{ kube_network_plugin == 'weave' }}"
|
enabled: "{{ kube_network_plugin == 'weave' }}"
|
||||||
|
@ -808,7 +808,7 @@ downloads:
|
||||||
tag: "{{ weave_npc_image_tag }}"
|
tag: "{{ weave_npc_image_tag }}"
|
||||||
sha256: "{{ weave_npc_digest_checksum|default(None) }}"
|
sha256: "{{ weave_npc_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
ovn4nfv:
|
ovn4nfv:
|
||||||
enabled: "{{ kube_network_plugin == 'ovn4nfv' }}"
|
enabled: "{{ kube_network_plugin == 'ovn4nfv' }}"
|
||||||
|
@ -817,7 +817,7 @@ downloads:
|
||||||
tag: "{{ ovn4nfv_k8s_plugin_image_tag }}"
|
tag: "{{ ovn4nfv_k8s_plugin_image_tag }}"
|
||||||
sha256: "{{ ovn4nfv_k8s_plugin_digest_checksum|default(None) }}"
|
sha256: "{{ ovn4nfv_k8s_plugin_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
kube_ovn:
|
kube_ovn:
|
||||||
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
|
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
|
||||||
|
@ -826,7 +826,7 @@ downloads:
|
||||||
tag: "{{ kube_ovn_container_image_tag }}"
|
tag: "{{ kube_ovn_container_image_tag }}"
|
||||||
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
|
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
kube_router:
|
kube_router:
|
||||||
enabled: "{{ kube_network_plugin == 'kube-router' }}"
|
enabled: "{{ kube_network_plugin == 'kube-router' }}"
|
||||||
|
@ -835,7 +835,7 @@ downloads:
|
||||||
tag: "{{ kube_router_image_tag }}"
|
tag: "{{ kube_router_image_tag }}"
|
||||||
sha256: "{{ kube_router_digest_checksum|default(None) }}"
|
sha256: "{{ kube_router_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
pod_infra:
|
pod_infra:
|
||||||
enabled: true
|
enabled: true
|
||||||
|
@ -844,7 +844,7 @@ downloads:
|
||||||
tag: "{{ pod_infra_image_tag }}"
|
tag: "{{ pod_infra_image_tag }}"
|
||||||
sha256: "{{ pod_infra_digest_checksum|default(None) }}"
|
sha256: "{{ pod_infra_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
install_socat:
|
install_socat:
|
||||||
enabled: "{{ ansible_os_family in ['Flatcar Container Linux by Kinvolk'] }}"
|
enabled: "{{ ansible_os_family in ['Flatcar Container Linux by Kinvolk'] }}"
|
||||||
|
@ -853,7 +853,7 @@ downloads:
|
||||||
tag: "{{ install_socat_image_tag }}"
|
tag: "{{ install_socat_image_tag }}"
|
||||||
sha256: "{{ install_socat_digest_checksum|default(None) }}"
|
sha256: "{{ install_socat_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
nginx:
|
nginx:
|
||||||
enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}"
|
enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}"
|
||||||
|
@ -862,7 +862,7 @@ downloads:
|
||||||
tag: "{{ nginx_image_tag }}"
|
tag: "{{ nginx_image_tag }}"
|
||||||
sha256: "{{ nginx_digest_checksum|default(None) }}"
|
sha256: "{{ nginx_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
haproxy:
|
haproxy:
|
||||||
enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' }}"
|
enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' }}"
|
||||||
|
@ -871,7 +871,7 @@ downloads:
|
||||||
tag: "{{ haproxy_image_tag }}"
|
tag: "{{ haproxy_image_tag }}"
|
||||||
sha256: "{{ haproxy_digest_checksum|default(None) }}"
|
sha256: "{{ haproxy_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
coredns:
|
coredns:
|
||||||
enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
|
enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
|
||||||
|
@ -889,7 +889,7 @@ downloads:
|
||||||
tag: "{{ nodelocaldns_image_tag }}"
|
tag: "{{ nodelocaldns_image_tag }}"
|
||||||
sha256: "{{ nodelocaldns_digest_checksum|default(None) }}"
|
sha256: "{{ nodelocaldns_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- k8s-cluster
|
- k8s_cluster
|
||||||
|
|
||||||
dnsautoscaler:
|
dnsautoscaler:
|
||||||
enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
|
enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
|
||||||
|
@ -927,7 +927,7 @@ downloads:
|
||||||
tag: "{{ registry_image_tag }}"
|
tag: "{{ registry_image_tag }}"
|
||||||
sha256: "{{ registry_digest_checksum|default(None) }}"
|
sha256: "{{ registry_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
registry_proxy:
|
registry_proxy:
|
||||||
enabled: "{{ registry_enabled }}"
|
enabled: "{{ registry_enabled }}"
|
||||||
|
@ -936,7 +936,7 @@ downloads:
|
||||||
tag: "{{ registry_proxy_image_tag }}"
|
tag: "{{ registry_proxy_image_tag }}"
|
||||||
sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
|
sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
metrics_server:
|
metrics_server:
|
||||||
enabled: "{{ metrics_server_enabled }}"
|
enabled: "{{ metrics_server_enabled }}"
|
||||||
|
@ -964,7 +964,7 @@ downloads:
|
||||||
tag: "{{ local_volume_provisioner_image_tag }}"
|
tag: "{{ local_volume_provisioner_image_tag }}"
|
||||||
sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
|
sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
cephfs_provisioner:
|
cephfs_provisioner:
|
||||||
enabled: "{{ cephfs_provisioner_enabled }}"
|
enabled: "{{ cephfs_provisioner_enabled }}"
|
||||||
|
@ -973,7 +973,7 @@ downloads:
|
||||||
tag: "{{ cephfs_provisioner_image_tag }}"
|
tag: "{{ cephfs_provisioner_image_tag }}"
|
||||||
sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
|
sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
rbd_provisioner:
|
rbd_provisioner:
|
||||||
enabled: "{{ rbd_provisioner_enabled }}"
|
enabled: "{{ rbd_provisioner_enabled }}"
|
||||||
|
@ -982,7 +982,7 @@ downloads:
|
||||||
tag: "{{ rbd_provisioner_image_tag }}"
|
tag: "{{ rbd_provisioner_image_tag }}"
|
||||||
sha256: "{{ rbd_provisioner_digest_checksum|default(None) }}"
|
sha256: "{{ rbd_provisioner_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
local_path_provisioner:
|
local_path_provisioner:
|
||||||
enabled: "{{ local_path_provisioner_enabled }}"
|
enabled: "{{ local_path_provisioner_enabled }}"
|
||||||
|
@ -991,7 +991,7 @@ downloads:
|
||||||
tag: "{{ local_path_provisioner_image_tag }}"
|
tag: "{{ local_path_provisioner_image_tag }}"
|
||||||
sha256: "{{ local_path_provisioner_digest_checksum|default(None) }}"
|
sha256: "{{ local_path_provisioner_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
ingress_nginx_controller:
|
ingress_nginx_controller:
|
||||||
enabled: "{{ ingress_nginx_enabled }}"
|
enabled: "{{ ingress_nginx_enabled }}"
|
||||||
|
@ -1000,7 +1000,7 @@ downloads:
|
||||||
tag: "{{ ingress_nginx_controller_image_tag }}"
|
tag: "{{ ingress_nginx_controller_image_tag }}"
|
||||||
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
|
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
ingress_ambassador_controller:
|
ingress_ambassador_controller:
|
||||||
enabled: "{{ ingress_ambassador_enabled }}"
|
enabled: "{{ ingress_ambassador_enabled }}"
|
||||||
|
@ -1009,7 +1009,7 @@ downloads:
|
||||||
tag: "{{ ingress_ambassador_image_tag }}"
|
tag: "{{ ingress_ambassador_image_tag }}"
|
||||||
sha256: "{{ ingress_ambassador_digest_checksum|default(None) }}"
|
sha256: "{{ ingress_ambassador_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
ingress_alb_controller:
|
ingress_alb_controller:
|
||||||
enabled: "{{ ingress_alb_enabled }}"
|
enabled: "{{ ingress_alb_enabled }}"
|
||||||
|
@ -1018,7 +1018,7 @@ downloads:
|
||||||
tag: "{{ alb_ingress_image_tag }}"
|
tag: "{{ alb_ingress_image_tag }}"
|
||||||
sha256: "{{ ingress_alb_controller_digest_checksum|default(None) }}"
|
sha256: "{{ ingress_alb_controller_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
cert_manager_controller:
|
cert_manager_controller:
|
||||||
enabled: "{{ cert_manager_enabled }}"
|
enabled: "{{ cert_manager_enabled }}"
|
||||||
|
@ -1027,7 +1027,7 @@ downloads:
|
||||||
tag: "{{ cert_manager_controller_image_tag }}"
|
tag: "{{ cert_manager_controller_image_tag }}"
|
||||||
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
|
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
cert_manager_cainjector:
|
cert_manager_cainjector:
|
||||||
enabled: "{{ cert_manager_enabled }}"
|
enabled: "{{ cert_manager_enabled }}"
|
||||||
|
@ -1036,7 +1036,7 @@ downloads:
|
||||||
tag: "{{ cert_manager_cainjector_image_tag }}"
|
tag: "{{ cert_manager_cainjector_image_tag }}"
|
||||||
sha256: "{{ cert_manager_cainjector_digest_checksum|default(None) }}"
|
sha256: "{{ cert_manager_cainjector_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
cert_manager_webhook:
|
cert_manager_webhook:
|
||||||
enabled: "{{ cert_manager_enabled }}"
|
enabled: "{{ cert_manager_enabled }}"
|
||||||
|
@ -1045,7 +1045,7 @@ downloads:
|
||||||
tag: "{{ cert_manager_webhook_image_tag }}"
|
tag: "{{ cert_manager_webhook_image_tag }}"
|
||||||
sha256: "{{ cert_manager_webhook_digest_checksum|default(None) }}"
|
sha256: "{{ cert_manager_webhook_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
csi_attacher:
|
csi_attacher:
|
||||||
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
|
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
|
||||||
|
@ -1054,7 +1054,7 @@ downloads:
|
||||||
tag: "{{ csi_attacher_image_tag }}"
|
tag: "{{ csi_attacher_image_tag }}"
|
||||||
sha256: "{{ csi_attacher_digest_checksum|default(None) }}"
|
sha256: "{{ csi_attacher_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
csi_provisioner:
|
csi_provisioner:
|
||||||
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
|
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
|
||||||
|
@ -1063,7 +1063,7 @@ downloads:
|
||||||
tag: "{{ csi_provisioner_image_tag }}"
|
tag: "{{ csi_provisioner_image_tag }}"
|
||||||
sha256: "{{ csi_provisioner_digest_checksum|default(None) }}"
|
sha256: "{{ csi_provisioner_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
csi_snapshotter:
|
csi_snapshotter:
|
||||||
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
|
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
|
||||||
|
@ -1072,7 +1072,7 @@ downloads:
|
||||||
tag: "{{ csi_snapshotter_image_tag }}"
|
tag: "{{ csi_snapshotter_image_tag }}"
|
||||||
sha256: "{{ csi_snapshotter_digest_checksum|default(None) }}"
|
sha256: "{{ csi_snapshotter_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
snapshot_controller:
|
snapshot_controller:
|
||||||
enabled: "{{ cinder_csi_enabled }}"
|
enabled: "{{ cinder_csi_enabled }}"
|
||||||
|
@ -1081,7 +1081,7 @@ downloads:
|
||||||
tag: "{{ snapshot_controller_image_tag }}"
|
tag: "{{ snapshot_controller_image_tag }}"
|
||||||
sha256: "{{ snapshot_controller_digest_checksum|default(None) }}"
|
sha256: "{{ snapshot_controller_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
csi_resizer:
|
csi_resizer:
|
||||||
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
|
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
|
||||||
|
@ -1090,7 +1090,7 @@ downloads:
|
||||||
tag: "{{ csi_resizer_image_tag }}"
|
tag: "{{ csi_resizer_image_tag }}"
|
||||||
sha256: "{{ csi_resizer_digest_checksum|default(None) }}"
|
sha256: "{{ csi_resizer_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
csi_node_driver_registrar:
|
csi_node_driver_registrar:
|
||||||
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
|
enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}"
|
||||||
|
@ -1099,7 +1099,7 @@ downloads:
|
||||||
tag: "{{ csi_node_driver_registrar_image_tag }}"
|
tag: "{{ csi_node_driver_registrar_image_tag }}"
|
||||||
sha256: "{{ csi_node_driver_registrar_digest_checksum|default(None) }}"
|
sha256: "{{ csi_node_driver_registrar_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
cinder_csi_plugin:
|
cinder_csi_plugin:
|
||||||
enabled: "{{ cinder_csi_enabled }}"
|
enabled: "{{ cinder_csi_enabled }}"
|
||||||
|
@ -1108,7 +1108,7 @@ downloads:
|
||||||
tag: "{{ cinder_csi_plugin_image_tag }}"
|
tag: "{{ cinder_csi_plugin_image_tag }}"
|
||||||
sha256: "{{ cinder_csi_plugin_digest_checksum|default(None) }}"
|
sha256: "{{ cinder_csi_plugin_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
aws_ebs_csi_plugin:
|
aws_ebs_csi_plugin:
|
||||||
enabled: "{{ aws_ebs_csi_enabled }}"
|
enabled: "{{ aws_ebs_csi_enabled }}"
|
||||||
|
@ -1117,7 +1117,7 @@ downloads:
|
||||||
tag: "{{ aws_ebs_csi_plugin_image_tag }}"
|
tag: "{{ aws_ebs_csi_plugin_image_tag }}"
|
||||||
sha256: "{{ aws_ebs_csi_plugin_digest_checksum|default(None) }}"
|
sha256: "{{ aws_ebs_csi_plugin_digest_checksum|default(None) }}"
|
||||||
groups:
|
groups:
|
||||||
- kube-node
|
- kube_node
|
||||||
|
|
||||||
dashboard:
|
dashboard:
|
||||||
enabled: "{{ dashboard_enabled }}"
|
enabled: "{{ dashboard_enabled }}"
|
||||||
|
|
|
@ -55,7 +55,7 @@
|
||||||
container: true
|
container: true
|
||||||
repo: "{{ item | regex_replace('^(.*):.*$','\\1') }}"
|
repo: "{{ item | regex_replace('^(.*):.*$','\\1') }}"
|
||||||
tag: "{{ item | regex_replace('^.*:(.*)$','\\1') }}"
|
tag: "{{ item | regex_replace('^.*:(.*)$','\\1') }}"
|
||||||
groups: k8s-cluster
|
groups: k8s_cluster
|
||||||
loop: "{{ kubeadm_images_list | flatten(levels=1) }}"
|
loop: "{{ kubeadm_images_list | flatten(levels=1) }}"
|
||||||
register: kubeadm_images_cooked
|
register: kubeadm_images_cooked
|
||||||
run_once: true
|
run_once: true
|
||||||
|
|
|
@ -55,7 +55,7 @@ etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %
|
||||||
|
|
||||||
etcd_blkio_weight: 1000
|
etcd_blkio_weight: 1000
|
||||||
|
|
||||||
etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}"
|
etcd_node_cert_hosts: "{{ groups['k8s_cluster'] | union(groups.get('calico_rr', [])) }}"
|
||||||
|
|
||||||
etcd_compaction_retention: "8"
|
etcd_compaction_retention: "8"
|
||||||
|
|
||||||
|
|
|
@ -33,8 +33,8 @@
|
||||||
stat:
|
stat:
|
||||||
path: "{{ etcd_cert_dir }}/{{ item }}"
|
path: "{{ etcd_cert_dir }}/{{ item }}"
|
||||||
register: etcd_node_certs
|
register: etcd_node_certs
|
||||||
when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
|
when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
|
||||||
inventory_hostname in groups['k8s-cluster'])
|
inventory_hostname in groups['k8s_cluster'])
|
||||||
with_items:
|
with_items:
|
||||||
- ca.pem
|
- ca.pem
|
||||||
- node-{{ inventory_hostname }}.pem
|
- node-{{ inventory_hostname }}.pem
|
||||||
|
@ -56,7 +56,7 @@
|
||||||
'{{ etcd_cert_dir }}/member-{{ host }}.pem',
|
'{{ etcd_cert_dir }}/member-{{ host }}.pem',
|
||||||
'{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
|
'{{ etcd_cert_dir }}/member-{{ host }}-key.pem',
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
{% set k8s_nodes = groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort %}
|
{% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort %}
|
||||||
{% for host in k8s_nodes %}
|
{% for host in k8s_nodes %}
|
||||||
'{{ etcd_cert_dir }}/node-{{ host }}.pem',
|
'{{ etcd_cert_dir }}/node-{{ host }}.pem',
|
||||||
'{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
|
'{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
|
||||||
|
@ -89,7 +89,7 @@
|
||||||
set_fact:
|
set_fact:
|
||||||
gen_node_certs: |-
|
gen_node_certs: |-
|
||||||
{
|
{
|
||||||
{% set k8s_nodes = groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort -%}
|
{% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort -%}
|
||||||
{% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
|
{% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
|
||||||
{% for host in k8s_nodes -%}
|
{% for host in k8s_nodes -%}
|
||||||
{% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %}
|
{% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %}
|
||||||
|
@ -125,8 +125,8 @@
|
||||||
set_fact:
|
set_fact:
|
||||||
kubernetes_host_requires_sync: true
|
kubernetes_host_requires_sync: true
|
||||||
when:
|
when:
|
||||||
- (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
|
- (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
|
||||||
inventory_hostname in groups['k8s-cluster']) and
|
inventory_hostname in groups['k8s_cluster']) and
|
||||||
inventory_hostname not in groups['etcd']
|
inventory_hostname not in groups['etcd']
|
||||||
- (not etcd_node_certs.results[0].stat.exists|default(false)) or
|
- (not etcd_node_certs.results[0].stat.exists|default(false)) or
|
||||||
(not etcd_node_certs.results[1].stat.exists|default(false)) or
|
(not etcd_node_certs.results[1].stat.exists|default(false)) or
|
||||||
|
|
|
@ -59,7 +59,7 @@
|
||||||
{{ m }}
|
{{ m }}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% endfor %}"
|
{% endfor %}"
|
||||||
- HOSTS: "{% for h in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
|
- HOSTS: "{% for h in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %}
|
||||||
{% if gen_node_certs[h] %}
|
{% if gen_node_certs[h] %}
|
||||||
{{ h }}
|
{{ h }}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
@ -109,7 +109,7 @@
|
||||||
src: "{{ item }}"
|
src: "{{ item }}"
|
||||||
register: etcd_master_node_certs
|
register: etcd_master_node_certs
|
||||||
with_items:
|
with_items:
|
||||||
- "[{% for node in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
|
- "[{% for node in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %}
|
||||||
'{{ etcd_cert_dir }}/node-{{ node }}.pem',
|
'{{ etcd_cert_dir }}/node-{{ node }}.pem',
|
||||||
'{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
|
'{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
|
||||||
{% endfor %}]"
|
{% endfor %}]"
|
||||||
|
@ -144,8 +144,8 @@
|
||||||
- name: "Check_certs | Set 'sync_certs' to true on nodes"
|
- name: "Check_certs | Set 'sync_certs' to true on nodes"
|
||||||
set_fact:
|
set_fact:
|
||||||
sync_certs: true
|
sync_certs: true
|
||||||
when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
|
when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
|
||||||
inventory_hostname in groups['k8s-cluster']) and
|
inventory_hostname in groups['k8s_cluster']) and
|
||||||
inventory_hostname not in groups['etcd']
|
inventory_hostname not in groups['etcd']
|
||||||
with_items:
|
with_items:
|
||||||
- "{{ my_etcd_node_certs }}"
|
- "{{ my_etcd_node_certs }}"
|
||||||
|
@ -159,8 +159,8 @@
|
||||||
register: etcd_node_certs
|
register: etcd_node_certs
|
||||||
check_mode: no
|
check_mode: no
|
||||||
delegate_to: "{{ groups['etcd'][0] }}"
|
delegate_to: "{{ groups['etcd'][0] }}"
|
||||||
when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
|
when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
|
||||||
inventory_hostname in groups['k8s-cluster']) and
|
inventory_hostname in groups['k8s_cluster']) and
|
||||||
sync_certs|default(false) and inventory_hostname not in groups['etcd']
|
sync_certs|default(false) and inventory_hostname not in groups['etcd']
|
||||||
|
|
||||||
- name: Gen_certs | Copy certs on nodes
|
- name: Gen_certs | Copy certs on nodes
|
||||||
|
@ -170,8 +170,8 @@
|
||||||
no_log: true
|
no_log: true
|
||||||
changed_when: false
|
changed_when: false
|
||||||
check_mode: no
|
check_mode: no
|
||||||
when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
|
when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or
|
||||||
inventory_hostname in groups['k8s-cluster']) and
|
inventory_hostname in groups['k8s_cluster']) and
|
||||||
sync_certs|default(false) and inventory_hostname not in groups['etcd']
|
sync_certs|default(false) and inventory_hostname not in groups['etcd']
|
||||||
notify: set etcd_secret_changed
|
notify: set etcd_secret_changed
|
||||||
|
|
||||||
|
|
|
@ -20,7 +20,7 @@
|
||||||
register: "etcd_client_cert_serial_result"
|
register: "etcd_client_cert_serial_result"
|
||||||
changed_when: false
|
changed_when: false
|
||||||
when:
|
when:
|
||||||
- inventory_hostname in groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort
|
- inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
|
||||||
tags:
|
tags:
|
||||||
- master
|
- master
|
||||||
- network
|
- network
|
||||||
|
@ -29,7 +29,7 @@
|
||||||
set_fact:
|
set_fact:
|
||||||
etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}"
|
etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}"
|
||||||
when:
|
when:
|
||||||
- inventory_hostname in groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort
|
- inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
|
||||||
tags:
|
tags:
|
||||||
- master
|
- master
|
||||||
- network
|
- network
|
||||||
|
|
|
@ -5,11 +5,11 @@
|
||||||
- name: Cinder CSI Driver | Write cacert file
|
- name: Cinder CSI Driver | Write cacert file
|
||||||
include_tasks: cinder-write-cacert.yml
|
include_tasks: cinder-write-cacert.yml
|
||||||
run_once: true
|
run_once: true
|
||||||
loop: "{{ groups['k8s-cluster'] }}"
|
loop: "{{ groups['k8s_cluster'] }}"
|
||||||
loop_control:
|
loop_control:
|
||||||
loop_var: delegate_host_to_write_cacert
|
loop_var: delegate_host_to_write_cacert
|
||||||
when:
|
when:
|
||||||
- inventory_hostname in groups['k8s-cluster']
|
- inventory_hostname in groups['k8s_cluster']
|
||||||
- cinder_cacert is defined
|
- cinder_cacert is defined
|
||||||
- cinder_cacert | length > 0
|
- cinder_cacert | length > 0
|
||||||
tags: cinder-csi-driver
|
tags: cinder-csi-driver
|
||||||
|
|
|
@ -5,11 +5,11 @@
|
||||||
- name: External OpenStack Cloud Controller | Write cacert file
|
- name: External OpenStack Cloud Controller | Write cacert file
|
||||||
include_tasks: openstack-write-cacert.yml
|
include_tasks: openstack-write-cacert.yml
|
||||||
run_once: true
|
run_once: true
|
||||||
loop: "{{ groups['k8s-cluster'] }}"
|
loop: "{{ groups['k8s_cluster'] }}"
|
||||||
loop_control:
|
loop_control:
|
||||||
loop_var: delegate_host_to_write_cacert
|
loop_var: delegate_host_to_write_cacert
|
||||||
when:
|
when:
|
||||||
- inventory_hostname in groups['k8s-cluster']
|
- inventory_hostname in groups['k8s_cluster']
|
||||||
- external_openstack_cacert is defined
|
- external_openstack_cacert is defined
|
||||||
- external_openstack_cacert | length > 0
|
- external_openstack_cacert | length > 0
|
||||||
tags: external-openstack
|
tags: external-openstack
|
||||||
|
|
|
@ -4,7 +4,7 @@
|
||||||
include_tasks: basedirs.yml
|
include_tasks: basedirs.yml
|
||||||
loop_control:
|
loop_control:
|
||||||
loop_var: delegate_host_base_dir
|
loop_var: delegate_host_base_dir
|
||||||
loop: "{{ groups['k8s-cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list }}"
|
loop: "{{ groups['k8s_cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list }}"
|
||||||
|
|
||||||
- name: Local Volume Provisioner | Create addon dir
|
- name: Local Volume Provisioner | Create addon dir
|
||||||
file:
|
file:
|
||||||
|
|
|
@ -33,7 +33,7 @@ LS0tLS1CRUdJTiBSU0Eg...
|
||||||
|
|
||||||
For further information, read the official [Cert-Manager CA Configuration](https://cert-manager.io/docs/configuration/ca/) doc.
|
For further information, read the official [Cert-Manager CA Configuration](https://cert-manager.io/docs/configuration/ca/) doc.
|
||||||
|
|
||||||
Once the base64 encoded values have been added to `templates\secret-cert-manager.yml.j2`, cert-manager can now be enabled by editing your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s-cluster\addons.yml` and setting `cert_manager_enabled` to true.
|
Once the base64 encoded values have been added to `templates\secret-cert-manager.yml.j2`, cert-manager can now be enabled by editing your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s_cluster\addons.yml` and setting `cert_manager_enabled` to true.
|
||||||
|
|
||||||
```ini
|
```ini
|
||||||
# Cert manager deployment
|
# Cert manager deployment
|
||||||
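# minimal sketch: only the flag named in the text above is changed
cert_manager_enabled: true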
|
@@ -46,7 +46,7 @@ If you don't have a TLS Root CA certificate and key available, you can create th
 
 A common use-case for cert-manager is requesting TLS signed certificates to secure your ingress resources. This can be done by simply adding annotations to your Ingress resources and cert-manager will facilitate creating the Certificate resource for you. A small sub-component of cert-manager, ingress-shim, is responsible for this.
 
-To enable the Nginx Ingress controller as part of your Kubespray deployment, simply edit your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s-cluster\addons.yml` and set `ingress_nginx_enabled` to true.
+To enable the Nginx Ingress controller as part of your Kubespray deployment, simply edit your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s_cluster\addons.yml` and set `ingress_nginx_enabled` to true.
 
 ```ini
 # Nginx ingress controller deployment
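Illustrative only, not part of this commit: the corresponding setting from the hunk above, placed in the renamed group_vars path, would be along these lines (a sketch; only the variable name and path come from the doc text being diffed).

```yaml
# inventory/sample/group_vars/k8s_cluster/addons.yml (sketch)
ingress_nginx_enabled: true
```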
@@ -11,7 +11,7 @@ It deploys MetalLB into Kubernetes and sets up a layer 2 or BGP load-balancer.
 
 In the default, MetalLB is not deployed into your Kubernetes cluster.
 You can override the defaults by copying the contents of roles/kubernetes-apps/metallb/defaults/main.yml
-to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/groups_vars/k8s-cluster/addons.yml
+to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/groups_vars/k8s_cluster/addons.yml
 and updating metallb_enabled option to `true`.
 In addition you need to update metallb_ip_range option on the addons.yml at least for suiting your network
 environment, because MetalLB allocates external IP addresses from this metallb_ip_range option.
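Illustrative only, not part of this commit: a minimal sketch of what the MetalLB settings described above might look like in the renamed group_vars directory. The variable names come from the doc text in the hunk; the address range is an assumed example, and the exact value structure should be taken from the role defaults file the doc points to.

```yaml
# inventory/mycluster/group_vars/k8s_cluster/addons.yml (sketch)
metallb_enabled: true
metallb_ip_range:
  - "10.5.0.50-10.5.0.99"   # example range; use addresses from your own network
```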
@@ -2,7 +2,7 @@
 # If all masters have node role, there are no tainted master and toleration should not be specified.
 - name: Check all masters are node or not
   set_fact:
-    masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
+    masters_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
 
 - name: Metrics Server | Delete addon dir
   file:

@@ -192,5 +192,5 @@
   with_items:
     - "node-role.kubernetes.io/master:NoSchedule-"
     - "node-role.kubernetes.io/control-plane:NoSchedule-"
-  when: inventory_hostname in groups['kube-node']
+  when: inventory_hostname in groups['kube_node']
   failed_when: false

@@ -16,7 +16,7 @@ nodeRegistration:
 {% if kube_override_hostname|default('') %}
   name: {{ kube_override_hostname }}
 {% endif %}
-{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube-node'] %}
+{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %}
   taints:
   - effect: NoSchedule
     key: node-role.kubernetes.io/master

@@ -50,7 +50,7 @@
   register: "etcd_client_cert_serial_result"
   changed_when: false
   when:
-    - inventory_hostname in groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort
+    - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort
   tags:
     - network
 

@@ -21,7 +21,7 @@ caCertPath: {{ kube_cert_dir }}/ca.crt
 nodeRegistration:
   name: {{ kube_override_hostname }}
   criSocket: {{ cri_socket }}
-{% if 'calico-rr' in group_names and 'kube-node' not in group_names %}
+{% if 'calico_rr' in group_names and 'kube_node' not in group_names %}
   taints:
   - effect: NoSchedule
     key: node-role.kubernetes.io/calico-rr

@@ -81,7 +81,7 @@ resolvConf: "{{ kube_resolv_conf }}"
 {% if kubelet_config_extra_args %}
 {{ kubelet_config_extra_args | to_nice_yaml(indent=2) }}
 {% endif %}
-{% if inventory_hostname in groups['kube-node'] and kubelet_node_config_extra_args %}
+{% if inventory_hostname in groups['kube_node'] and kubelet_node_config_extra_args %}
 {{ kubelet_node_config_extra_args | to_nice_yaml(indent=2) }}
 {% endif %}
 {% if tls_min_version is defined %}

@@ -34,7 +34,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% endif %}
 {% endif %}
 
-KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube-node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
+KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube_node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
 {% if kubelet_flexvolumes_plugins_dir is defined %}
 KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
 {% endif %}

@@ -1,10 +1,10 @@
 ---
-- name: Stop if either kube_control_plane or kube-node group is empty
+- name: Stop if either kube_control_plane or kube_node group is empty
   assert:
     that: "groups.get('{{ item }}')"
   with_items:
     - kube_control_plane
-    - kube-node
+    - kube_node
   run_once: true
   when: not ignore_assert_errors
 

@@ -86,7 +86,7 @@
     that: ansible_memtotal_mb >= minimal_node_memory_mb
   when:
     - not ignore_assert_errors
-    - inventory_hostname in groups['kube-node']
+    - inventory_hostname in groups['kube_node']
 
 # This assertion will fail on the safe side: One can indeed schedule more pods
 # on a node than the CIDR-range has space for when additional pods use the host

@@ -99,7 +99,7 @@
     msg: "Do not schedule more pods on a node than inet addresses are available."
   when:
     - not ignore_assert_errors
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - kube_network_node_prefix is defined
     - kube_network_plugin != 'calico'
 

@@ -207,14 +207,14 @@
     - inventory_hostname == groups['kube_control_plane'][0]
   run_once: yes
 
-- name: "Check that calico_rr nodes are in k8s-cluster group"
+- name: "Check that calico_rr nodes are in k8s_cluster group"
   assert:
     that:
-      - '"k8s-cluster" in group_names'
-    msg: "calico-rr must be a child group of k8s-cluster group"
+      - '"k8s_cluster" in group_names'
+    msg: "calico_rr must be a child group of k8s_cluster group"
   when:
     - kube_network_plugin == 'calico'
-    - '"calico-rr" in group_names'
+    - '"calico_rr" in group_names'
 
 - name: "Check that kube_service_addresses is a network range"
   assert:
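Illustrative only, not part of this commit: the checks above expect the renamed groups to exist and `calico_rr` hosts to also be part of `k8s_cluster`. A hedged sketch of an inventory laid out with the new group names follows; host names and the exact grouping are assumptions, not taken from this change.

```yaml
# inventory/mycluster/hosts.yaml (sketch, assumed host names)
all:
  children:
    kube_control_plane:
      hosts:
        node1:
    kube_node:
      hosts:
        node1:
        node2:
    etcd:
      hosts:
        node1:
    calico_rr:
      hosts: {}
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
        calico_rr:
```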
@@ -4,7 +4,7 @@
     path: "{{ item }}"
     state: directory
     owner: kube
-  when: inventory_hostname in groups['k8s-cluster']
+  when: inventory_hostname in groups['k8s_cluster']
   become: true
   tags:
     - kubelet

@@ -28,7 +28,7 @@
     path: "{{ item }}"
     state: directory
     owner: root
-  when: inventory_hostname in groups['k8s-cluster']
+  when: inventory_hostname in groups['k8s_cluster']
   become: true
   tags:
     - kubelet

@@ -51,7 +51,7 @@
     get_mime: no
   register: kube_cert_compat_dir_check
   when:
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - kube_cert_dir != kube_cert_compat_dir
 
 - name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498)

@@ -60,7 +60,7 @@
     dest: "{{ kube_cert_compat_dir }}"
     state: link
   when:
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - kube_cert_dir != kube_cert_compat_dir
     - not kube_cert_compat_dir_check.stat.exists
 

@@ -75,7 +75,7 @@
     - "/var/lib/calico"
   when:
     - kube_network_plugin in ["calico", "weave", "canal", "flannel", "cilium", "kube-ovn", "ovn4nfv", "kube-router", "macvlan"]
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
   tags:
     - network
     - cilium

@@ -96,7 +96,7 @@
     mode: "{{ local_volume_provisioner_directory_mode }}"
   with_items: "{{ local_volume_provisioner_storage_classes.keys() | list }}"
   when:
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - local_volume_provisioner_enabled
   tags:
     - persistent_volumes

@@ -2,7 +2,7 @@
 - name: Hosts | create list from inventory
   set_fact:
     etc_hosts_inventory_block: |-
-      {% for item in (groups['k8s-cluster'] + groups['etcd']|default([]) + groups['calico-rr']|default([]))|unique -%}
+      {% for item in (groups['k8s_cluster'] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%}
       {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%}
       {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}
       {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }}{% endif %} {{ item }}.{{ dns_domain }} {{ item }}

@@ -27,7 +27,7 @@
     TOKEN_DIR: "{{ kube_token_dir }}"
   with_nested:
     - [ 'system:kubelet' ]
-    - "{{ groups['kube-node'] }}"
+    - "{{ groups['kube_node'] }}"
   register: gentoken_node
   changed_when: "'Added' in gentoken_node.stdout"
   run_once: yes

@@ -147,8 +147,8 @@ kube_log_level: 2
 kube_network_plugin: calico
 kube_network_plugin_multus: false
 
-# Determines if calico-rr group exists
-peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"
+# Determines if calico_rr group exists
+peer_with_calico_rr: "{{ 'calico_rr' in groups and groups['calico_rr']|length > 0 }}"
 
 # Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
 calico_datastore: "kdd"

@@ -7,7 +7,7 @@
   tags: always
   include_tasks: fallback_ips_gather.yml
   when: hostvars[delegate_host_to_gather_facts].ansible_default_ipv4 is not defined
-  loop: "{{ groups['k8s-cluster']|default([]) + groups['etcd']|default([]) + groups['calico-rr']|default([]) }}"
+  loop: "{{ groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]) }}"
   loop_control:
     loop_var: delegate_host_to_gather_facts
   run_once: yes

@@ -16,7 +16,7 @@
   set_fact:
     fallback_ips_base: |
       ---
-      {% for item in (groups['k8s-cluster']|default([]) + groups['etcd']|default([]) + groups['calico-rr']|default([]))|unique %}
+      {% for item in (groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique %}
       {% set found = hostvars[item].get('ansible_default_ipv4') %}
       {{ item }}: "{{ found.get('address', '127.0.0.1') }}"
       {% endfor %}

@@ -9,9 +9,9 @@
 {%- if no_proxy_exclude_workers | default(false) -%}
 {% set cluster_or_master = 'kube_control_plane' %}
 {%- else -%}
-{% set cluster_or_master = 'k8s-cluster' %}
+{% set cluster_or_master = 'k8s_cluster' %}
 {%- endif -%}
-{%- for item in (groups[cluster_or_master] + groups['etcd']|default([]) + groups['calico-rr']|default([]))|unique -%}
+{%- for item in (groups[cluster_or_master] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%}
 {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }},
 {%- if item != hostvars[item].get('ansible_hostname', '') -%}
 {{ hostvars[item]['ansible_hostname'] }},

@@ -193,7 +193,7 @@
     nodeToNodeMeshEnabled: "false"
   when:
     - peer_with_router|default(false) or peer_with_calico_rr|default(false)
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
   run_once: yes
 
 - name: Calico | Set up BGP Configuration

@@ -264,7 +264,7 @@
   until: output.rc == 0
   delay: "{{ retry_stagger | random + 3 }}"
   with_items:
-    - "{{ groups['calico-rr'] | default([]) }}"
+    - "{{ groups['calico_rr'] | default([]) }}"
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
     - peer_with_calico_rr|default(false)

@@ -290,7 +290,7 @@
   until: output.rc == 0
   delay: "{{ retry_stagger | random + 3 }}"
   with_items:
-    - "{{ groups['calico-rr'] | default([]) }}"
+    - "{{ groups['calico_rr'] | default([]) }}"
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
     - peer_with_calico_rr|default(false)

@@ -368,9 +368,9 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when:
     - peer_with_router|default(false)
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']
     - local_as is defined
-    - groups['calico-rr'] | default([]) | length == 0
+    - groups['calico_rr'] | default([]) | length == 0
 
 - name: Calico | Configure peering with router(s) at node scope
   command:

@@ -396,4 +396,4 @@
     - "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}"
   when:
     - peer_with_router|default(false)
-    - inventory_hostname in groups['k8s-cluster']
+    - inventory_hostname in groups['k8s_cluster']

@@ -22,6 +22,6 @@ data:
   cluster_type: "kubespray,bgp"
   calico_backend: "bird"
 {% endif %}
-{% if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false) %}
+{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router|default(false) %}
   as: "{{ local_as|default(global_as_num) }}"
 {% endif -%}

@@ -6,16 +6,16 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']
 
-- name: kube-router | Add annotations on kube-node
+- name: kube-router | Add annotations on kube_node
   command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_node }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: kube_router_annotations_node is defined and inventory_hostname in groups['kube-node']
+  when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node']
 
 - name: kube-router | Add common annotations on all servers
   command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_all }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
-  when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s-cluster']
+  when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s_cluster']

@@ -1,4 +1,4 @@
-{% for host in groups['kube-node'] %}
+{% for host in groups['kube_node'] %}
 {% if hostvars[host]['access_ip'] is defined %}
 {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %}
 {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }}

@@ -4,7 +4,7 @@ Name=mac0
 [Network]
 Address={{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }}/{{ node_pod_cidr|ipaddr('prefix') }}
 
-{% for host in groups['kube-node'] %}
+{% for host in groups['kube_node'] %}
 {% if hostvars[host]['access_ip'] is defined %}
 {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %}
 [Route]

@@ -5,7 +5,7 @@ iface mac0 inet static
   netmask {{ node_pod_cidr|ipaddr('netmask') }}
   broadcast {{ node_pod_cidr|ipaddr('broadcast') }}
   pre-up ip link add link {{ macvlan_interface }} mac0 type macvlan mode bridge
-{% for host in groups['kube-node'] %}
+{% for host in groups['kube_node'] %}
 {% if hostvars[host]['access_ip'] is defined %}
 {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %}
   post-up ip route add {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }}

@@ -15,7 +15,7 @@ iface mac0 inet static
 {% if enable_nat_default_gateway %}
   post-up iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE
 {% endif %}
-{% for host in groups['kube-node'] %}
+{% for host in groups['kube_node'] %}
 {% if hostvars[host]['access_ip'] is defined %}
 {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %}
   post-down ip route del {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }}

@@ -15,7 +15,7 @@
     --grace-period {{ drain_grace_period }}
     --timeout {{ drain_timeout }}
     --delete-local-data {{ hostvars[item]['kube_override_hostname']|default(item) }}
-  loop: "{{ node.split(',') | default(groups['kube-node']) }}"
+  loop: "{{ node.split(',') | default(groups['kube_node']) }}"
   # ignore servers that are not nodes
   when: hostvars[item]['kube_override_hostname']|default(item) in nodes.stdout_lines
   register: result

@@ -207,7 +207,7 @@
 - name: Clear IPVS virtual server table
   command: "ipvsadm -C"
   when:
-    - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster']
+    - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster']
 
 - name: reset | check kube-ipvs0 network device
   stat:
scale.yml (18 changed lines)

@@ -2,14 +2,8 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml
 
-- name: Add kube-master nodes to kube_control_plane
-  # This is for old inventory which contains kube-master instead of kube_control_plane
-  hosts: kube-master
-  gather_facts: false
-  tasks:
-    - name: add nodes to kube_control_plane group
-      group_by:
-        key: 'kube_control_plane'
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml
 
 - hosts: bastion[0]
   gather_facts: False
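Illustrative only: the content of `legacy_groups.yml` is not included in this diff. A hypothetical sketch of such a mapping playbook, following the `group_by` pattern of the play removed above, could look roughly like this (group names taken from the rename; the actual file may differ).

```yaml
# legacy_groups.yml (hypothetical sketch, not the actual file from this commit)
---
- name: Add kube-master nodes to kube_control_plane
  hosts: kube-master
  gather_facts: false
  tasks:
    - name: Map legacy kube-master group to kube_control_plane
      group_by:
        key: 'kube_control_plane'

- name: Add kube-node nodes to kube_node
  hosts: kube-node
  gather_facts: false
  tasks:
    - name: Map legacy kube-node group to kube_node
      group_by:
        key: 'kube_node'

- name: Add k8s-cluster nodes to k8s_cluster
  hosts: k8s-cluster
  gather_facts: false
  tasks:
    - name: Map legacy k8s-cluster group to k8s_cluster
      group_by:
        key: 'k8s_cluster'

- name: Add calico-rr nodes to calico_rr
  hosts: calico-rr
  gather_facts: false
  tasks:
    - name: Map legacy calico-rr group to calico_rr
      group_by:
        key: 'calico_rr'
```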
@@ -19,7 +13,7 @@
   - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
 
 - name: Bootstrap any new workers
-  hosts: kube-node
+  hosts: kube_node
   strategy: linear
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false

@@ -52,7 +46,7 @@
     - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(engine)
-  hosts: kube-node
+  hosts: kube_node
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -64,7 +58,7 @@
     - { role: etcd, tags: etcd, etcd_cluster_setup: false, when: "not etcd_kubeadm_enabled|default(false)" }
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(node)
-  hosts: kube-node
+  hosts: kube_node
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -95,7 +89,7 @@
   when: kubeadm_certificate_key is not defined
 
 - name: Target only workers to get kubelet installed and checking in on any new nodes(network)
-  hosts: kube-node
+  hosts: kube_node
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   environment: "{{ proxy_disable_env }}"

@@ -1,5 +1,5 @@
 ---
-- hosts: kube-node
+- hosts: kube_node
   become: False
 
   tasks:
Some files were not shown because too many files have changed in this diff.