diff --git a/cluster.yml b/cluster.yml
index e773931bf..f0c324174 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -2,8 +2,8 @@
 - hosts: localhost
   gather_facts: False
   roles:
-    - bastion-ssh-config
-  tags: [localhost, bastion]
+    - { role: kargo-defaults }
+    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
 
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: true
@@ -13,9 +13,8 @@
     # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
     ansible_ssh_pipelining: false
   roles:
-    - bootstrap-os
-  tags:
-    - bootstrap-os
+    - { role: kargo-defaults }
+    - { role: bootstrap-os, tags: bootstrap-os }
 
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: true
@@ -26,6 +25,7 @@
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: docker, tags: docker }
@@ -36,47 +36,56 @@
 - hosts: etcd:k8s-cluster:vault
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults, when: "cert_management == 'vault'" }
     - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
 
 - hosts: etcd:!k8s-cluster
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: etcd, tags: etcd }
 
 - hosts: k8s-cluster
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: etcd, tags: etcd }
 
 - hosts: etcd:k8s-cluster:vault
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: vault, tags: vault, when: "cert_management == 'vault'"}
 
 - hosts: k8s-cluster
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: kubernetes/node, tags: node }
     - { role: network_plugin, tags: network }
 
 - hosts: kube-master
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: kubernetes/master, tags: master }
     - { role: kubernetes-apps/network_plugin, tags: network }
 
 - hosts: calico-rr
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: network_plugin/calico/rr, tags: network }
 
 - hosts: k8s-cluster
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
     - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
 
 - hosts: kube-master[0]
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: kubernetes-apps, tags: apps }
diff --git a/inventory/group_vars/new-york.yml b/docs/calico_peer_example/new-york.yml
similarity index 100%
rename from inventory/group_vars/new-york.yml
rename to docs/calico_peer_example/new-york.yml
diff --git a/inventory/group_vars/paris.yml b/docs/calico_peer_example/paris.yml
similarity index 100%
rename from inventory/group_vars/paris.yml
rename to docs/calico_peer_example/paris.yml
diff --git a/inventory/group_vars/calico-rr.yml b/inventory/group_vars/calico-rr.yml
deleted file mode 100644
index 5de7d7347..000000000
--- a/inventory/group_vars/calico-rr.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-## Required for bootstrap-os/preinstall/download roles and setting facts
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-docker_bin_dir: /usr/bin
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-kube_service_addresses: 10.233.0.0/18
-kube_apiserver_port: 443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: dnsmasq_kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
diff --git a/inventory/group_vars/etcd.yml b/inventory/group_vars/etcd.yml
deleted file mode 100644
index 8769967fa..000000000
--- a/inventory/group_vars/etcd.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-## Required for bootstrap-os/preinstall/download roles and setting facts
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-docker_bin_dir: /usr/bin
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# Settings for containerized control plane (etcd/secrets)
-etcd_deployment_type: docker
-cert_management: script
-vault_deployment_type: docker
-
-kube_service_addresses: 10.233.0.0/18
-kube_apiserver_port: 443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: dnsmasq_kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml
index b7c155202..d465e69b9 100644
--- a/inventory/group_vars/k8s-cluster.yml
+++ b/inventory/group_vars/k8s-cluster.yml
@@ -1,113 +1,115 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
+# # Valid bootstrap options (required): ubuntu, coreos, centos, none
+# bootstrap_os: none
 
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
+# # Directory where the binaries will be installed
+# bin_dir: /usr/local/bin
 
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
-# This puts them in a sane location and namespace.
-# Editting those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
+# # Kubernetes configuration dirs and system namespace.
+# # Those are where all the additional config stuff goes
+# # that Kubernetes normally puts in /srv/kubernetes.
+# # This puts them in a sane location and namespace.
+# # Editing those values will almost surely break something.
+# kube_config_dir: /etc/kubernetes
+# kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+# kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+# system_namespace: kube-system
 
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
+# # Logging directory (sysvinit systems)
+# kube_log_dir: "/var/log/kubernetes"
 
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
+# # This is where all the cert scripts and certs will be located
+# kube_cert_dir: "{{ kube_config_dir }}/ssl"
 
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
+# # This is where all of the bearer tokens will be stored
+# kube_token_dir: "{{ kube_config_dir }}/tokens"
 
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
+# # This is where the basic auth file is saved
+# kube_users_dir: "{{ kube_config_dir }}/users"
 
-## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.5.3
+# kube_api_anonymous_auth: false
 
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
+# ## Change this to use another Kubernetes version, e.g. a current beta release
+# kube_version: v1.5.3
 
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
+# # Where the binaries will be downloaded.
+# # Note: ensure that you have enough disk space (about 1G)
+# local_release_dir: "/tmp/releases"
+# # Random shifts for retrying failed ops like pushing/downloading
+# retry_stagger: 5
 
-# Cluster Loglevel configuration
-kube_log_level: 2
+# # This is the group that the cert creation scripts chgrp the
+# # cert files to. Not really changeable...
+# kube_cert_group: kube-cert
 
-# Users to create for basic auth in Kubernetes API via HTTP
-kube_api_pwd: "changeme"
-kube_users:
-  kube:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-  root:
-    pass: "{{kube_api_pwd}}"
-    role: admin
+# # Cluster Loglevel configuration
+# kube_log_level: 2
 
-# Choose network plugin (calico, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: calico
+# # Users to create for basic auth in Kubernetes API via HTTP
+# kube_api_pwd: "changeme"
+# kube_users:
+#   kube:
+#     pass: "{{kube_api_pwd}}"
+#     role: admin
+#   root:
+#     pass: "{{kube_api_pwd}}"
+#     role: admin
 
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
+# # Choose network plugin (calico, weave or flannel)
+# # Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
+# kube_network_plugin: calico
 
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
+# # Kubernetes internal network for services, unused block of space.
+# kube_service_addresses: 10.233.0.0/18
 
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
+# # Internal network. When used, it will assign IP
+# # addresses from this range to individual pods.
+# # This network must be unused in your network infrastructure!
+# kube_pods_subnet: 10.233.64.0/18
 
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
+# # Internal network node size allocation (optional). This is the size allocated
+# # to each node on your network. With these defaults you should have
+# # room for 4096 nodes with 254 pods per node.
+# kube_network_node_prefix: 24
 
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: dnsmasq_kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
+# # The port the API Server will be listening on.
+# kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+# kube_apiserver_port: 443 # (https)
+# kube_apiserver_insecure_port: 8080 # (http)
 
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
+# # DNS configuration.
+# # Kubernetes cluster name, which will also be used as the DNS domain
+# cluster_name: cluster.local
+# # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+# ndots: 2
+# # Can be dnsmasq_kubedns, kubedns or none
+# dns_mode: dnsmasq_kubedns
+# # Can be docker_dns, host_resolvconf or none
+# resolvconf_mode: docker_dns
+# # Deploy netchecker app to verify DNS resolution as an HTTP service
+# deploy_netchecker: false
+# # IP address of the kubernetes skydns service
+# skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+# dns_domain: "{{ cluster_name }}"
 
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false"
-docker_bin_dir: "/usr/bin"
+# # Path used to store Docker data
+# docker_daemon_graph: "/var/lib/docker"
 
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: docker
-cert_management: script
-vault_deployment_type: docker
+# ## A string of extra options to pass to the docker daemon.
+# ## This string should be exactly as you wish it to appear.
+# ## An obvious use case is allowing insecure-registry access
+# ## to self-hosted registries like so:
+# docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false"
+# docker_bin_dir: "/usr/bin"
 
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
+# # Settings for containerized control plane (etcd/kubelet/secrets)
+# etcd_deployment_type: docker
+# kubelet_deployment_type: docker
+# cert_management: script
+# vault_deployment_type: docker
 
-# Monitoring apps for k8s
-efk_enabled: false
+# # K8s image pull policy (imagePullPolicy)
+# k8s_image_pull_policy: IfNotPresent
+
+# # Monitoring apps for k8s
+# efk_enabled: false
diff --git a/inventory/group_vars/kube-master.yml b/inventory/group_vars/kube-master.yml
deleted file mode 100644
index 7e75bf859..000000000
--- a/inventory/group_vars/kube-master.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-# Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previos versions, anonymous auth was
-# not implemented. As the new flag defaults to true, we have to explicetely disable it. Change this line if you want the
-# 1.5 default behavior. The flag is actually only added if the used kubernetes version is >= 1.5
-kube_api_anonymous_auth: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-kube_version: v1.5.3
diff --git a/reset.yml b/reset.yml
index 42a188ccc..b6e15d828 100644
--- a/reset.yml
+++ b/reset.yml
@@ -14,4 +14,5 @@
       when: reset_confirmation != "yes"
 
   roles:
+    - { role: kargo-defaults }
     - { role: reset, tags: reset }
diff --git a/roles/kargo-defaults/defaults/main.yaml b/roles/kargo-defaults/defaults/main.yaml
new file mode 100644
index 000000000..9760058c4
--- /dev/null
+++ b/roles/kargo-defaults/defaults/main.yaml
@@ -0,0 +1,114 @@
+## Required for bootstrap-os/preinstall/download roles and setting facts
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+kube_api_anonymous_auth: false
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.5.3
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+docker_bin_dir: /usr/bin
+
+# Where the binaries will be downloaded.
+# Note: ensure that you have enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# DNS configuration.
+# Kubernetes cluster name, which will also be used as the DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: dnsmasq_kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolution as an HTTP service
+deploy_netchecker: false
+# IP address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# that Kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where the basic auth file is saved
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+kube_api_pwd: "changeme"
+kube_users:
+  kube:
+    pass: "{{kube_api_pwd}}"
+    role: admin
+  root:
+    pass: "{{kube_api_pwd}}"
+    role: admin
+
+# Choose network plugin (calico, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
+kube_network_plugin: calico
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# Internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# Internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self-hosted registries like so:
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: docker
+cert_management: script
+vault_deployment_type: docker
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+efk_enabled: false
diff --git a/roles/kargo-defaults/tasks/main.yaml b/roles/kargo-defaults/tasks/main.yaml
new file mode 100644
index 000000000..91d0bc463
--- /dev/null
+++ b/roles/kargo-defaults/tasks/main.yaml
@@ -0,0 +1,5 @@
+- name: Configure defaults
+  debug:
+    msg: "Check roles/kargo-defaults/defaults/main.yaml"
+  tags:
+    - always
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index ac5e4e4aa..c15a7539e 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -34,6 +34,7 @@
       register: running_pods
 
   - set_fact:
+      kube_pods_subnet: 10.233.64.0/18
       pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'metadata.name') | list }}"
       pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}"
       pods_hostnet: |
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 1be9c9cab..1eefc5ec5 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -2,8 +2,8 @@
 - hosts: localhost
   gather_facts: False
   roles:
-    - bastion-ssh-config
-  tags: [localhost, bastion]
+    - { role: kargo-defaults }
+    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
 
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: true
@@ -13,9 +13,8 @@
     # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
     ansible_ssh_pipelining: false
   roles:
-    - bootstrap-os
-  tags:
-    - bootstrap-os
+    - { role: kargo-defaults }
+    - { role: bootstrap-os, tags: bootstrap-os }
 
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: true
@@ -26,6 +25,7 @@
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: docker, tags: docker }
@@ -36,21 +36,25 @@
 - hosts: etcd:k8s-cluster:vault
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults, when: "cert_management == 'vault'" }
     - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
 
 - hosts: etcd:!k8s-cluster
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: etcd, tags: etcd }
 
 - hosts: k8s-cluster
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: etcd, tags: etcd }
 
 - hosts: etcd:k8s-cluster:vault
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults, when: "cert_management == 'vault'" }
     - { role: vault, tags: vault, when: "cert_management == 'vault'"}
 
 #Handle upgrades to master components first to maintain backwards compat.
@@ -58,6 +62,7 @@
   any_errors_fatal: true
   serial: 1
   roles:
+    - { role: kargo-defaults }
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/master, tags: master }
@@ -69,6 +74,7 @@
   any_errors_fatal: true
   serial: "{{ serial | default('20%') }}"
   roles:
+    - { role: kargo-defaults }
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
     - { role: network_plugin, tags: network }
@@ -78,15 +84,18 @@
 - hosts: calico-rr
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: network_plugin/calico/rr, tags: network }
 
 - hosts: k8s-cluster
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
     - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
 
 - hosts: kube-master[0]
   any_errors_fatal: true
   roles:
+    - { role: kargo-defaults }
     - { role: kubernetes-apps, tags: apps }
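
Note on overriding these defaults: because they now live in roles/kargo-defaults/defaults/main.yaml, they sit at the lowest level of Ansible's variable precedence, so anything defined in inventory group_vars or host_vars, or passed with --extra-vars, still takes priority. A minimal sketch of an override file, assuming a conventional inventory layout (the path and values below are illustrative, not part of this patch):

# inventory/group_vars/all.yml (hypothetical override file)
# Variables set here shadow the same-named role defaults from
# roles/kargo-defaults/defaults/main.yaml.
kube_network_plugin: flannel           # replaces the calico default
kube_service_addresses: 10.240.0.0/18  # replaces 10.233.0.0/18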