From 80379f6cab211af51313e6a46e319c8219cf53a1 Mon Sep 17 00:00:00 2001 From: Chad Swenson Date: Thu, 3 Jan 2019 02:04:26 -0600 Subject: [PATCH] Fix kube-proxy configuration for kubeadm (#3958) - Creates and defaults an ansible variable for every configuration option in the `kubeproxy.config.k8s.io/v1alpha1` type spec - Fixes vars that were orphaned by removing non-kubeadm - Fixes previously harcoded kubeadm values - Introduces a `main` directory for role default files per component (requires ansible 2.6.0+) - Split out just `kube-proxy.yml` in this first effort - Removes the kube-proxy server field patch task We should continue to pull out other components from `main.yml` into their own defaults files as I did here for `defaults/main/kube-proxy.yml`. I hope for and will need others to join me in this refactoring across the project until each component config template has a matching role defaults file, with shared defaults in `kubespray-defaults` or `downloads` --- README.md | 2 +- cluster.yml | 2 +- .../group_vars/k8s-cluster/k8s-cluster.yml | 14 ++- remove-node.yml | 2 +- requirements.txt | 2 +- reset.yml | 2 +- roles/kubernetes/kubeadm/tasks/main.yml | 27 ----- .../master/defaults/main/kube-proxy.yml | 105 ++++++++++++++++++ .../master/defaults/{ => main}/main.yml | 0 .../templates/kubeadm-config.v1alpha1.yaml.j2 | 2 +- .../templates/kubeadm-config.v1alpha2.yaml.j2 | 2 +- .../templates/kubeadm-config.v1alpha3.yaml.j2 | 60 +++++----- .../templates/kubeadm-config.v1beta1.yaml.j2 | 60 +++++----- roles/kubernetes/node/defaults/main.yml | 20 +--- roles/kubespray-defaults/defaults/main.yaml | 14 ++- scale.yml | 2 +- upgrade-cluster.yml | 2 +- 17 files changed, 193 insertions(+), 125 deletions(-) create mode 100644 roles/kubernetes/master/defaults/main/kube-proxy.yml rename roles/kubernetes/master/defaults/{ => main}/main.yml (100%) diff --git a/README.md b/README.md index daccaaec9..0db578a79 100644 --- a/README.md +++ b/README.md @@ -141,7 +141,7 @@ plugins can be 
deployed for a given single cluster. Requirements ------------ -- **Ansible v2.5 (or newer) and python-netaddr is installed on the machine +- **Ansible v2.6 (or newer) and python-netaddr is installed on the machine that will run Ansible commands** - **Jinja 2.9 (or newer) is required to run the Ansible Playbooks** - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment)) diff --git a/cluster.yml b/cluster.yml index 61e103963..82a377128 100644 --- a/cluster.yml +++ b/cluster.yml @@ -7,7 +7,7 @@ msg: "Ansible V2.7.0 can't be used until: https://github.com/ansible/ansible/issues/46600 is fixed" that: - ansible_version.string is version("2.7.0", "!=") - - ansible_version.string is version("2.5.0", ">=") + - ansible_version.string is version("2.6.0", ">=") tags: - check vars: diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml index eb7269b85..cb4aaa822 100644 --- a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml @@ -97,10 +97,16 @@ kube_apiserver_insecure_port: 0 # (disabled) # Can be ipvs, iptables kube_proxy_mode: ipvs -# Kube-proxy nodeport address. -# cidr to bind nodeport services. Flag --nodeport-addresses on kube-proxy manifest -kube_proxy_nodeport_addresses: false -# kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24 +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. 
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} ## Encrypting Secret Data at Rest (experimental) kube_encrypt_secret_data: false diff --git a/remove-node.yml b/remove-node.yml index 77212cced..a4bd8d97a 100644 --- a/remove-node.yml +++ b/remove-node.yml @@ -6,7 +6,7 @@ msg: "Ansible V2.7.0 can't be used until: https://github.com/ansible/ansible/issues/46600 is fixed" that: - ansible_version.string is version("2.7.0", "!=") - - ansible_version.string is version("2.5.0", ">=") + - ansible_version.string is version("2.6.0", ">=") tags: - check vars: diff --git a/requirements.txt b/requirements.txt index e36ab79d4..0d63cc24b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -ansible>=2.5.0,!=2.7.0 +ansible>=2.6.0,!=2.7.0 jinja2>=2.9.6 netaddr pbr>=1.6 diff --git a/reset.yml b/reset.yml index 02f2b14c9..db8e70400 100644 --- a/reset.yml +++ b/reset.yml @@ -6,7 +6,7 @@ msg: "Ansible V2.7.0 can't be used until: https://github.com/ansible/ansible/issues/46600 is fixed" that: - ansible_version.string is version("2.7.0", "!=") - - ansible_version.string is version("2.5.0", ">=") + - ansible_version.string is version("2.6.0", ">=") tags: - check vars: diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml index a79433ca5..2fbfac851 100644 --- a/roles/kubernetes/kubeadm/tasks/main.yml +++ b/roles/kubernetes/kubeadm/tasks/main.yml @@ -92,21 +92,6 @@ - kubeadm_discovery_address != kube_apiserver_endpoint notify: restart kubelet -- name: Update server field in kube-proxy kubeconfig - shell: >- - {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml - | sed 's#server:.*#server:\ {{ kube_apiserver_endpoint }}#g' - | {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir 
}}/admin.conf replace -f - - delegate_to: "{{groups['kube-master']|first}}" - run_once: true - when: - - kubeadm_config_api_fqdn is not defined - - is_kube_master - - kubeadm_discovery_address != kube_apiserver_endpoint - - not kube_proxy_remove - tags: - - kube-proxy - # FIXME(mattymo): Reconcile kubelet kubeconfig filename for both deploy modes - name: Symlink kubelet kubeconfig for calico/canal file: @@ -116,18 +101,6 @@ force: yes when: kube_network_plugin in ['calico','canal'] -- name: Restart all kube-proxy pods to ensure that they load the new configmap - shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy" - delegate_to: "{{groups['kube-master']|first}}" - run_once: true - when: - - kubeadm_config_api_fqdn is not defined - - is_kube_master - - kubeadm_discovery_address != kube_apiserver_endpoint - - not kube_proxy_remove - tags: - - kube-proxy - # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776 # is fixed - name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services diff --git a/roles/kubernetes/master/defaults/main/kube-proxy.yml b/roles/kubernetes/master/defaults/main/kube-proxy.yml new file mode 100644 index 000000000..b76e3db2e --- /dev/null +++ b/roles/kubernetes/master/defaults/main/kube-proxy.yml @@ -0,0 +1,105 @@ +--- +# bind address for kube-proxy +kube_proxy_bind_address: '0.0.0.0' + +# acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the +# default value of 'application/json'. This field will control all connections to the server used by a particular +# client. +kube_proxy_client_accept_content_types: '' + +# burst allows extra queries to accumulate when a client is exceeding its rate. +kube_proxy_client_burst: 10 + +# contentType is the content type used when sending data to the server from this client. 
+kube_proxy_client_content_type: application/vnd.kubernetes.protobuf + +# kubeconfig is the path to a KubeConfig file. +# Leave as empty string to generate from other fields +kube_proxy_client_kubeconfig: '' + +# qps controls the number of queries per second allowed for this connection. +kube_proxy_client_qps: 5 + +# How often configuration from the apiserver is refreshed. Must be greater than 0. +kube_proxy_config_sync_period: 15m0s + +### Conntrack +# max is the maximum number of NAT connections to track (0 to +# leave as-is). This takes precedence over maxPerCore and min. +kube_proxy_conntrack_max: 'null' + +# maxPerCore is the maximum number of NAT connections to track +# per CPU core (0 to leave the limit as-is and ignore min). +kube_proxy_conntrack_max_per_core: 32768 + +# min is the minimum value of connect-tracking records to allocate, +# regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is). +kube_proxy_conntrack_min: 131072 + +# tcpCloseWaitTimeout is how long an idle conntrack entry +# in CLOSE_WAIT state will remain in the conntrack +# table. (e.g. '60s'). Must be greater than 0 to set. +kube_proxy_conntrack_tcp_close_wait_timeout: 1h0m0s + +# tcpEstablishedTimeout is how long an idle TCP connection will be kept open +# (e.g. '2s'). Must be greater than 0 to set. +kube_proxy_conntrack_tcp_established_timeout: 24h0m0s + +# Enables profiling via web interface on /debug/pprof handler. +# Profiling handlers will be handled by metrics server. +kube_proxy_enable_profiling: false + +# bind address for kube-proxy health check +kube_proxy_healthz_bind_address: 0.0.0.0:10256 + +# If using the pure iptables proxy, SNAT everything. Note that it breaks any +# policy engine. +kube_proxy_masquerade_all: false + +# If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. +# Must be within the range [0, 31]. 
+kube_proxy_masquerade_bit: 14 + +# The minimum interval of how often the iptables or ipvs rules can be refreshed as +# endpoints and services change (e.g. '5s', '1m', '2h22m'). +kube_proxy_min_sync_period: 0s + +# The maximum interval of how often iptables or ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). +# Must be greater than 0. +kube_proxy_sync_period: 30s + +# A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules. +kube_proxy_exclude_cidrs: 'null' + +# The ipvs scheduler type when proxy mode is ipvs +# rr: round-robin +# lc: least connection +# dh: destination hashing +# sh: source hashing +# sed: shortest expected delay +# nq: never queue +kube_proxy_scheduler: rr + +# The IP address and port for the metrics server to serve on +# (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces) +kube_proxy_metrics_bind_address: 127.0.0.1:10249 + +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. +kube_proxy_nodeport_addresses: '[]' + +# oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000] +kube_proxy_oom_score_adj: -999 + +# portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed +# in order to proxy service traffic. If unspecified, 0, or (0-0) then ports will be randomly chosen. +kube_proxy_port_range: '' + +# resourceContainer is the absolute name of the resource-only container to create and run +# the Kube-proxy in (Default: /kube-proxy). +kube_proxy_resource_container: /kube-proxy + +# udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). +# Must be greater than 0. Only applicable for proxyMode=userspace. 
+kube_proxy_udp_idle_timeout: 250ms \ No newline at end of file diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main/main.yml similarity index 100% rename from roles/kubernetes/master/defaults/main.yml rename to roles/kubernetes/master/defaults/main/main.yml diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2 index 41e744bc7..0957824d9 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha1.yaml.j2 @@ -40,7 +40,7 @@ kubeProxy: mode: ipvs {% endif %} {% if kube_proxy_nodeport_addresses %} - nodePortAddresses: [{{ kube_proxy_nodeport_addresses_cidr }}] + nodePortAddresses: {{ kube_proxy_nodeport_addresses }} {% endif %} resourceContainer: "" authorizationModes: diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2 index 141087d3d..1743d03aa 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha2.yaml.j2 @@ -26,7 +26,7 @@ kubeProxy: config: mode: {{ kube_proxy_mode }} {% if kube_proxy_nodeport_addresses %} - nodePortAddresses: [{{ kube_proxy_nodeport_addresses_cidr }}] + nodePortAddresses: {{ kube_proxy_nodeport_addresses }} {% endif %} resourceContainer: "" authorizationModes: diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2 index 9cba6a40f..55861c511 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.v1alpha3.yaml.j2 @@ -221,39 +221,37 @@ schedulerExtraVolumes: --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -bindAddress: 0.0.0.0 +bindAddress: {{ 
kube_proxy_bind_address }} clientConnection: - acceptContentTypes: "" - burst: 10 - contentType: application/vnd.kubernetes.protobuf - kubeconfig: /var/lib/kube-proxy/kubeconfig.conf - qps: 5 -clusterCIDR: "" -configSyncPeriod: 15m0s + acceptContentTypes: {{ kube_proxy_client_accept_content_types }} + burst: {{ kube_proxy_client_burst }} + contentType: {{ kube_proxy_client_content_type }} + kubeconfig: {{ kube_proxy_client_kubeconfig }} + qps: {{ kube_proxy_client_qps }} +clusterCIDR: {{ kube_pods_subnet }} +configSyncPeriod: {{ kube_proxy_config_sync_period }} conntrack: - max: null - maxPerCore: 32768 - min: 131072 - tcpCloseWaitTimeout: 1h0m0s - tcpEstablishedTimeout: 24h0m0s -enableProfiling: false -healthzBindAddress: 0.0.0.0:10256 + max: {{ kube_proxy_conntrack_max }} + maxPerCore: {{ kube_proxy_conntrack_max_per_core }} + min: {{ kube_proxy_conntrack_min }} + tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }} + tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }} +enableProfiling: {{ kube_proxy_enable_profiling }} +healthzBindAddress: {{ kube_proxy_healthz_bind_address }} iptables: - masqueradeAll: false - masqueradeBit: 14 - minSyncPeriod: 0s - syncPeriod: 30s + masqueradeAll: {{ kube_proxy_masquerade_all }} + masqueradeBit: {{ kube_proxy_masquerade_bit }} + minSyncPeriod: {{ kube_proxy_min_sync_period }} + syncPeriod: {{ kube_proxy_sync_period }} ipvs: - excludeCIDRs: null - minSyncPeriod: 0s - scheduler: "" - syncPeriod: 30s -metricsBindAddress: 127.0.0.1:10249 + excludeCIDRs: {{ kube_proxy_exclude_cidrs }} + minSyncPeriod: {{ kube_proxy_min_sync_period }} + scheduler: {{ kube_proxy_scheduler }} + syncPeriod: {{ kube_proxy_sync_period }} +metricsBindAddress: {{ kube_proxy_metrics_bind_address }} mode: {{ kube_proxy_mode }} -{% if kube_proxy_nodeport_addresses %} -nodePortAddresses: [{{ kube_proxy_nodeport_addresses_cidr }}] -{% endif %} -oomScoreAdj: -999 -portRange: "" -resourceContainer: "" 
-udpIdleTimeout: 250ms +nodePortAddresses: {{ kube_proxy_nodeport_addresses }} +oomScoreAdj: {{ kube_proxy_oom_score_adj }} +portRange: {{ kube_proxy_port_range }} +resourceContainer: {{ kube_proxy_resource_container }} +udpIdleTimeout: {{ kube_proxy_udp_idle_timeout }} diff --git a/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2 index f2589c9bb..88876ee74 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.v1beta1.yaml.j2 @@ -227,39 +227,37 @@ scheduler: --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -bindAddress: 0.0.0.0 +bindAddress: {{ kube_proxy_bind_address }} clientConnection: - acceptContentTypes: "" - burst: 10 - contentType: application/vnd.kubernetes.protobuf - kubeconfig: /var/lib/kube-proxy/kubeconfig.conf - qps: 5 -clusterCIDR: "" -configSyncPeriod: 15m0s + acceptContentTypes: {{ kube_proxy_client_accept_content_types }} + burst: {{ kube_proxy_client_burst }} + contentType: {{ kube_proxy_client_content_type }} + kubeconfig: {{ kube_proxy_client_kubeconfig }} + qps: {{ kube_proxy_client_qps }} +clusterCIDR: {{ kube_pods_subnet }} +configSyncPeriod: {{ kube_proxy_config_sync_period }} conntrack: - max: null - maxPerCore: 32768 - min: 131072 - tcpCloseWaitTimeout: 1h0m0s - tcpEstablishedTimeout: 24h0m0s -enableProfiling: false -healthzBindAddress: 0.0.0.0:10256 + max: {{ kube_proxy_conntrack_max }} + maxPerCore: {{ kube_proxy_conntrack_max_per_core }} + min: {{ kube_proxy_conntrack_min }} + tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }} + tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }} +enableProfiling: {{ kube_proxy_enable_profiling }} +healthzBindAddress: {{ kube_proxy_healthz_bind_address }} iptables: - masqueradeAll: false - masqueradeBit: 14 - minSyncPeriod: 0s - syncPeriod: 30s + masqueradeAll: {{ 
kube_proxy_masquerade_all }} + masqueradeBit: {{ kube_proxy_masquerade_bit }} + minSyncPeriod: {{ kube_proxy_min_sync_period }} + syncPeriod: {{ kube_proxy_sync_period }} ipvs: - excludeCIDRs: null - minSyncPeriod: 0s - scheduler: "" - syncPeriod: 30s -metricsBindAddress: 127.0.0.1:10249 + excludeCIDRs: {{ kube_proxy_exclude_cidrs }} + minSyncPeriod: {{ kube_proxy_min_sync_period }} + scheduler: {{ kube_proxy_scheduler }} + syncPeriod: {{ kube_proxy_sync_period }} +metricsBindAddress: {{ kube_proxy_metrics_bind_address }} mode: {{ kube_proxy_mode }} -{% if kube_proxy_nodeport_addresses %} -nodePortAddresses: [{{ kube_proxy_nodeport_addresses_cidr }}] -{% endif %} -oomScoreAdj: -999 -portRange: "" -resourceContainer: "" -udpIdleTimeout: 250ms +nodePortAddresses: {{ kube_proxy_nodeport_addresses }} +oomScoreAdj: {{ kube_proxy_oom_score_adj }} +portRange: {{ kube_proxy_port_range }} +resourceContainer: {{ kube_proxy_resource_container }} +udpIdleTimeout: {{ kube_proxy_udp_idle_timeout }} diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index ecd75e3cc..99c1b8c86 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -11,16 +11,6 @@ kubelet_bind_address: "{{ ip | default('0.0.0.0') }}" # resolv.conf to base dns config kube_resolv_conf: "/etc/resolv.conf" -# bind address for kube-proxy health check -kube_proxy_healthz_bind_address: "127.0.0.1" - -# Can be ipvs, iptables -kube_proxy_mode: ipvs - -# If using the pure iptables proxy, SNAT everything. Note that it breaks any -# policy engine. -kube_proxy_masquerade_all: false - # These options reflect limitations of running kubelet in a container. 
# Modify at your own risk kubelet_enable_cri: true @@ -49,11 +39,7 @@ kube_master_cpu_reserved: 200m kubelet_status_update_frequency: 10s -# Limits for kube components and nginx load balancer app -kube_proxy_memory_limit: 2000M -kube_proxy_cpu_limit: 500m -kube_proxy_memory_requests: 64M -kube_proxy_cpu_requests: 150m +# Limits for nginx load balancer app nginx_memory_limit: 512M nginx_cpu_limit: 300m nginx_memory_requests: 32M @@ -63,10 +49,6 @@ nginx_cpu_requests: 25m # - extensions/v1beta1/daemonsets=true # - extensions/v1beta1/deployments=true -nginx_image_repo: nginx -nginx_image_tag: 1.13 -nginx_config_dir: "/etc/nginx" - kubelet_flexvolumes_plugins_dir: /var/lib/kubelet/volume-plugins # A port range to reserve for services with NodePort visibility. diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 269e1cae9..c5e8f55f6 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -17,10 +17,16 @@ kube_version: v1.13.1 ## Kube Proxy mode One of ['iptables','ipvs'] kube_proxy_mode: ipvs -# Kube-proxy nodeport address. -# cidr to bind nodeport services. Flag --nodeport-addresses on kube-proxy manifest -kube_proxy_nodeport_addresses: false -# kube_proxy_nodeport_addresses_cidr: 10.0.1.0/24 +# A string slice of values which specify the addresses to use for NodePorts. +# Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). +# The default empty string slice ([]) means to use all local addresses. 
+# kube_proxy_nodeport_addresses_cidr is retained for legacy config +kube_proxy_nodeport_addresses: >- + {%- if kube_proxy_nodeport_addresses_cidr is defined -%} + [{{ kube_proxy_nodeport_addresses_cidr }}] + {%- else -%} + [] + {%- endif -%} # Set to true to allow pre-checks to fail and continue deployment ignore_assert_errors: false diff --git a/scale.yml b/scale.yml index 84bd638d2..13472f661 100644 --- a/scale.yml +++ b/scale.yml @@ -7,7 +7,7 @@ msg: "Ansible V2.7.0 can't be used until: https://github.com/ansible/ansible/issues/46600 is fixed" that: - ansible_version.string is version("2.7.0", "!=") - - ansible_version.string is version("2.5.0", ">=") + - ansible_version.string is version("2.6.0", ">=") tags: - check vars: diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml index e542cc800..59e2d988c 100644 --- a/upgrade-cluster.yml +++ b/upgrade-cluster.yml @@ -7,7 +7,7 @@ msg: "Ansible V2.7.0 can't be used until: https://github.com/ansible/ansible/issues/46600 is fixed" that: - ansible_version.string is version("2.7.0", "!=") - - ansible_version.string is version("2.5.0", ">=") + - ansible_version.string is version("2.6.0", ">=") tags: - check vars: