project: fix var-spacing ansible rule (#10266)

* project: fix var-spacing ansible rule

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix spacing at the beginning/end of jinja templates

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix spacing of default filter

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix spacing between filter arguments

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix double space at beginning/end of jinja

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

* project: fix remaining jinja[spacing] ansible-lint warnings

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>

---------

Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@proton.ch>
Arthur Outhenin-Chalandre 2023-07-05 05:36:54 +02:00 committed by GitHub
parent f8b93fa88a
commit 5d00b851ce
178 changed files with 767 additions and 733 deletions
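
Most of the 767 additions and 733 deletions are mechanical spacing fixes of the kinds the commit messages list; ansible-lint's old var-spacing rule is reported as jinja[spacing] in current releases. A before/after sketch with hypothetical variables:

# Spacing that jinja[spacing] flags (hypothetical variables):
before:
  delimiter_padding: "{{my_var}}"                 # no space inside {{ ... }}
  default_filter: "{{ my_var|default('foo') }}"   # no space around the pipe
  filter_arguments: "{{ a | combine(b,c) }}"      # no space between arguments

# The normalized form these commits apply:
after:
  delimiter_padding: "{{ my_var }}"
  default_filter: "{{ my_var | default('foo') }}"
  filter_arguments: "{{ a | combine(b, c) }}"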


@@ -16,7 +16,6 @@ skip_list:
   # In Kubespray we use variables that use camelCase to match their k8s counterparts
   # (Disabled in June 2021)
   - 'var-naming'
-  - 'var-spacing'
   # [fqcn-builtins]
   # Roles in kubespray don't need fully qualified collection names


@@ -0,0 +1,8 @@
+# This file contains ignored rule violations for ansible-lint
+inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing]
+roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing]
+roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
+roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
+roles/kubernetes/node/defaults/main.yml jinja[spacing]
+roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
+roles/kubespray-defaults/defaults/main.yaml jinja[spacing]
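
This ignore file silences jinja[spacing] for whole files; the hunks below instead add per-value # noqa: jinja[spacing] comments where multi-line Jinja with whitespace control ({%- ... -%}) is deliberate and should not be reflowed by the rule. A minimal sketch of that pattern, using a hypothetical task and variable:

# Hypothetical task: the folded scalar and the {%- ... -%} trimming are intentional,
# so the spacing rule is suppressed for this one value rather than "fixed".
- name: Pick the ping package per OS family
  ansible.builtin.set_fact:
    # noqa: jinja[spacing]
    ping_package_name: >-
      {%- if ansible_os_family == 'Debian' -%}
      iputils-ping
      {%- else -%}
      iputils
      {%- endif -%}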


@@ -90,6 +90,7 @@
 - name: "Check_certs | Set 'gen_master_certs' object to track whether member and admin certs exist on first etcd node"
   set_fact:
+    # noqa: jinja[spacing]
     gen_master_certs: |-
       {
       {% set etcd_members = groups['etcd'] -%}
@@ -112,6 +113,7 @@
 - name: "Check_certs | Set 'gen_node_certs' object to track whether node certs exist on first etcd node"
   set_fact:
+    # noqa: jinja[spacing]
     gen_node_certs: |-
       {
       {% set k8s_nodes = groups['k8s_cluster'] -%}


@@ -14,6 +14,7 @@
 - include_tasks: refresh_config.yml
   vars:
+    # noqa: jinja[spacing]
     etcd_events_peer_addresses: >-
       {% for host in groups['etcd'] -%}
       {%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%}


@@ -15,6 +15,7 @@
 - include_tasks: refresh_config.yml
   vars:
+    # noqa: jinja[spacing]
     etcd_peer_addresses: >-
       {% for host in groups['etcd'] -%}
       {%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}


@@ -1,6 +1,7 @@
 ---
 - name: Kubernetes Apps | set up necessary nodelocaldns parameters
   set_fact:
+    # noqa: jinja[spacing]
     primaryClusterIP: >-
       {%- if dns_mode in ['coredns', 'coredns_dual'] -%}
       {{ skydns_server }}
@@ -26,6 +27,7 @@
     - { name: nodelocaldns, file: nodelocaldns-daemonset.yml, type: daemonset }
   register: nodelocaldns_manifests
   vars:
+    # noqa: jinja[spacing]
     forwardTarget: >-
       {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%}
       {{ primaryClusterIP }} {{ secondaryclusterIP }}
@@ -54,12 +56,14 @@
     - { name: nodelocaldns, file: nodelocaldns-second-daemonset.yml, type: daemonset }
   register: nodelocaldns_second_manifests
   vars:
+    # noqa: jinja[spacing]
     forwardTarget: >-
       {%- if secondaryclusterIP is defined and dns_mode == 'coredns_dual' -%}
       {{ primaryClusterIP }} {{ secondaryclusterIP }}
       {%- else -%}
       {{ primaryClusterIP }}
       {%- endif -%}
+    # noqa: jinja[spacing]
     upstreamForwardTarget: >-
       {%- if upstream_dns_servers is defined and upstream_dns_servers | length > 0 -%}
       {{ upstream_dns_servers | join(' ') }}


@@ -1,6 +1,7 @@
 ---
 - name: Set external kube-apiserver endpoint
   set_fact:
+    # noqa: jinja[spacing]
     external_apiserver_address: >-
       {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined -%}
       {{ loadbalancer_apiserver.address }}
@@ -9,6 +10,7 @@
       {%- else -%}
       {{ kube_apiserver_access_address }}
       {%- endif -%}
+    # noqa: jinja[spacing]
     external_apiserver_port: >-
       {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and loadbalancer_apiserver.port is defined -%}
       {{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}


@@ -1,6 +1,7 @@
 ---
 - name: Set kubeadm_discovery_address
   set_fact:
+    # noqa: jinja[spacing]
     kubeadm_discovery_address: >-
       {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
       {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}


@@ -1,6 +1,7 @@
 ---
 - name: Set kubeadm_discovery_address
   set_fact:
+    # noqa: jinja[spacing]
     kubeadm_discovery_address: >-
       {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
       {{ first_kube_control_plane_address }}:{{ kube_apiserver_port }}


@@ -102,6 +102,7 @@
 - name: Ensure ping package
   package:
+    # noqa: jinja[spacing]
     name: >-
       {%- if ansible_os_family == 'Debian' -%}
       iputils-ping


@@ -17,6 +17,7 @@
 - name: Set fact NTP settings
   set_fact:
+    # noqa: jinja[spacing]
     ntp_config_file: >-
       {% if ntp_package == "ntp" -%}
       /etc/ntp.conf
@@ -25,6 +26,7 @@
       {%- else -%}
       /etc/chrony/chrony.conf
       {%- endif -%}
+    # noqa: jinja[spacing]
     ntp_service_name: >-
       {% if ntp_package == "chrony" -%}
       chronyd
@@ -51,6 +53,7 @@
     - ntp_force_sync_immediately
 - name: Force Sync NTP Immediately
+  # noqa: jinja[spacing]
   command: >-
     timeout -k 60s 60s
     {% if ntp_package == "ntp" -%}


@@ -1,6 +1,7 @@
 ---
 - name: Set no_proxy to all assigned cluster IPs and hostnames
   set_fact:
+    # noqa: jinja[spacing]
     no_proxy_prepare: >-
       {%- if loadbalancer_apiserver is defined -%}
       {{ apiserver_loadbalancer_domain_name | default('') }},
@@ -32,6 +33,7 @@
 - name: Populates no_proxy to all hosts
   set_fact:
     no_proxy: "{{ hostvars.localhost.no_proxy_prepare }}"
+    # noqa: jinja[spacing]
     proxy_env: "{{ proxy_env | combine({
       'no_proxy': hostvars.localhost.no_proxy_prepare,
       'NO_PROXY': hostvars.localhost.no_proxy_prepare
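
Here the suppressed value is a single {{ ... }} expression wrapped across lines for readability, which jinja[spacing] would otherwise want to reflow. For comparison, a one-line equivalent (a sketch, same variables) should satisfy the rule as-is:

proxy_env: "{{ proxy_env | combine({'no_proxy': hostvars.localhost.no_proxy_prepare, 'NO_PROXY': hostvars.localhost.no_proxy_prepare}) }}"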


@@ -24,6 +24,7 @@
   retries: 10
 - name: Calico-rr | Set route reflector cluster ID
+  # noqa: jinja[spacing]
   set_fact:
     calico_rr_node_patched: >-
       {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':


@@ -309,6 +309,7 @@
 - name: Calico | Set kubespray BGP Configuration
   set_fact:
+    # noqa: jinja[spacing]
     _bgp_config: >
       {
       "kind": "BGPConfiguration",


@@ -7,6 +7,7 @@
 - name: Macvlan | reload network
   service:
+    # noqa: jinja[spacing]
     name: >-
       {% if ansible_os_family == "RedHat" -%}
       network


@@ -2,6 +2,7 @@
 # TODO: Figure out why kubeadm does not fix this
 - name: Set etcd-servers fact
   set_fact:
+    # noqa: jinja[spacing]
     etcd_servers: >-
       {% for host in groups['etcd'] -%}
       {% if not loop.last -%}


@@ -428,6 +428,7 @@
 - name: reset | Restart network
   service:
+    # noqa: jinja[spacing]
     name: >-
       {% if ansible_os_family == "RedHat" -%}
       {%- if ansible_distribution_major_version | int >= 8 or is_fedora_coreos or ansible_distribution == "Fedora" -%}


@@ -38,6 +38,7 @@
 - name: Set if node needs cordoning
   set_fact:
+    # noqa: jinja[spacing]
     needs_cordoning: >-
       {% if (kubectl_node_ready.stdout == "True" and not kubectl_node_schedulable.stdout) or upgrade_node_always_cordon -%}
       true


@@ -57,6 +57,7 @@
 - name: set instance names
   set_fact:
+    # noqa: jinja[spacing]
     instance_names: >-
       {%- if mode in ['separate', 'ha'] -%}
       ["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"]


@@ -18,6 +18,7 @@
 - name: set instance names
   set_fact:
+    # noqa: jinja[spacing]
     instance_names: >-
       {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%}
       k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3


@@ -12,6 +12,7 @@
 - name: set instance names
   set_fact:
+    # noqa: jinja[spacing]
     instance_names: >-
       {%- if mode in ['separate', 'ha'] -%}
       k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3