pre-commit: apply autofix hooks and fix the rest manually

- markdownlint (manual fix)
- end-of-file-fixer
- requirements-txt-fixer
- trailing-whitespace
Branch: lean/pre-commit-hook-2
Author: Max Gautier (2024-05-21 20:17:05 +02:00)
Parent: 77bfb53455
Commit: d50f61eae5
44 changed files with 42 additions and 47 deletions
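
For reference, below is a minimal sketch of a `.pre-commit-config.yaml` that wires up the hooks named in the commit message. The repository URLs, revs, and the choice of markdownlint wrapper are illustrative assumptions, not the project's pinned configuration:

```yaml
# Hypothetical minimal config; the real .pre-commit-config.yaml may pin
# different repos, revisions, and per-hook options.
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
    hooks:
      - id: end-of-file-fixer        # files end with exactly one newline
      - id: requirements-txt-fixer   # keep requirements*.txt sorted
      - id: trailing-whitespace      # strip trailing spaces
  - repo: https://github.com/igorshubovych/markdownlint-cli
    rev: v0.39.0
    hooks:
      - id: markdownlint             # reports issues; fixes applied manually
```

With a config like this, `pre-commit run --all-files` applies the three autofix hooks across the whole tree, while markdownlint findings are corrected by hand, matching the split described in the commit title.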

@@ -72,6 +72,7 @@ The setup looks like following
 ```bash
 ./generate-inventory.sh > sample-inventory/inventory.ini
 ```
 * Export Variables:

@@ -146,4 +146,4 @@ server_groups = {
 # ]
 # anti_affinity_policy = "yes"
 # }
-}
\ No newline at end of file
+}

@@ -558,4 +558,4 @@ resource "upcloud_server_group" "server_groups" {
 anti_affinity_policy = each.value.anti_affinity_policy
 labels = {}
 members = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
-}
\ No newline at end of file
+}

@@ -106,4 +106,4 @@ variable "server_groups" {
 anti_affinity_policy = string
 servers = list(string)
 }))
-}
\ No newline at end of file
+}

@@ -146,4 +146,4 @@ server_groups = {
 # ]
 # anti_affinity_policy = "yes"
 # }
-}
\ No newline at end of file
+}

@@ -8,7 +8,6 @@ Examples of what broken means in this context:
 * One or more bare metal node(s) suffer from unrecoverable hardware failure
 * One or more node(s) fail during patching or upgrading
 * Etcd database corruption
 * Other node related failures leaving your control plane degraded or nonfunctional
 __Note that you need at least one functional node to be able to recover using this method.__

@@ -32,4 +32,4 @@
 # etcd_experimental_enable_distributed_tracing: false
 # etcd_experimental_distributed_tracing_sample_rate: 100
 # etcd_experimental_distributed_tracing_address: "localhost:4317"
-# etcd_experimental_distributed_tracing_service_name: etcd
\ No newline at end of file
+# etcd_experimental_distributed_tracing_service_name: etcd

@@ -2,9 +2,9 @@ ansible==9.5.1
 cryptography==42.0.7
 jinja2==3.1.4
 jmespath==1.0.1
+jsonschema==4.22.0
 MarkupSafe==2.1.5
 netaddr==1.2.1
 pbr==6.0.0
 ruamel.yaml==0.18.6
 ruamel.yaml.clib==0.2.8
-jsonschema==4.22.0

@@ -116,4 +116,4 @@ containerd_tracing_enabled: false
 containerd_tracing_endpoint: "0.0.0.0:4317"
 containerd_tracing_protocol: "grpc"
 containerd_tracing_sampling_ratio: 1.0
-containerd_tracing_service_name: "containerd"
\ No newline at end of file
+containerd_tracing_service_name: "containerd"

@@ -107,4 +107,3 @@ oom_score = {{ containerd_oom_score }}
 sampling_ratio = {{ containerd_tracing_sampling_ratio }}
 service_name = "{{ containerd_tracing_service_name }}"
 {% endif %}
-

@@ -124,4 +124,4 @@ unsafe_show_logs: false
 etcd_experimental_enable_distributed_tracing: false
 etcd_experimental_distributed_tracing_sample_rate: 100
 etcd_experimental_distributed_tracing_address: "localhost:4317"
-etcd_experimental_distributed_tracing_service_name: etcd
\ No newline at end of file
+etcd_experimental_distributed_tracing_service_name: etcd

@@ -162,4 +162,4 @@ metadata:
 name: pd.csi.storage.gke.io
 spec:
 attachRequired: true
-podInfoOnMount: false
\ No newline at end of file
+podInfoOnMount: false

@@ -109,4 +109,4 @@ spec:
 # See "special case". This will tolerate everything. Node component should
 # be scheduled on all nodes.
 tolerations:
-- operator: Exists
\ No newline at end of file
+- operator: Exists

@@ -6,4 +6,4 @@ provisioner: pd.csi.storage.gke.io
 parameters:
 type: pd-balanced
 replication-type: regional-pd
-volumeBindingMode: WaitForFirstConsumer
\ No newline at end of file
+volumeBindingMode: WaitForFirstConsumer

@@ -5,4 +5,4 @@ metadata:
 provisioner: pd.csi.storage.gke.io
 parameters:
 type: pd-balanced
-volumeBindingMode: WaitForFirstConsumer
\ No newline at end of file
+volumeBindingMode: WaitForFirstConsumer

@@ -18,7 +18,7 @@ data:
 "max-pvscsi-targets-per-vm": "true"
 "multi-vcenter-csi-topology": "true"
 "csi-internal-generated-cluster-id": "true"
-"listview-tasks": "true"
+"listview-tasks": "true"
 {% if vsphere_csi_controller is version('v2.7.0', '>=') %}
 "improved-csi-idempotency": "true"
 "improved-volume-topology": "true"

@@ -9,4 +9,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
 name: cloud-controller-manager
-namespace: kube-system
\ No newline at end of file
+namespace: kube-system

@@ -110,4 +110,4 @@ rules:
 - list
 - watch
 apiGroups:
-- discovery.k8s.io
\ No newline at end of file
+- discovery.k8s.io

@@ -32,4 +32,3 @@ data:
 - name: helper-pod
 image: "{{ local_path_provisioner_helper_image_repo }}:{{ local_path_provisioner_helper_image_tag }}"
 imagePullPolicy: IfNotPresent
-

@@ -15,4 +15,4 @@ rules:
 verbs: [ "create", "patch" ]
 - apiGroups: [ "storage.k8s.io" ]
 resources: [ "storageclasses" ]
-verbs: [ "get", "list", "watch" ]
\ No newline at end of file
+verbs: [ "get", "list", "watch" ]

@@ -13,4 +13,4 @@ metallb_speaker_tolerations:
 key: node-role.kubernetes.io/control-plane
 operator: Exists
 metallb_controller_tolerations: []
-metallb_loadbalancer_class: ""
\ No newline at end of file
+metallb_loadbalancer_class: ""

@@ -11,4 +11,3 @@ subjects:
 - kind: ServiceAccount
 name: {{ node_feature_discovery_worker_sa_name }}
 namespace: {{ node_feature_discovery_namespace }}
-

@@ -194,4 +194,4 @@ spec:
 type: object
 type: object
 served: true
-storage: true
\ No newline at end of file
+storage: true

@@ -25,4 +25,4 @@ data:
 {% if scheduler_plugins_plugin_config is defined and scheduler_plugins_plugin_config | length != 0 %}
 pluginConfig:
 {{ scheduler_plugins_plugin_config | to_nice_yaml(indent=2, width=256) | indent(6, true) }}
-{% endif %}
\ No newline at end of file
+{% endif %}

@@ -71,4 +71,4 @@ spec:
 volumes:
 - name: scheduler-config
 configMap:
-name: scheduler-config
\ No newline at end of file
+name: scheduler-config

@@ -4,4 +4,4 @@ kind: Namespace
 metadata:
 name: {{ scheduler_plugins_namespace }}
 labels:
-name: {{ scheduler_plugins_namespace }}
\ No newline at end of file
+name: {{ scheduler_plugins_namespace }}

@@ -145,4 +145,4 @@ spec:
 type: object
 type: object
 served: true
-storage: true
\ No newline at end of file
+storage: true

@@ -137,4 +137,4 @@ subjects:
 namespace: {{ scheduler_plugins_namespace }}
 - kind: ServiceAccount
 name: scheduler-plugins-controller
-namespace: {{ scheduler_plugins_namespace }}
\ No newline at end of file
+namespace: {{ scheduler_plugins_namespace }}

@@ -8,4 +8,4 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
 name: scheduler-plugins-controller
-namespace: {{ scheduler_plugins_namespace }}
\ No newline at end of file
+namespace: {{ scheduler_plugins_namespace }}

@@ -79,4 +79,4 @@ spec:
 served: true
 storage: true
 subresources:
-status: {}
\ No newline at end of file
+status: {}

@@ -94,4 +94,4 @@ spec:
 served: true
 storage: true
 subresources:
-status: {}
\ No newline at end of file
+status: {}

@@ -150,4 +150,4 @@ spec:
 - zones
 type: object
 served: true
-storage: true
\ No newline at end of file
+storage: true

@@ -1,4 +1,4 @@
 apiVersion: apiserver.config.k8s.io/v1beta1
 kind: TracingConfiguration
 endpoint: {{ kube_apiserver_tracing_endpoint }}
-samplingRatePerMillion: {{ kube_apiserver_tracing_sampling_rate_per_million }}
\ No newline at end of file
+samplingRatePerMillion: {{ kube_apiserver_tracing_sampling_rate_per_million }}

@@ -174,4 +174,4 @@ topologyManagerScope: {{ kubelet_topology_manager_scope }}
 tracing:
 endpoint: {{ kubelet_tracing_endpoint }}
 samplingRatePerMillion: {{ kubelet_tracing_sampling_rate_per_million }}
-{% endif %}
\ No newline at end of file
+{% endif %}

@@ -102,4 +102,3 @@ data:
 }
 ]
 }
-

@@ -134,7 +134,7 @@ data:
 ## DSR setting
 bpf-lb-mode: "{{ cilium_loadbalancer_mode }}"
-# l2
+# l2
 enable-l2-announcements: "{{ cilium_l2announcements }}"
 # Enable Bandwidth Manager

@@ -140,7 +140,7 @@ rules:
 verbs:
 - list
 - watch
-{% if cilium_version %}
+{% if cilium_version %}
 - apiGroups:
 - coordination.k8s.io
 resources:

@@ -12,10 +12,10 @@ data:
 peer-service: "hubble-peer.kube-system.svc.{{ dns_domain }}:443"
 listen-address: :4245
 metrics-listen-address: ":9966"
-dial-timeout:
-retry-timeout:
-sort-buffer-len-max:
-sort-buffer-drain-timeout:
+dial-timeout:
+retry-timeout:
+sort-buffer-len-max:
+sort-buffer-drain-timeout:
 tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt
 tls-client-key-file: /var/lib/hubble-relay/tls/client.key
 tls-server-cert-file: /var/lib/hubble-relay/tls/server.crt

@@ -102,4 +102,3 @@ spec:
 protocol: TCP
 targetPort: 4244
 internalTrafficPolicy: Local
-

@@ -1530,4 +1530,4 @@ spec:
 subresources:
 status: {}
 conversion:
-strategy: None
\ No newline at end of file
+strategy: None

@@ -1,3 +1,3 @@
 ---
 - name: Scale the cluster
-ansible.builtin.import_playbook: playbooks/scale.yml
\ No newline at end of file
+ansible.builtin.import_playbook: playbooks/scale.yml

@@ -61,7 +61,7 @@ def main():
 for ip in conn.network.ips():
 fn_if_old(conn.network.delete_ip, ip)
 # After removing unnecessary subnet from router, retry to delete ports
 map_if_old(conn.network.delete_port,
 conn.network.ports())

@@ -6,4 +6,4 @@ $libvirt_volume_cache = "unsafe"
 # Checking for box update can trigger API rate limiting
 # https://www.vagrantup.com/docs/vagrant-cloud/request-limits.html
 $box_check_update = false
-$vm_cpus = 2
\ No newline at end of file
+$vm_cpus = 2

@@ -5,8 +5,8 @@ ara[server]==1.7.1
 dopy==0.3.7
 molecule==24.2.1
 molecule-plugins[vagrant]==23.5.3
-python-vagrant==1.0.0
 pytest-testinfra==10.1.0
+python-vagrant==1.0.0
 tox==4.15.0
-yamllint==1.35.1
 tzdata==2024.1
+yamllint==1.35.1