Use a variable for standardizing kubectl invocation (#8329)
* Add a kubectl variable
* Replace kubectl usage with the kubectl variable in roles
* Remove redundant --kubeconfig on kubectl usage
* Replace unnecessary shell usage with command

A consolidated sketch of the pattern follows the commit metadata below.
parent 3eab1129b9
commit cb54eb40ce
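In short, the change defines one `kubectl` variable in the defaults (see the kube_config_dir hunk further down) and has role tasks invoke it instead of spelling out the binary and kubeconfig themselves. A minimal before/after sketch, assembled from the first hunk of this diff and the defaults hunk (surrounding context trimmed):

# Added in the defaults hunk further down
kubectl: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf"

# Before: each task built the kubectl call itself, often via the shell module
- name: Kubernetes Apps | Register coredns deployment annotation `createdby`
  shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"

# After: the shared variable is used through the command module
- name: Kubernetes Apps | Register coredns deployment annotation `createdby`
  command: "{{ kubectl }} get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"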
@@ -1,6 +1,6 @@
 ---
 - name: Kubernetes Apps | Register coredns deployment annotation `createdby`
-  shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
+  command: "{{ kubectl }} get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
   register: createdby_annotation
   changed_when: false
   ignore_errors: true # noqa ignore-errors
@@ -29,7 +29,7 @@
   tags: vsphere-csi-driver

 - name: vSphere CSI Driver | Generate a CSI secret manifest
-  command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
+  command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
   register: vsphere_csi_secret_manifest
   when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: true
@@ -37,7 +37,7 @@

 - name: vSphere CSI Driver | Apply a CSI secret manifest
   command:
-    cmd: "{{ bin_dir }}/kubectl apply -f -"
+    cmd: "{{ kubectl }} apply -f -"
     stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
   when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: true
@@ -10,8 +10,8 @@
     - upgrade

 - name: CephFS Provisioner | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
+  command: >
+    {{ kubectl }} delete namespace {{ cephfs_provisioner_namespace }}
   ignore_errors: true # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -19,8 +19,8 @@
     - upgrade

 - name: CephFS Provisioner | Remove legacy storageclass
-  shell: |
-    {{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
+  command: >
+    {{ kubectl }} delete storageclass {{ cephfs_provisioner_storage_class }}
   ignore_errors: true # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -10,8 +10,8 @@
     - upgrade

 - name: RBD Provisioner | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
+  command: >
+    {{ kubectl }} delete namespace {{ rbd_provisioner_namespace }}
   ignore_errors: true # noqa ignore-errrors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -19,8 +19,8 @@
     - upgrade

 - name: RBD Provisioner | Remove legacy storageclass
-  shell: |
-    {{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
+  command: >
+    {{ kubectl }} delete storageclass {{ rbd_provisioner_storage_class }}
   ignore_errors: true # noqa ignore-errrors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -10,8 +10,8 @@
     - upgrade

 - name: Cert Manager | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
+  command: >
+    {{ kubectl }} delete namespace {{ cert_manager_namespace }}
   ignore_errors: true # noqa ignore-errors
   when:
     - inventory_hostname == groups['kube_control_plane'][0]
@@ -12,7 +12,7 @@
   run_once: true

 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
@@ -190,7 +190,7 @@

 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
-  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} {{ item }}"
+  command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
   delegate_to: "{{ first_kube_control_plane }}"
   with_items:
     - "node-role.kubernetes.io/master:NoSchedule-"
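The explicit `--kubeconfig {{ kube_config_dir }}/admin.conf` disappears from tasks like the one above because the flag is already baked into the `kubectl` variable. A rough illustration of how the line renders; the `/usr/local/bin` prefix and the `node1` hostname are assumptions for the example, while `/etc/kubernetes` matches the `kube_config_dir` default visible in the defaults hunk below:

# Task line as written in the role:
#   command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
# Rendered command (illustrative values):
#   /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf taint node node1 node-role.kubernetes.io/master:NoSchedule-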
@@ -61,8 +61,7 @@
 # FIXME: https://github.com/kubernetes/kubeadm/issues/1318
 - name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
   command: >-
-    {{ bin_dir }}/kubectl
-    --kubeconfig {{ kube_config_dir }}/admin.conf
+    {{ kubectl }}
     -n kube-system
     scale deployment/coredns --replicas 0
   register: scale_down_coredns
@@ -115,9 +115,9 @@
 # incorrectly to first master, creating SPoF.
 - name: Update server field in kube-proxy kubeconfig
   shell: >-
-    set -o pipefail && {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
+    set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
     | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'
-    | {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf replace -f -
+    | {{ kubectl }} replace -f -
   args:
     executable: /bin/bash
   run_once: true
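Note that tasks which pipe kubectl output through other tools, like the one above or the Calico kubelet-hostname lookup further down, keep the `shell` module with `set -o pipefail`; only plain single invocations are moved to `command`. A minimal sketch of that rule of thumb (both tasks are illustrative examples, not taken from this diff):

# Single kubectl invocation: no shell features needed, so use the command module
- name: Example | Plain invocation (illustrative)
  command: "{{ kubectl }} get nodes"
  changed_when: false

# Pipeline through other programs: shell is still required, with pipefail enabled
- name: Example | Pipeline (illustrative)
  shell: "set -o pipefail && {{ kubectl }} get nodes -o name | wc -l"
  args:
    executable: /bin/bash
  changed_when: false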
@@ -139,7 +139,7 @@
     mode: "0644"

 - name: Restart all kube-proxy pods to ensure that they load the new configmap
-  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
+  command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
   run_once: true
   delegate_to: "{{ groups['kube_control_plane']|first }}"
   delegate_facts: false
@@ -42,7 +42,7 @@

 - name: Set label to node
   command: >-
-    {{ bin_dir }}/kubectl label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
+    {{ kubectl }} label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
   loop: "{{ role_node_labels + inventory_node_labels }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   changed_when: false
@@ -138,6 +138,10 @@ kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"

+# Kubectl command
+# This is for consistency when using kubectl command in roles, and ensure
+kubectl: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf"
+
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"

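Because `kubectl` is an ordinary role default, an inventory can still point it at a different kubeconfig or add global flags without touching the roles. A hypothetical group_vars override, shown purely as an assumption of how the variable could be customized (the path and the extra flag are not part of this commit):

# group_vars/k8s_cluster/k8s-cluster.yml (hypothetical override)
kubectl: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf --request-timeout 60s"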
@@ -19,7 +19,7 @@

 - name: Calico | Get kubelet hostname
   shell: >-
-    set -o pipefail && {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
+    set -o pipefail && {{ kubectl }} get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
     | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
   args:
     executable: /bin/bash
@@ -1,6 +1,6 @@
 ---
 - name: Calico | Check if typha-server exists
-  command: "{{ bin_dir }}/kubectl -n kube-system get secret typha-server"
+  command: "{{ kubectl }} -n kube-system get secret typha-server"
   register: typha_server_secret
   changed_when: false
   failed_when: false
@@ -35,7 +35,7 @@

 - name: Calico | Create typha tls secrets
   command: >-
-    {{ bin_dir }}/kubectl -n kube-system
+    {{ kubectl }} -n kube-system
     create secret tls {{ item.name }}
     --cert {{ item.cert }}
     --key {{ item.key }}
@@ -11,7 +11,7 @@
   when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

 - name: Cilium | Wait for pods to run
-  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
+  command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
@@ -1,7 +1,7 @@
 ---
 - name: Kube-OVN | Label ovn-db node
   command: >-
-    {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
+    {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
   when:
     - inventory_hostname == groups['kube_control_plane'][0]

@@ -1,20 +1,20 @@
 ---
 - name: kube-router | Add annotations on kube_control_plane
-  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_master }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']

 - name: kube-router | Add annotations on kube_node
-  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_node }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node']

 - name: kube-router | Add common annotations on all servers
-  command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+  command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
   with_items:
     - "{{ kube_router_annotations_all }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
@@ -1,6 +1,6 @@
 ---
 - name: Macvlan | Retrieve Pod Cidr
-  command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
+  command: "{{ kubectl }} get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
   changed_when: false
   register: node_pod_cidr_cmd
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
@@ -0,0 +1,15 @@
+---
+- name: ovn4nfv | Label control-plane node
+  command: >-
+    {{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane
+  when:
+    - inventory_hostname == groups['kube_control_plane'][0]
+
+- name: ovn4nfv | Create ovn4nfv-k8s manifests
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
+  with_items:
+    - {name: ovn-daemonset, file: ovn-daemonset.yml}
+    - {name: ovn4nfv-k8s-plugin, file: ovn4nfv-k8s-plugin.yml}
+  register: ovn4nfv_node_manifests
@@ -1,6 +1,6 @@
 ---
 - name: Wait for apiserver
-  command: "{{ bin_dir }}/kubectl get nodes"
+  command: "{{ kubectl }} get nodes"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   register: apiserver_is_ready
@@ -11,7 +11,7 @@
   when: groups['broken_kube_control_plane']

 - name: Delete broken kube_control_plane nodes from cluster
-  command: "{{ bin_dir }}/kubectl delete node {{ item }}"
+  command: "{{ kubectl }} delete node {{ item }}"
   environment:
     - KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
   with_items: "{{ groups['broken_kube_control_plane'] }}"
@@ -1,6 +1,6 @@
 ---
 - name: Delete node
-  command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
+  command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane']|first }}"
   when: inventory_hostname in groups['k8s_cluster']
   retries: 10
@@ -1,7 +1,7 @@
 ---
 - name: remove-node | List nodes
   command: >-
-    {{ bin_dir }}/kubectl get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %}
+    {{ kubectl }} get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %}
   register: nodes
   delegate_to: "{{ groups['kube_control_plane']|first }}"
   changed_when: false
@@ -9,7 +9,7 @@

 - name: remove-node | Drain node except daemonsets resource # noqa 301
   command: >-
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf drain
+    {{ kubectl }} drain
     --force
     --ignore-daemonsets
     --grace-period {{ drain_grace_period }}
@@ -1,8 +1,8 @@
 ---
 - name: Lookup node IP in kubernetes
-  shell: >-
-    {{ bin_dir }}/kubectl get nodes {{ node }}
-    -o jsonpath='{range.status.addresses[?(@.type=="InternalIP")]}{.address}{"\n"}{end}'
+  command: >
+    {{ kubectl }} get nodes {{ node }}
+    -o jsonpath={range.status.addresses[?(@.type=="InternalIP")]}{.address}{"\n"}{end}
   register: remove_node_ip
   when:
     - inventory_hostname in groups['etcd']
@@ -4,7 +4,7 @@
     - needs_cordoning|default(false)
     - kube_network_plugin == 'cilium'
   command: >
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf
+    {{ kubectl }}
     wait pod -n kube-system -l k8s-app=cilium
     --field-selector 'spec.nodeName=={{ kube_override_hostname|default(inventory_hostname) }}'
     --for=condition=Ready
@@ -12,7 +12,7 @@
   delegate_to: "{{ groups['kube_control_plane'][0] }}"

 - name: Uncordon node
-  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
+  command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   when:
     - needs_cordoning|default(false)
@@ -17,9 +17,9 @@
 # Node Ready: type = ready, status = True
 # Node NotReady: type = ready, status = Unknown
 - name: See if node is in ready state
-  shell: >-
-    {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
-    -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
+  command: >
+    {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
+    -o jsonpath={ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }
   register: kubectl_node_ready
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   failed_when: false
@@ -28,9 +28,9 @@
 # SchedulingDisabled: unschedulable = true
 # else unschedulable key doesn't exist
 - name: See if node is schedulable
-  shell: >-
-    {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
-    -o jsonpath='{ .spec.unschedulable }'
+  command: >
+    {{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
+    -o jsonpath={ .spec.unschedulable }
   register: kubectl_node_schedulable
   delegate_to: "{{ groups['kube_control_plane'][0] }}"
   failed_when: false
@@ -48,11 +48,11 @@
 - name: Node draining
   block:
     - name: Cordon node
-      command: "{{ bin_dir }}/kubectl cordon {{ kube_override_hostname|default(inventory_hostname) }}"
+      command: "{{ kubectl }} cordon {{ kube_override_hostname|default(inventory_hostname) }}"
       delegate_to: "{{ groups['kube_control_plane'][0] }}"

     - name: Check kubectl version
-      command: "{{ bin_dir }}/kubectl version --client --short"
+      command: "{{ kubectl }} version --client --short"
       register: kubectl_version
       delegate_to: "{{ groups['kube_control_plane'][0] }}"
       run_once: yes
@@ -70,7 +70,7 @@

     - name: Drain node
      command: >-
-        {{ bin_dir }}/kubectl drain
+        {{ kubectl }} drain
         --force
         --ignore-daemonsets
         --grace-period {{ hostvars['localhost']['drain_grace_period_after_failure'] | default(drain_grace_period) }}
@@ -98,7 +98,7 @@

     - name: Drain node - fallback with disabled eviction
       command: >-
-        {{ bin_dir }}/kubectl drain
+        {{ kubectl }} drain
         --force
         --ignore-daemonsets
         --grace-period {{ drain_fallback_grace_period }}
@@ -117,7 +117,7 @@

   rescue:
     - name: Set node back to schedulable
-      command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ inventory_hostname }}"
+      command: "{{ kubectl }} uncordon {{ inventory_hostname }}"
       when: upgrade_node_uncordon_after_drain_failure
     - name: Fail after rescue
       fail:
@@ -12,9 +12,9 @@
 # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
 - name: Check current nodeselector for kube-proxy daemonset
   command: >-
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf
+    {{ kubectl }}
     get ds kube-proxy --namespace=kube-system
-    -o jsonpath='{.spec.template.spec.nodeSelector.{{ kube_proxy_nodeselector | regex_replace('\.', '\\.') }}}'
+    -o jsonpath={.spec.template.spec.nodeSelector.{{ kube_proxy_nodeselector | regex_replace('\.', '\\.') }}}
   register: current_kube_proxy_state
   retries: 60
   delay: 5
@@ -22,8 +22,8 @@
   changed_when: false

 - name: Apply nodeselector patch for kube-proxy daemonset
-  shell: >-
-    {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf
+  command: >
+    {{ kubectl }}
     patch ds kube-proxy --namespace=kube-system --type=strategic -p
     '{"spec":{"template":{"spec":{"nodeSelector":{"{{ kube_proxy_nodeselector }}":"linux"} }}}}'
   register: patch_kube_proxy_state