mirror of https://github.com/ceph/ceph-ansible.git
lint: commands should not change things
Fix ansible lint 301 error:
[301] Commands should not change things if nothing needs doing
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 5450de58b3)
pull/6166/head
parent 92b261df89
commit 35e738c681
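For reference, ansible-lint rule 301 flags any command or shell task that Ansible reports as "changed" on every run even though nothing was actually modified. The pattern applied throughout this commit is to mark read-only commands with changed_when: false (or, where a command is meant to report a change, to suppress the rule inline with # noqa 301). A minimal sketch of the main pattern, using a hypothetical play and task that are not part of this diff:

- hosts: mons          # hypothetical group
  gather_facts: false
  tasks:
    # read-only query: register the output but never report "changed",
    # which satisfies lint rule 301
    - name: check cluster health (hypothetical example)
      command: ceph --cluster ceph health
      register: ceph_health
      changed_when: false
      failed_when: false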
@@ -37,14 +37,18 @@
       set -o pipefail;
       grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}'
     register: old_osd_filesystems
+    changed_when: false
 
-  - name: tear down any existing osd filesystems
-    command: "umount -v {{ item }}"
+  - name: tear down any existing osd filesystem
+    mount:
+      path: "{{ item }}"
+      state: unmounted
     with_items: "{{ old_osd_filesystems.stdout_lines }}"
 
   - name: kill all lvm commands that may have been hung
     command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n"
     failed_when: false
+    changed_when: false
 
 ## Logcal Vols
   - name: tear down existing lv for bucket index
@@ -96,7 +100,9 @@
 ## Physical Vols
   - name: tear down pv for nvme device
     command: "pvremove --force --yes {{ nvme_device }}"
+    changed_when: false
 
   - name: tear down pv for each hdd device
     command: "pvremove --force --yes {{ item }}"
+    changed_when: false
     with_items: "{{ hdd_devices }}"

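Besides the changed_when: false additions, the first hunk above also replaces a raw "umount -v" command with Ansible's mount module using state: unmounted, which is idempotent and reports its changed status correctly on its own. A minimal sketch of that pattern (the variable name is hypothetical):

    - name: unmount a filesystem idempotently
      mount:
        path: "{{ osd_mountpoint }}"   # hypothetical variable, e.g. /var/lib/ceph/osd/ceph-0
        state: unmounted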
@@ -87,6 +87,7 @@
 
   - name: ensure cephfs mountpoint(s) are unmounted
     command: umount -a -t ceph
+    changed_when: false
 
   - name: find mapped rbd ids
     find:
@@ -96,6 +97,7 @@
 
   - name: use sysfs to unmap rbd devices
     shell: "echo {{ item.path | basename }} > /sys/bus/rbd/remove_single_major"
+    changed_when: false
     with_items: "{{ rbd_mapped_ids.files }}"
 
   - name: unload ceph kernel modules
@@ -415,6 +417,7 @@
       blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
     register: encrypted_ceph_partuuid
     failed_when: false
+    changed_when: false
 
   - name: get osd data and lockbox mount points
     shell: |
@@ -425,9 +428,12 @@
 
   - name: drop all cache
     shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
+    changed_when: false
 
   - name: umount osd data partition
-    shell: umount {{ item }}
+    mount:
+      path: "{{ item }}"
+      state: unmounted
     with_items: "{{ mounted_osd.stdout_lines }}"
 
   - name: remove osd mountpoint tree
@@ -470,12 +476,14 @@
 
   - name: get physical sector size
     command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }}
+    changed_when: false
     with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
     when: encrypted_ceph_partuuid.stdout_lines | length > 0
     register: phys_sector_size
 
   - name: wipe dmcrypt device
     command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct
+    changed_when: false
     with_together:
       - "{{ encrypted_ceph_partuuid.stdout_lines }}"
       - "{{ payload_offset.results }}"
@@ -484,17 +492,20 @@
   - name: get ceph data partitions
     shell: |
       blkid -o device -t PARTLABEL="ceph data"
+    changed_when: false
     failed_when: false
     register: ceph_data_partition_to_erase_path
 
   - name: get ceph lockbox partitions
     shell: |
       blkid -o device -t PARTLABEL="ceph lockbox"
+    changed_when: false
     failed_when: false
     register: ceph_lockbox_partition_to_erase_path
 
   - name: see if ceph-volume is installed
-    shell: "command -v ceph-volume"
+    command: command -v ceph-volume
+    changed_when: false
     failed_when: false
     register: ceph_volume_present
 
@@ -530,24 +541,28 @@
   - name: get ceph block partitions
     shell: |
       blkid -o device -t PARTLABEL="ceph block"
+    changed_when: false
     failed_when: false
     register: ceph_block_partition_to_erase_path
 
   - name: get ceph journal partitions
     shell: |
       blkid -o device -t PARTLABEL="ceph journal"
+    changed_when: false
     failed_when: false
     register: ceph_journal_partition_to_erase_path
 
   - name: get ceph db partitions
     shell: |
       blkid -o device -t PARTLABEL="ceph block.db"
+    changed_when: false
     failed_when: false
     register: ceph_db_partition_to_erase_path
 
   - name: get ceph wal partitions
     shell: |
       blkid -o device -t PARTLABEL="ceph block.wal"
+    changed_when: false
     failed_when: false
     register: ceph_wal_partition_to_erase_path
 
@@ -563,6 +578,7 @@
   - name: resolve parent device
     command: lsblk --nodeps -no pkname "{{ item }}"
     register: tmp_resolved_parent_device
+    changed_when: false
     with_items: "{{ combined_devices_list }}"
 
   - name: set_fact resolved_parent_device
@@ -574,6 +590,7 @@
       set -o pipefail;
       wipefs --all "{{ item }}"
       dd if=/dev/zero of="{{ item }}" bs=1 count=4096
+    changed_when: false
     with_items: "{{ combined_devices_list }}"
 
   - name: zap ceph journal/block db/block wal partitions
@@ -592,6 +609,7 @@
       partprobe /dev/"{{ item }}"
       udevadm settle --timeout=600
     with_items: "{{ resolved_parent_device }}"
+    changed_when: false
 
   - name: purge ceph mon cluster
 
@@ -806,7 +824,7 @@
   - name: request data removal
     local_action:
       module: command
-        echo requesting data removal
+        echo requesting data removal # noqa 301
     become: false
     notify: remove data
 
@@ -836,6 +854,7 @@
   - name: check for anything running ceph
     command: "ps -u ceph -U ceph"
     register: check_for_running_ceph
+    changed_when: false
     failed_when: check_for_running_ceph.rc == 0
 
   - name: find ceph systemd unit files to remove

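One task in the hunks above, "request data removal", keeps its echo command and is annotated with # noqa 301 instead of getting changed_when: false, presumably because its whole purpose is to report a change so that the "remove data" handler it notifies actually fires; forcing changed_when: false there would keep the handler from ever running. A hedged sketch of that trade-off, with a hypothetical handler body that stands in for the real one defined elsewhere in the playbook:

  tasks:
    # must remain "changed" so the handler below gets notified,
    # hence an inline noqa instead of changed_when: false
    - name: request data removal
      command: echo requesting data removal  # noqa 301
      notify: remove data

  handlers:
    - name: remove data
      file:
        path: /var/lib/ceph/example   # hypothetical target path
        state: absent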
@@ -71,6 +71,7 @@
 
   - name: ensure cephfs mountpoint are unmounted
     command: umount -a -t ceph
+    changed_when: false
 
   - name: find mapped rbd ids
     find:
@@ -80,6 +81,7 @@
 
   - name: use sysfs to unmap rbd devices
     shell: "echo {{ item.path | basename }} > /sys/bus/rbd/remove_single_major"
+    changed_when: false
     with_items: "{{ rbd_mapped_ids.files }}"
 
   - name: unload ceph kernel modules
@@ -268,6 +270,7 @@
     shell: |
       systemctl list-units --all | grep -oE "ceph-osd@([0-9]+).service"
     register: osd_units
+    changed_when: false
     ignore_errors: true
 
   - name: disable ceph osd service
@@ -571,6 +574,7 @@
 
   - name: remove ceph container image
     command: "{{ container_binary }} rmi {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+    changed_when: false
     tags:
       - remove_img
 
@@ -663,6 +667,7 @@
 
   - name: remove ceph data
     shell: rm -rf /var/lib/ceph/*
+    changed_when: false
 
 # (todo): remove this when we are able to manage docker
 # service on atomic host.

@@ -429,6 +429,7 @@
 
   - name: get num_pgs - non container
     command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} pg stat --format json"
+    changed_when: false
     register: ceph_pgs
     delegate_to: "{{ groups[mon_group_name][0] }}"
 
@@ -471,6 +472,7 @@
   - name: get osd versions
     command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
     register: ceph_versions
+    changed_when: false
 
   - name: set_fact ceph_versions_osd
     set_fact:
@@ -1019,6 +1021,7 @@
 
   - name: show ceph status
     command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
+    changed_when: false
     run_once: True
     delegate_to: "{{ groups[mon_group_name][0] }}"
 
@@ -1026,3 +1029,4 @@
     command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} versions"
     run_once: True
     delegate_to: "{{ groups[mon_group_name][0] }}"
+    changed_when: false

@@ -66,6 +66,7 @@
 
   - name: exit playbook, if can not connect to the cluster
     command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+    changed_when: false
     register: ceph_health
     until: ceph_health is succeeded
     retries: 5
@@ -80,11 +81,13 @@
     # removes the MDS from the FS map.
   - name: exit mds when containerized deployment
     command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit"
+    changed_when: false
     when: containerized_deployment | bool
 
   - name: get ceph status
     command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
     register: ceph_status
+    changed_when: false
 
   - name: set_fact current_max_mds
     set_fact:
@@ -156,3 +159,4 @@
   post_tasks:
     - name: show ceph health
       command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+      changed_when: false

@@ -44,6 +44,7 @@
   - name: exit playbook, if can not connect to the cluster
     command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
     register: ceph_health
+    changed_when: false
     until: ceph_health is succeeded
     retries: 5
     delay: 2
@@ -115,6 +116,7 @@
   - name: fail if the mgr is reported in ceph mgr dump
     command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
     register: mgr_dump
+    changed_when: false
     failed_when: mgr_to_kill_hostname in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list)
     until: mgr_to_kill_hostname not in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list)
     retries: 12
@@ -129,3 +131,4 @@
   post_tasks:
     - name: show ceph health
       command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+      changed_when: false

@@ -82,6 +82,7 @@
   - name: exit playbook, if can not connect to the cluster
     command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
     register: ceph_health
+    changed_when: false
     until: ceph_health.stdout.find("HEALTH") > -1
     delegate_to: "{{ mon_host }}"
     retries: 5
@@ -107,6 +108,7 @@
 
   - name: remove monitor from the quorum
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
+    changed_when: false
     failed_when: false
     delegate_to: "{{ mon_host }}"
 
@@ -114,6 +116,7 @@
   - name: verify the monitor is out of the cluster
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
     delegate_to: "{{ mon_host }}"
+    changed_when: false
     failed_when: false
     register: result
     until: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names']
@@ -136,7 +139,9 @@
   - name: show ceph health
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
     delegate_to: "{{ mon_host }}"
+    changed_when: false
 
   - name: show ceph mon status
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
     delegate_to: "{{ mon_host }}"
+    changed_when: false

@@ -75,12 +75,14 @@
   - name: exit playbook, if can not connect to the cluster
     command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
     register: ceph_health
+    changed_when: false
     until: ceph_health.stdout.find("HEALTH") > -1
     retries: 5
     delay: 2
 
   - name: find the host(s) where the osd(s) is/are running on
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
+    changed_when: false
     with_items: "{{ osd_to_kill.split(',') }}"
     register: find_osd_hosts
 
@@ -99,6 +101,7 @@
 
   - name: get ceph-volume lvm list data
     command: "{{ container_run_cmd }} lvm list --format json"
+    changed_when: false
     register: _lvm_list_data
     delegate_to: "{{ item.0 }}"
     loop: "{{ _osd_hosts }}"
@@ -135,6 +138,7 @@
 
   - name: mark osd(s) out of the cluster
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ osd_to_kill.replace(',', ' ') }}"
+    changed_when: false
     run_once: true
 
   - name: stop osd(s) service
@@ -220,11 +224,13 @@
 
   - name: ensure osds are marked down
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ osd_to_kill.replace(',', ' ') }}"
+    changed_when: false
     run_once: true
     delegate_to: "{{ groups[mon_group_name][0] }}"
 
   - name: purge osd(s) from the cluster
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"
+    changed_when: false
     run_once: true
     with_items: "{{ osd_to_kill.split(',') }}"
 
@@ -237,6 +243,8 @@
 
   - name: show ceph health
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
+    changed_when: false
 
   - name: show ceph osd tree
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
+    changed_when: false

@@ -69,6 +69,7 @@
   - name: exit playbook, if can not connect to the cluster
     command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
     register: ceph_health
+    changed_when: false
     until: ceph_health is succeeded
     retries: 5
     delay: 2
@@ -120,3 +121,4 @@
 
   - name: show ceph health
     command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+    changed_when: false

@@ -71,6 +71,7 @@
   - name: exit playbook, if can not connect to the cluster
     command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
     register: ceph_health
+    changed_when: false
     until: ceph_health is succeeded
     retries: 5
     delay: 2
@@ -78,6 +79,7 @@
   - name: get rgw instances
     command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
     register: rgw_instances
+    changed_when: false
 
 
   - name: exit playbook, if the rgw_to_kill doesn't exist
@@ -106,6 +108,7 @@
     command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}"
     register: rgw_to_kill_status
     failed_when: rgw_to_kill_status.rc == 0
+    changed_when: false
     delegate_to: "{{ rgw_host }}"
     retries: 5
     delay: 2
@@ -113,6 +116,7 @@
   - name: exit if rgw_to_kill is reported in ceph status
     command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
     register: ceph_status
+    changed_when: false
     failed_when:
       - (ceph_status.stdout | from_json).services.rgw is defined
       - rgw_to_kill in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
@@ -130,3 +134,4 @@
   post_tasks:
     - name: show ceph health
       command: "{{ container_exec_cmd | default('')}} ceph --cluster {{ cluster }} -s"
+      changed_when: false

@@ -137,6 +137,7 @@
   - name: waiting for the monitor to join the quorum...
     command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status --format json"
     register: ceph_health_raw
+    changed_when: false
     until: >
       hostvars[mon_host]['ansible_hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]
     retries: "{{ health_mon_check_retries }}"
@@ -341,6 +342,7 @@
       {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
     register: ceph_pgs
     delegate_to: "{{ groups[mon_group_name][0] }}"
+    changed_when: false
 
   - name: container - waiting for clean pgs...
     command: >
@@ -354,6 +356,7 @@
     retries: "{{ health_osd_check_retries }}"
     delay: "{{ health_osd_check_delay }}"
     when: (ceph_pgs.stdout | from_json).pg_summary.num_pgs != 0
+    changed_when: false
 
   - name: unset osd flags