diff --git a/infrastructure-playbooks/add-osd.yml b/infrastructure-playbooks/add-osd.yml index 1c2d3e27d..2ebcfe14e 100644 --- a/infrastructure-playbooks/add-osd.yml +++ b/infrastructure-playbooks/add-osd.yml @@ -28,8 +28,7 @@ pre_tasks: - name: gather facts setup: - when: - - not delegate_facts_host | bool + when: not delegate_facts_host | bool - name: gather and delegate facts setup: @@ -39,8 +38,7 @@ - "{{ groups['mons'] }}" - "{{ groups['osds'] }}" run_once: True - when: - - delegate_facts_host | bool + when: delegate_facts_host | bool tasks: - import_role: @@ -63,8 +61,7 @@ - name: gather facts setup: - when: - - not delegate_facts_host | bool + when: not delegate_facts_host | bool - name: gather and delegate facts setup: @@ -74,8 +71,7 @@ - "{{ groups['mons'] }}" - "{{ groups['osds'] }}" run_once: True - when: - - delegate_facts_host | bool + when: delegate_facts_host | bool # this task is needed so we can skip the openstack_config.yml include in roles/ceph-osd - name: set_fact add_osd diff --git a/infrastructure-playbooks/lv-create.yml b/infrastructure-playbooks/lv-create.yml index ec3c72b87..8a3858e70 100644 --- a/infrastructure-playbooks/lv-create.yml +++ b/infrastructure-playbooks/lv-create.yml @@ -1,7 +1,6 @@ - name: creates logical volumes for the bucket index or fs journals on a single device. become: true - hosts: - - osds + hosts: osds vars: logfile: | @@ -56,16 +55,14 @@ vg: "{{ nvme_vg_name }}" size: "{{ journal_size }}" pvs: "{{ nvme_device }}" - with_items: - - "{{ nvme_device_lvs }}" + with_items: "{{ nvme_device_lvs }}" - name: create lvs for fs journals for hdd devices lvol: lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}" vg: "{{ nvme_vg_name }}" size: "{{ journal_size }}" - with_items: - - "{{ hdd_devices }}" + with_items: "{{ hdd_devices }}" - name: create the lv for data portion of the bucket index on the nvme device lvol: @@ -73,8 +70,7 @@ vg: "{{ nvme_vg_name }}" size: "{{ item.size }}" pvs: "{{ nvme_device }}" - with_items: - - "{{ nvme_device_lvs }}" + with_items: "{{ nvme_device_lvs }}" # Make sure all hdd devices have a unique volume group - name: create vgs for all hdd devices @@ -84,8 +80,7 @@ pesize: 4 state: present vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" - with_items: - - "{{ hdd_devices }}" + with_items: "{{ hdd_devices }}" - name: create lvs for the data portion on hdd devices lvol: @@ -93,8 +88,7 @@ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" size: "{{ hdd_lv_size }}" pvs: "{{ item }}" - with_items: - - "{{ hdd_devices }}" + with_items: "{{ hdd_devices }}" - name: "write output for osds.yml to {{ logfile_path }}" become: false diff --git a/infrastructure-playbooks/lv-teardown.yml b/infrastructure-playbooks/lv-teardown.yml index a717f9f7e..cab24250a 100644 --- a/infrastructure-playbooks/lv-teardown.yml +++ b/infrastructure-playbooks/lv-teardown.yml @@ -1,7 +1,6 @@ - name: tear down existing osd filesystems then logical volumes, volume groups, and physical volumes become: true - hosts: - - osds + hosts: osds vars_prompt: - name: ireallymeanit @@ -52,8 +51,7 @@ vg: "{{ nvme_vg_name }}" state: absent force: yes - with_items: - - "{{ nvme_device_lvs }}" + with_items: "{{ nvme_device_lvs }}" - name: tear down any existing hdd data lvs lvol: @@ -61,8 +59,7 @@ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" state: absent force: yes - with_items: - - "{{ hdd_devices }}" + with_items: "{{ hdd_devices }}" - name: tear down any existing lv of journal for bucket index lvol: @@ -70,8 +67,7 @@ vg: "{{ nvme_vg_name }}" state: absent 
force: yes - with_items: - - "{{ nvme_device_lvs }}" + with_items: "{{ nvme_device_lvs }}" - name: tear down any existing lvs of hdd journals lvol: @@ -79,8 +75,7 @@ vg: "{{ nvme_vg_name }}" state: absent force: yes - with_items: - - "{{ hdd_devices }}" + with_items: "{{ hdd_devices }}" ## Volume Groups - name: remove vg on nvme device @@ -94,8 +89,7 @@ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" state: absent force: yes - with_items: - - "{{ hdd_devices }}" + with_items: "{{ hdd_devices }}" ## Physical Vols - name: tear down pv for nvme device @@ -103,5 +97,4 @@ - name: tear down pv for each hdd device command: "pvremove --force --yes {{ item }}" - with_items: - - "{{ hdd_devices }}" + with_items: "{{ hdd_devices }}" diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml index 093c70e41..66700cf06 100644 --- a/infrastructure-playbooks/purge-cluster.yml +++ b/infrastructure-playbooks/purge-cluster.yml @@ -53,8 +53,7 @@ vars: mds_group_name: mdss - hosts: - - "{{ mds_group_name|default('mdss') }}" + hosts: "{{ mds_group_name|default('mdss') }}" gather_facts: false # Already gathered previously @@ -75,8 +74,7 @@ vars: mgr_group_name: mgrs - hosts: - - "{{ mgr_group_name|default('mgrs') }}" + hosts: "{{ mgr_group_name|default('mgrs') }}" gather_facts: false # Already gathered previously @@ -98,8 +96,7 @@ vars: rgw_group_name: rgws - hosts: - - "{{ rgw_group_name|default('rgws') }}" + hosts: "{{ rgw_group_name|default('rgws') }}" gather_facts: false # Already gathered previously @@ -120,8 +117,7 @@ vars: rbdmirror_group_name: rbdmirrors - hosts: - - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" gather_facts: false # Already gathered previously @@ -141,8 +137,7 @@ vars: nfs_group_name: nfss - hosts: - - "{{ nfs_group_name|default('nfss') }}" + hosts: "{{ nfs_group_name|default('nfss') }}" gather_facts: false # Already gathered previously @@ -164,8 +159,7 @@ osd_group_name: osds reboot_osd_node: False - hosts: - - "{{ osd_group_name|default('osds') }}" + hosts: "{{ osd_group_name|default('osds') }}" gather_facts: false # Already gathered previously @@ -368,8 +362,7 @@ - name: resolve parent device command: lsblk --nodeps -no pkname "{{ item }}" register: tmp_resolved_parent_device - with_items: - - "{{ combined_devices_list }}" + with_items: "{{ combined_devices_list }}" - name: set_fact resolved_parent_device set_fact: @@ -395,16 +388,14 @@ parted -s /dev/"{{ item }}" mklabel gpt partprobe /dev/"{{ item }}" udevadm settle --timeout=600 - with_items: - - "{{ resolved_parent_device }}" + with_items: "{{ resolved_parent_device }}" - name: purge ceph mon cluster vars: mon_group_name: mons - hosts: - - "{{ mon_group_name|default('mons') }}" + hosts: "{{ mon_group_name|default('mons') }}" gather_facts: false # already gathered previously @@ -556,8 +547,7 @@ module: command echo requesting data removal become: false - notify: - - remove data + notify: remove data - name: purge dnf cache command: dnf clean all @@ -595,15 +585,13 @@ file: path: "{{ item.path }}" state: absent - with_items: - - "{{ systemd_files.files }}" + with_items: "{{ systemd_files.files }}" when: ansible_service_mgr == 'systemd' - name: purge fetch directory - hosts: - - localhost + hosts: localhost gather_facts: false diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml index ad1938bc5..e198f28c3 100644 --- 
a/infrastructure-playbooks/purge-docker-cluster.yml +++ b/infrastructure-playbooks/purge-docker-cluster.yml @@ -4,8 +4,7 @@ - name: confirm whether user really meant to purge the cluster - hosts: - - localhost + hosts: localhost gather_facts: false @@ -36,8 +35,7 @@ - name: purge ceph mds cluster - hosts: - - "{{ mds_group_name|default('mdss') }}" + hosts: "{{ mds_group_name|default('mdss') }}" become: true @@ -69,8 +67,7 @@ name: "{{ ceph_docker_image }}" tag: "{{ ceph_docker_image_tag }}" force: yes - tags: - remove_img + tags: remove_img ignore_errors: true - name: purge ceph iscsigws cluster @@ -120,14 +117,12 @@ name: "{{ ceph_docker_image }}" tag: "{{ ceph_docker_image_tag }}" force: yes - tags: - remove_img + tags: remove_img ignore_errors: true - name: purge ceph mgr cluster - hosts: - - "{{ mgr_group_name|default('mgrs') }}" + hosts: "{{ mgr_group_name|default('mgrs') }}" become: true tasks: @@ -157,14 +152,12 @@ name: "{{ ceph_docker_image }}" tag: "{{ ceph_docker_image_tag }}" force: yes - tags: - remove_img + tags: remove_img ignore_errors: true - name: purge ceph rgw cluster - hosts: - - "{{ rgw_group_name|default('rgws') }}" + hosts: "{{ rgw_group_name|default('rgws') }}" become: true @@ -215,8 +208,7 @@ - name: purge ceph rbd-mirror cluster - hosts: - - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" become: true @@ -248,14 +240,12 @@ name: "{{ ceph_docker_image }}" tag: "{{ ceph_docker_image_tag }}" force: yes - tags: - remove_img + tags: remove_img - name: purge ceph nfs cluster - hosts: - - "{{ nfs_group_name|default('nfss') }}" + hosts: "{{ nfs_group_name|default('nfss') }}" become: true @@ -296,14 +286,12 @@ name: "{{ ceph_docker_image }}" tag: "{{ ceph_docker_image_tag }}" force: yes - tags: - remove_img + tags: remove_img - name: purge ceph osd cluster - hosts: - - "{{ osd_group_name | default('osds') }}" + hosts: "{{ osd_group_name | default('osds') }}" gather_facts: true become: true @@ -380,8 +368,7 @@ name: "{{ ceph_docker_image }}" tag: "{{ ceph_docker_image_tag }}" force: yes - tags: - remove_img + tags: remove_img ignore_errors: true - name: include vars from group_vars/osds.yml @@ -403,13 +390,11 @@ file: path: "{{ item.path }}" state: absent - with_items: - - "{{ osd_disk_prepare_logs.files }}" + with_items: "{{ osd_disk_prepare_logs.files }}" - name: purge ceph mon cluster - hosts: - - "{{ mon_group_name|default('mons') }}" + hosts: "{{ mon_group_name|default('mons') }}" become: true @@ -451,8 +436,7 @@ name: "{{ ceph_docker_image }}" tag: "{{ ceph_docker_image_tag }}" force: yes - tags: - remove_img + tags: remove_img ignore_errors: true - name: check container hosts @@ -509,8 +493,7 @@ become: true - tags: - with_pkg + tags: with_pkg tasks: - name: check if it is Atomic host @@ -629,8 +612,7 @@ - name: purge fetch directory - hosts: - - localhost + hosts: localhost gather_facts: false diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index 30682f930..06ba8c7b1 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -59,8 +59,7 @@ - name: gather facts setup: - when: - - not delegate_facts_host | bool + when: not delegate_facts_host | bool - name: gather and delegate facts setup: @@ -68,8 +67,7 @@ delegate_facts: True with_items: "{{ groups['all'] }}" run_once: true - when: - - delegate_facts_host | bool + when: delegate_facts_host | bool - set_fact: rolling_update=true @@ -78,8 +76,7 @@ 
health_mon_check_retries: 5 health_mon_check_delay: 15 upgrade_ceph_packages: True - hosts: - - "{{ mon_group_name|default('mons') }}" + hosts: "{{ mon_group_name|default('mons') }}" serial: 1 become: True tasks: @@ -87,8 +84,7 @@ file: path: /etc/profile.d/ceph-aliases.sh state: absent - when: - - containerized_deployment + when: containerized_deployment - name: set mon_host_count set_fact: @@ -97,8 +93,7 @@ - name: fail when less than three monitors fail: msg: "Upgrade of cluster with less than three monitors is not supported." - when: - - mon_host_count | int < 3 + when: mon_host_count | int < 3 - name: select a running monitor set_fact: @@ -150,8 +145,7 @@ enabled: no masked: yes ignore_errors: True - when: - - not containerized_deployment + when: not containerized_deployment # NOTE: we mask the service so the RPM can't restart it # after the package gets upgraded @@ -162,8 +156,7 @@ enabled: no masked: yes ignore_errors: True - when: - - not containerized_deployment + when: not containerized_deployment # only mask the service for mgr because it must be upgraded # after ALL monitors, even when collocated @@ -171,9 +164,8 @@ systemd: name: ceph-mgr@{{ ansible_hostname }} masked: yes - when: - - inventory_hostname in groups[mgr_group_name] | default([]) - or groups[mgr_group_name] | default([]) | length == 0 + when: inventory_hostname in groups[mgr_group_name] | default([]) + or groups[mgr_group_name] | default([]) | length == 0 - name: set osd flags command: ceph --cluster {{ cluster }} osd set {{ item }} @@ -214,8 +206,7 @@ name: ceph-mon@{{ monitor_name }} state: started enabled: yes - when: - - not containerized_deployment + when: not containerized_deployment - name: start ceph mgr systemd: @@ -223,8 +214,7 @@ state: started enabled: yes ignore_errors: True # if no mgr collocated with mons - when: - - not containerized_deployment + when: not containerized_deployment - name: restart containerized ceph mon systemd: @@ -232,8 +222,7 @@ state: restarted enabled: yes daemon_reload: yes - when: - - containerized_deployment + when: containerized_deployment - name: non container | waiting for the monitor to join the quorum... command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" -s --format json @@ -244,8 +233,7 @@ hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"]) retries: "{{ health_mon_check_retries }}" delay: "{{ health_mon_check_delay }}" - when: - - not containerized_deployment + when: not containerized_deployment - name: container | waiting for the containerized monitor to join the quorum... 
command: > @@ -257,22 +245,19 @@ hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"]) retries: "{{ health_mon_check_retries }}" delay: "{{ health_mon_check_delay }}" - when: - - containerized_deployment + when: containerized_deployment - name: upgrade ceph mgr nodes when implicitly collocated on monitors vars: health_mon_check_retries: 5 health_mon_check_delay: 15 upgrade_ceph_packages: True - hosts: - - "{{ mon_group_name|default('mons') }}" + hosts: "{{ mon_group_name|default('mons') }}" serial: 1 become: True tasks: - name: upgrade mgrs when no mgr group explicitly defined in inventory - when: - - groups.get(mgr_group_name, []) | length == 0 + when: groups.get(mgr_group_name, []) | length == 0 block: - name: stop ceph mgr systemd: @@ -302,8 +287,7 @@ vars: upgrade_ceph_packages: True ceph_release: "{{ ceph_stable_release }}" - hosts: - - "{{ mgr_group_name|default('mgrs') }}" + hosts: "{{ mgr_group_name|default('mgrs') }}" serial: 1 become: True tasks: @@ -342,8 +326,7 @@ health_osd_check_delay: 30 upgrade_ceph_packages: True - hosts: - - "{{ osd_group_name|default('osds') }}" + hosts: "{{ osd_group_name|default('osds') }}" serial: 1 become: True tasks: @@ -372,8 +355,7 @@ enabled: no masked: yes with_items: "{{ osd_ids.stdout_lines }}" - when: - - not containerized_deployment + when: not containerized_deployment - name: set num_osds for non container set_fact: @@ -411,8 +393,7 @@ enabled: yes masked: no with_items: "{{ osd_ids.stdout_lines }}" - when: - - not containerized_deployment + when: not containerized_deployment - name: restart containerized ceph osd systemd: @@ -422,8 +403,7 @@ masked: no daemon_reload: yes with_items: "{{ osd_names.stdout_lines }}" - when: - - containerized_deployment + when: containerized_deployment - name: scan ceph-disk osds with ceph-volume if deploying nautilus command: "ceph-volume --cluster={{ cluster }} simple scan" @@ -444,8 +424,7 @@ - name: set_fact docker_exec_cmd_osd set_fact: docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}" - when: - - containerized_deployment + when: containerized_deployment - name: get osd versions command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions" @@ -481,14 +460,12 @@ delegate_to: "{{ groups[mon_group_name][0] }}" retries: "{{ health_osd_check_retries }}" delay: "{{ health_osd_check_delay }}" - when: - - (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0 + when: (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0 - name: unset osd flags - hosts: - - "{{ mon_group_name|default('mons') }}" + hosts: "{{ mon_group_name|default('mons') }}" become: True @@ -501,8 +478,7 @@ - name: set_fact docker_exec_cmd_osd set_fact: docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}" - when: - - containerized_deployment + when: containerized_deployment - name: unset osd flags command: "{{ docker_exec_cmd_update_osd|default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}" @@ -533,8 +509,7 @@ - name: upgrade ceph mdss cluster vars: upgrade_ceph_packages: True - hosts: - - "{{ mds_group_name|default('mdss') }}" + hosts: "{{ mds_group_name|default('mdss') }}" serial: 1 become: True tasks: @@ -544,8 +519,7 @@ state: stopped enabled: no masked: yes - when: - - not containerized_deployment + when: not containerized_deployment - import_role: name: ceph-defaults @@ -570,8 +544,7 @@ state: 
started enabled: yes masked: no - when: - - not containerized_deployment + when: not containerized_deployment - name: restart ceph mds systemd: @@ -580,15 +553,13 @@ enabled: yes masked: no daemon_reload: yes - when: - - containerized_deployment + when: containerized_deployment - name: upgrade ceph rgws cluster vars: upgrade_ceph_packages: True - hosts: - - "{{ rgw_group_name|default('rgws') }}" + hosts: "{{ rgw_group_name|default('rgws') }}" serial: 1 become: True tasks: @@ -613,8 +584,7 @@ enabled: no masked: yes with_items: "{{ rgw_instances }}" - when: - - not containerized_deployment + when: not containerized_deployment - import_role: name: ceph-handler @@ -637,15 +607,13 @@ masked: no daemon_reload: yes with_items: "{{ rgw_instances }}" - when: - - containerized_deployment + when: containerized_deployment - name: upgrade ceph rbd mirror node vars: upgrade_ceph_packages: True - hosts: - - "{{ rbd_mirror_group_name|default('rbdmirrors') }}" + hosts: "{{ rbd_mirror_group_name|default('rbdmirrors') }}" serial: 1 become: True tasks: @@ -679,8 +647,7 @@ state: started enabled: yes masked: no - when: - - not containerized_deployment + when: not containerized_deployment - name: restart containerized ceph rbd mirror systemd: @@ -689,15 +656,13 @@ enabled: yes masked: no daemon_reload: yes - when: - - containerized_deployment + when: containerized_deployment - name: upgrade ceph nfs node vars: upgrade_ceph_packages: True - hosts: - - "{{ nfs_group_name|default('nfss') }}" + hosts: "{{ nfs_group_name|default('nfss') }}" serial: 1 become: True tasks: @@ -711,8 +676,7 @@ enabled: no masked: yes failed_when: false - when: - - not containerized_deployment + when: not containerized_deployment - import_role: name: ceph-defaults @@ -811,8 +775,7 @@ - name: upgrade ceph client node vars: upgrade_ceph_packages: True - hosts: - - "{{ client_group_name|default('clients') }}" + hosts: "{{ client_group_name|default('clients') }}" serial: "{{ client_update_batch | default(20) }}" become: True tasks: @@ -834,8 +797,7 @@ name: ceph-client - name: complete upgrade - hosts: - - all + hosts: all become: True tasks: - import_role: @@ -877,8 +839,7 @@ - name: show ceph status - hosts: - - "{{ mon_group_name|default('mons') }}" + hosts: "{{ mon_group_name|default('mons') }}" become: True tasks: - import_role: @@ -887,8 +848,7 @@ - name: set_fact docker_exec_cmd_status set_fact: docker_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}" - when: - - containerized_deployment + when: containerized_deployment - name: show ceph status command: "{{ docker_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s" diff --git a/infrastructure-playbooks/shrink-mon.yml b/infrastructure-playbooks/shrink-mon.yml index 88634525e..8f3d5a86c 100644 --- a/infrastructure-playbooks/shrink-mon.yml +++ b/infrastructure-playbooks/shrink-mon.yml @@ -14,8 +14,7 @@ - name: gather facts and check the init system - hosts: - - "{{ mon_group_name|default('mons') }}" + hosts: "{{ mon_group_name|default('mons') }}" become: true @@ -23,8 +22,7 @@ - debug: msg="gather facts on all Ceph hosts for following reference" - name: confirm whether user really meant to remove monitor from the ceph cluster - hosts: - - localhost + hosts: localhost become: true vars_prompt: - name: ireallymeanit @@ -40,8 +38,7 @@ fail: msg: "You are about to shrink the only monitor present in the cluster. If you really want to do that, please use the purge-cluster playbook." 
- when: - - groups[mon_group_name] | length | int == 1 + when: groups[mon_group_name] | length | int == 1 - name: exit playbook, if no monitor was given fail: @@ -49,14 +46,12 @@ Exiting shrink-cluster playbook, no monitor was removed. On the command line when invoking the playbook, you can use -e mon_to_kill=ceph-mon01 argument. You can only remove a single monitor each time the playbook runs." - when: - - mon_to_kill is not defined + when: mon_to_kill is not defined - name: exit playbook, if the monitor is not part of the inventory fail: msg: "It seems that the host given is not part of your inventory, please make sure it is." - when: - - mon_to_kill not in groups[mon_group_name] + when: mon_to_kill not in groups[mon_group_name] - name: exit playbook, if user did not mean to shrink cluster fail: @@ -64,8 +59,7 @@ To shrink the cluster, either say 'yes' on the prompt or or use `-e ireallymeanit=yes` on the command line when invoking the playbook" - when: - - ireallymeanit != 'yes' + when: ireallymeanit != 'yes' - import_role: name: ceph-defaults @@ -78,8 +72,7 @@ set_fact: mon_host: "{{ item }}" with_items: "{{ groups[mon_group_name] }}" - when: - - item != mon_to_kill + when: item != mon_to_kill - name: "set_fact docker_exec_cmd build {{ container_binary }} exec command (containerized)" set_fact: @@ -136,15 +129,13 @@ msg: "The monitor has been successfully removed from the cluster. Please remove the monitor entry from the rest of your ceph configuration files, cluster wide." run_once: true - when: - - mon_to_kill_hostname not in result.stdout + when: mon_to_kill_hostname not in result.stdout - name: fail if monitor is still part of the cluster fail: msg: "Monitor appears to still be part of the cluster, please check what happened." run_once: true - when: - - mon_to_kill_hostname in result.stdout + when: mon_to_kill_hostname in result.stdout - name: show ceph health command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s" diff --git a/infrastructure-playbooks/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml index 2402fb8cc..3ff5cc235 100644 --- a/infrastructure-playbooks/shrink-osd.yml +++ b/infrastructure-playbooks/shrink-osd.yml @@ -23,8 +23,7 @@ - name: confirm whether user really meant to remove osd(s) from the cluster - hosts: - - localhost + hosts: localhost become: true diff --git a/infrastructure-playbooks/storage-inventory.yml b/infrastructure-playbooks/storage-inventory.yml index f4cb3fed5..2284587ed 100644 --- a/infrastructure-playbooks/storage-inventory.yml +++ b/infrastructure-playbooks/storage-inventory.yml @@ -7,8 +7,7 @@ - name: gather facts and check the init system - hosts: - - "{{ osd_group_name|default('osds') }}" + hosts: "{{ osd_group_name|default('osds') }}" become: true @@ -17,8 +16,7 @@ - name: query each host for storage device inventory - hosts: - - "{{ osd_group_name|default('osds') }}" + hosts: "{{ osd_group_name|default('osds') }}" become: true diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index 05dcf0a15..786c81448 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -3,8 +3,7 @@ - name: confirm whether user really meant to switch from non-containerized to containerized ceph daemons - hosts: - - localhost + hosts: localhost gather_facts: false @@ -53,8 
+52,7 @@ containerized_deployment: true switch_to_containers: True mon_group_name: mons - hosts: - - "{{ mon_group_name|default('mons') }}" + hosts: "{{ mon_group_name|default('mons') }}" serial: 1 become: true pre_tasks: @@ -147,8 +145,7 @@ - name: switching from non-containerized to containerized ceph mgr - hosts: - - "{{ mgr_group_name|default('mgrs') }}" + hosts: "{{ mgr_group_name|default('mgrs') }}" vars: containerized_deployment: true @@ -217,8 +214,7 @@ containerized_deployment: true osd_group_name: osds - hosts: - - "{{ osd_group_name|default('osds') }}" + hosts: "{{ osd_group_name|default('osds') }}" serial: 1 become: true @@ -284,8 +280,7 @@ shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb changed_when: false failed_when: false - when: - - ldb_files.rc == 0 + when: ldb_files.rc == 0 - name: check if containerized osds are already running command: > @@ -305,10 +300,8 @@ umount /var/lib/ceph/osd/{{ item }} changed_when: false failed_when: false - with_items: - - "{{ osd_dirs.stdout_lines }}" - when: - - osd_running.rc != 0 + with_items: "{{ osd_dirs.stdout_lines }}" + when: osd_running.rc != 0 tasks: - import_role: @@ -344,14 +337,12 @@ delegate_to: "{{ groups[mon_group_name][0] }}" retries: "{{ health_osd_check_retries }}" delay: "{{ health_osd_check_delay }}" - when: - - (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0 + when: (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0 - name: switching from non-containerized to containerized ceph mds - hosts: - - "{{ mds_group_name|default('mdss') }}" + hosts: "{{ mds_group_name|default('mdss') }}" vars: containerized_deployment: true @@ -411,8 +402,7 @@ - name: switching from non-containerized to containerized ceph rgw - hosts: - - "{{ rgw_group_name|default('rgws') }}" + hosts: "{{ rgw_group_name|default('rgws') }}" vars: containerized_deployment: true @@ -469,8 +459,7 @@ - name: switching from non-containerized to containerized ceph rbd-mirror - hosts: - - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" vars: containerized_deployment: true @@ -529,8 +518,7 @@ - name: switching from non-containerized to containerized ceph nfs - hosts: - - "{{ nfs_group_name|default('nfss') }}" + hosts: "{{ nfs_group_name|default('nfss') }}" vars: containerized_deployment: true diff --git a/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml b/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml index 6ff8702dd..ab92b7351 100644 --- a/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml +++ b/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml @@ -481,8 +481,7 @@ service: > name={{ item }} state=stopped - with_items: - - radosgw + with_items: radosgw when: migration_completed.stat.exists == False - name: Wait for radosgw to be down @@ -524,16 +523,14 @@ shell: > {{ item }} chdir=/var/lib/ceph/ - with_items: - - cp etc/ceph/* /etc/ceph/ + with_items: cp etc/ceph/* /etc/ceph/ when: migration_completed.stat.exists == False - name: Start rados gateway service: > name={{ item }} state=started - with_items: - - radosgw + with_items: radosgw when: migration_completed.stat.exists == False - name: Wait for radosgw to be up again diff --git a/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml b/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml index eb7f9f878..c6aadfe18 100644 --- a/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml +++ 
b/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml @@ -27,8 +27,7 @@ journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106 data_typecode: 4fbd7e29-9d25-41b8-afd0-062c0ceff05d devices: [] - hosts: - - "{{ osd_group_name }}" + hosts: "{{ osd_group_name }}" tasks: @@ -68,14 +67,12 @@ - set_fact: owner: 167 group: 167 - when: - - ansible_os_family == "RedHat" + when: ansible_os_family == "RedHat" - set_fact: owner: 64045 group: 64045 - when: - - ansible_os_family == "Debian" + when: ansible_os_family == "Debian" - name: change partitions ownership file: @@ -96,6 +93,5 @@ with_subelements: - "{{ devices }}" - partitions - when: - item.0.device_name | match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') -... \ No newline at end of file + when: item.0.device_name | match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') +... diff --git a/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml b/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml index e999759ed..dc7766c95 100644 --- a/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml +++ b/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml @@ -33,8 +33,7 @@ osd_group_name: osds journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106 osds_journal_devices: [] - hosts: - - "{{ osd_group_name }}" + hosts: "{{ osd_group_name }}" serial: 1 tasks: @@ -51,8 +50,7 @@ msg: exit playbook osd(s) is not on this host with_items: osds_dir_stat.results - when: - - osds_dir_stat is defined and item.stat.exists == false + when: osds_dir_stat is defined and item.stat.exists == false - name: install sgdisk(gdisk) package: @@ -75,40 +73,33 @@ --typecode={{ item.item[1].index }}:{{ journal_typecode }} --partition-guid={{ item.item[1].index }}:{{ item.stdout }} --mbrtogpt -- {{ item.item[0].device_name }} - with_items: - - "{{ osds.results }}" + with_items: "{{ osds.results }}" - name: stop osd(s) service service: name: "ceph-osd@{{ item.item[1].osd_id }}" state: stopped - with_items: - - "{{ osds.results }}" + with_items: "{{ osds.results }}" - name: flush osd(s) journal command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }} - with_items: - - "{{ osds.results }}" + with_items: "{{ osds.results }}" when: osds_journal_devices is defined - name: update osd(s) journal soft link command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal - with_items: - - "{{ osds.results }}" + with_items: "{{ osds.results }}" - name: update osd(s) journal uuid command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid - with_items: - - "{{ osds.results }}" + with_items: "{{ osds.results }}" - name: initialize osd(s) new journal command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }} - with_items: - - "{{ osds.results }}" + with_items: "{{ osds.results }}" - name: start osd(s) service service: name: "ceph-osd@{{ item.item[1].osd_id }}" state: started - with_items: - - "{{ osds.results }}" + with_items: "{{ osds.results }}" diff --git a/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml b/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml index d19fe129c..f1f4796c3 100644 --- a/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml +++ b/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml @@ -96,24 +96,20 @@ --typecode={{ 
item.item[1].index }}:{{ journal_typecode }} --partition-guid={{ item.item[1].index }}:{{ item.stdout }} --mbrtogpt -- {{ item.item[0].device_name }} - with_items: - - "{{ osds_uuid.results }}" + with_items: "{{ osds_uuid.results }}" - name: stop osd(s) service service: name: "ceph-osd@{{ item.item[1].osd_id }}" state: stopped - with_items: - - "{{ osds_uuid.results }}" + with_items: "{{ osds_uuid.results }}" - name: reinitialize osd(s) journal in new ssd command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }} - with_items: - - "{{ osds_uuid.results }}" + with_items: "{{ osds_uuid.results }}" - name: start osd(s) service service: name: "ceph-osd@{{ item.item[1].osd_id }}" state: started - with_items: - - "{{ osds_uuid.results }}" + with_items: "{{ osds_uuid.results }}" diff --git a/infrastructure-playbooks/untested-by-ci/replace-osd.yml b/infrastructure-playbooks/untested-by-ci/replace-osd.yml index fc364097d..bb9b67ce1 100644 --- a/infrastructure-playbooks/untested-by-ci/replace-osd.yml +++ b/infrastructure-playbooks/untested-by-ci/replace-osd.yml @@ -26,8 +26,7 @@ - debug: msg="gather facts on all Ceph hosts for following reference" - name: confirm whether user really meant to replace osd(s) - hosts: - - localhost + hosts: localhost become: true vars_prompt: - name: ireallymeanit @@ -90,8 +89,7 @@ with_items: "{{ osd_hosts }}" delegate_to: "{{ item }}" failed_when: false - when: - - not containerized_deployment + when: not containerized_deployment - name: fail when admin key is not present fail: @@ -112,8 +110,7 @@ - "{{ osd_to_replace.split(',') }}" register: osd_to_replace_disks delegate_to: "{{ item.0 }}" - when: - - containerized_deployment + when: containerized_deployment - name: zapping osd(s) - container shell: > @@ -125,8 +122,7 @@ - "{{ osd_hosts }}" - "{{ osd_to_replace_disks.results }}" delegate_to: "{{ item.0 }}" - when: - - containerized_deployment + when: containerized_deployment - name: zapping osd(s) - non container command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }} @@ -135,8 +131,7 @@ - "{{ osd_hosts }}" - "{{ osd_to_replace_disks.results }}" delegate_to: "{{ item.0 }}" - when: - - not containerized_deployment + when: not containerized_deployment - name: destroying osd(s) command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap @@ -145,8 +140,7 @@ - "{{ osd_hosts }}" - "{{ osd_to_replace.split(',') }}" delegate_to: "{{ item.0 }}" - when: - - not containerized_deployment + when: not containerized_deployment - name: replace osd(s) - prepare - non container command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen) diff --git a/library/ceph_crush.py b/library/ceph_crush.py index 77714bb72..0923c3da9 100644 --- a/library/ceph_crush.py +++ b/library/ceph_crush.py @@ -56,8 +56,7 @@ EXAMPLES = ''' location: "{{ hostvars[item]['osd_crush_location'] }}" containerized: "{{ docker_exec_cmd }}" with_items: "{{ groups[osd_group_name] }}" - when: - - crush_rule_config + when: crush_rule_config ''' RETURN = '''# ''' diff --git a/raw_install_python.yml b/raw_install_python.yml index cd274ee63..f752a6096 100644 --- a/raw_install_python.yml +++ b/raw_install_python.yml @@ -9,22 +9,19 @@ raw: apt-get -y install python-simplejson ignore_errors: yes register: result - when: - - systempython.stat is undefined or not systempython.stat.exists + when: systempython.stat is undefined or not systempython.stat.exists until: result is succeeded - name: install python for fedora raw: dnf -y install python3; ln 
-sf /usr/bin/python3 /usr/bin/python creates=/usr/bin/python ignore_errors: yes register: result - when: - systempython.stat is undefined or not systempython.stat.exists + when: systempython.stat is undefined or not systempython.stat.exists until: (result is succeeded) and ('Failed' not in result.stdout) - name: install python for opensuse raw: zypper -n install python-base creates=/usr/bin/python2.7 ignore_errors: yes register: result - when: - systempython.stat is undefined or not systempython.stat.exists - until: result is succeeded \ No newline at end of file + when: systempython.stat is undefined or not systempython.stat.exists + until: result is succeeded diff --git a/roles/ceph-agent/tasks/pre_requisite.yml b/roles/ceph-agent/tasks/pre_requisite.yml index 3b1ed5d98..8a7890e27 100644 --- a/roles/ceph-agent/tasks/pre_requisite.yml +++ b/roles/ceph-agent/tasks/pre_requisite.yml @@ -7,8 +7,7 @@ state: present register: result until: result is succeeded - tags: - - package-install + tags: package-install - name: create minion.d directory file: diff --git a/roles/ceph-client/tasks/create_users_keys.yml b/roles/ceph-client/tasks/create_users_keys.yml index 53070850b..db6e2a3c9 100644 --- a/roles/ceph-client/tasks/create_users_keys.yml +++ b/roles/ceph-client/tasks/create_users_keys.yml @@ -2,15 +2,13 @@ - name: set_fact keys_tmp - preserve backward compatibility after the introduction of the ceph_keys module set_fact: keys_tmp: "{{ keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap|default(''), 'mds': item.mds_cap|default(''), 'mgr': item.mgr_cap|default('') } , 'mode': item.mode } ] }}" - when: - - item.get('mon_cap', None) # it's enough to assume we are running an old-fashionned syntax simply by checking the presence of mon_cap since every key needs this cap + when: item.get('mon_cap', None) # it's enough to assume we are running an old-fashioned syntax simply by checking the presence of mon_cap since every key needs this cap with_items: "{{ keys }}" - name: set_fact keys - override keys_tmp with keys set_fact: keys: "{{ keys_tmp }}" - when: - - keys_tmp is defined + when: keys_tmp is defined # dummy container setup is only supported on x86_64 # when running with containerized_deployment: true this task @@ -22,8 +20,7 @@ name: "{{ item }}" groups: _filtered_clients with_items: "{{ groups[client_group_name] }}" - when: - - (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment) + when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment) - name: set_fact delegated_node set_fact: @@ -58,8 +55,7 @@ - name: slurp client cephx key(s) slurp: src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring" - with_items: - - "{{ keys }}" + with_items: "{{ keys }}" register: slurp_client_keys delegate_to: "{{ delegated_node }}" when: @@ -129,8 +125,7 @@ with_items: "{{ pools | unique }}" changed_when: false delegate_to: "{{ delegated_node }}" - when: - - item.application is defined + when: item.application is defined - name: get client cephx keys copy: @@ -139,7 +134,5 @@ mode: "{{ item.item.get('mode', '0600') }}" owner: "{{ ceph_uid }}" group: "{{ ceph_uid }}" - with_items: - - "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}" + with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}" - when: - - not item.get('skipped', False) + when: not item.get('skipped', False) diff --git
a/roles/ceph-client/tasks/main.yml b/roles/ceph-client/tasks/main.yml index 87ea5fd70..86794bc4a 100644 --- a/roles/ceph-client/tasks/main.yml +++ b/roles/ceph-client/tasks/main.yml @@ -4,5 +4,4 @@ - name: include create_users_keys.yml include_tasks: create_users_keys.yml - when: - - user_config + when: user_config diff --git a/roles/ceph-common/tasks/configure_cluster_name.yml b/roles/ceph-common/tasks/configure_cluster_name.yml index 93d00a6df..ab7e51853 100644 --- a/roles/ceph-common/tasks/configure_cluster_name.yml +++ b/roles/ceph-common/tasks/configure_cluster_name.yml @@ -6,8 +6,7 @@ create: yes line: "CLUSTER={{ cluster }}" regexp: "^CLUSTER=" - when: - - ansible_os_family in ["RedHat", "Suse"] + when: ansible_os_family in ["RedHat", "Suse"] # NOTE(leseb): we are performing the following check # to ensure any Jewel installation will not fail. @@ -20,8 +19,7 @@ # - All previous versions from Canonical # - Infernalis from ceph.com - name: debian based systems - configure cluster name - when: - - ansible_os_family == "Debian" + when: ansible_os_family == "Debian" block: - name: check /etc/default/ceph exist stat: @@ -30,8 +28,7 @@ check_mode: no - name: configure cluster name - when: - - etc_default_ceph.stat.exists + when: etc_default_ceph.stat.exists block: - name: when /etc/default/ceph is not dir lineinfile: @@ -40,8 +37,7 @@ create: yes regexp: "^CLUSTER=" line: "CLUSTER={{ cluster }}" - when: - - not etc_default_ceph.stat.isdir + when: not etc_default_ceph.stat.isdir - name: when /etc/default/ceph is dir lineinfile: @@ -50,5 +46,4 @@ create: yes regexp: "^CLUSTER=" line: "CLUSTER={{ cluster }}" - when: - - etc_default_ceph.stat.isdir + when: etc_default_ceph.stat.isdir diff --git a/roles/ceph-common/tasks/configure_memory_allocator.yml b/roles/ceph-common/tasks/configure_memory_allocator.yml index 7810e1b34..b76145777 100644 --- a/roles/ceph-common/tasks/configure_memory_allocator.yml +++ b/roles/ceph-common/tasks/configure_memory_allocator.yml @@ -9,8 +9,7 @@ when: - ansible_os_family == 'Debian' - etc_default_ceph.stat.exists - notify: - - restart ceph osds + notify: restart ceph osds - name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat lineinfile: @@ -19,7 +18,5 @@ create: yes regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=" line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}" - when: - - ansible_os_family == 'RedHat' - notify: - - restart ceph osds + when: ansible_os_family == 'RedHat' + notify: restart ceph osds diff --git a/roles/ceph-common/tasks/create_rbd_client_dir.yml b/roles/ceph-common/tasks/create_rbd_client_dir.yml index c55909d76..f893bbcfc 100644 --- a/roles/ceph-common/tasks/create_rbd_client_dir.yml +++ b/roles/ceph-common/tasks/create_rbd_client_dir.yml @@ -9,5 +9,4 @@ with_items: - "{{ rbd_client_admin_socket_path }}" - "{{ rbd_client_log_path }}" - when: - - rbd_client_directories + when: rbd_client_directories diff --git a/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml index 27300ad7f..1a653b198 100644 --- a/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml @@ -1,25 +1,20 @@ --- - name: include debian_community_repository.yml include_tasks: debian_community_repository.yml - when: - - ceph_repository == 'community' + when: ceph_repository == 'community' - name: include 
debian_rhcs_repository.yml include_tasks: debian_rhcs_repository.yml - when: - - ceph_repository == 'rhcs' + when: ceph_repository == 'rhcs' - name: include debian_dev_repository.yml include_tasks: debian_dev_repository.yml - when: - - ceph_repository == 'dev' + when: ceph_repository == 'dev' - name: include debian_custom_repository.yml include_tasks: debian_custom_repository.yml - when: - - ceph_repository == 'custom' + when: ceph_repository == 'custom' - name: include debian_uca_repository.yml include_tasks: debian_uca_repository.yml - when: - - ceph_repository == 'uca' \ No newline at end of file + when: ceph_repository == 'uca' diff --git a/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml b/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml index 941f23929..6f18a11ba 100644 --- a/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml @@ -3,29 +3,25 @@ file: path: /tmp state: directory - when: - - use_installer + when: use_installer - name: use mktemp to create name for rundep command: "mktemp /tmp/rundep.XXXXXXXX" register: rundep_location - when: - - use_installer + when: use_installer - name: copy rundep copy: src: "{{ ansible_dir }}/rundep" dest: "{{ item }}" with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}" - when: - - use_installer + when: use_installer - name: install ceph dependencies script: "{{ ansible_dir }}/rundep_installer.sh {{ item }}" become: true with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}" - when: - - use_installer + when: use_installer - name: ensure rsync is installed package: diff --git a/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml index b0190df7f..a18801120 100644 --- a/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml @@ -1,23 +1,19 @@ --- - name: include redhat_community_repository.yml include_tasks: redhat_community_repository.yml - when: - - ceph_repository == 'community' + when: ceph_repository == 'community' - name: include redhat_rhcs_repository.yml include_tasks: redhat_rhcs_repository.yml - when: - - ceph_repository == 'rhcs' + when: ceph_repository == 'rhcs' - name: include redhat_dev_repository.yml include_tasks: redhat_dev_repository.yml - when: - - ceph_repository == 'dev' + when: ceph_repository == 'dev' - name: include redhat_custom_repository.yml include_tasks: redhat_custom_repository.yml - when: - - ceph_repository == 'custom' + when: ceph_repository == 'custom' # Remove yum caches so yum doesn't get confused if we are reinstalling a different ceph version - name: purge yum cache @@ -25,5 +21,4 @@ args: warn: no changed_when: false - when: - ansible_pkg_mgr == 'yum' + when: ansible_pkg_mgr == 'yum' diff --git a/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml index 1e23e3ed3..689cbd915 100644 --- a/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml @@ -1,5 +1,4 @@ --- - name: include suse_obs_repository.yml include_tasks: suse_obs_repository.yml - when: - - ceph_repository == 'obs' + when: ceph_repository == 'obs' diff 
--git a/roles/ceph-common/tasks/installs/debian_rhcs_repository.yml b/roles/ceph-common/tasks/installs/debian_rhcs_repository.yml index 4638a991e..200654317 100644 --- a/roles/ceph-common/tasks/installs/debian_rhcs_repository.yml +++ b/roles/ceph-common/tasks/installs/debian_rhcs_repository.yml @@ -9,10 +9,8 @@ - name: include prerequisite_rhcs_iso_install_debian.yml include_tasks: prerequisite_rhcs_iso_install_debian.yml - when: - - ceph_repository_type == 'iso' + when: ceph_repository_type == 'iso' - name: include prerequisite_rhcs_cdn_install_debian.yml include_tasks: prerequisite_rhcs_cdn_install_debian.yml - when: - - ceph_repository_type == 'cdn' + when: ceph_repository_type == 'cdn' diff --git a/roles/ceph-common/tasks/installs/install_on_debian.yml b/roles/ceph-common/tasks/installs/install_on_debian.yml index 9a3978f00..78143a61a 100644 --- a/roles/ceph-common/tasks/installs/install_on_debian.yml +++ b/roles/ceph-common/tasks/installs/install_on_debian.yml @@ -1,8 +1,7 @@ --- - name: include configure_debian_repository_installation.yml include_tasks: configure_debian_repository_installation.yml - when: - - ceph_origin == 'repository' + when: ceph_origin == 'repository' - name: update apt cache if cache_valid_time has expired apt: diff --git a/roles/ceph-common/tasks/installs/install_on_redhat.yml b/roles/ceph-common/tasks/installs/install_on_redhat.yml index 52e4eb0e7..93e261484 100644 --- a/roles/ceph-common/tasks/installs/install_on_redhat.yml +++ b/roles/ceph-common/tasks/installs/install_on_redhat.yml @@ -1,15 +1,12 @@ --- - name: include configure_redhat_repository_installation.yml include_tasks: configure_redhat_repository_installation.yml - when: - - ceph_origin == 'repository' + when: ceph_origin == 'repository' - name: include configure_redhat_local_installation.yml include_tasks: configure_redhat_local_installation.yml - when: - - ceph_origin == 'local' + when: ceph_origin == 'local' - name: include install_redhat_packages.yml include_tasks: install_redhat_packages.yml - when: - - (ceph_origin == 'repository' or ceph_origin == 'distro') + when: (ceph_origin == 'repository' or ceph_origin == 'distro') diff --git a/roles/ceph-common/tasks/installs/install_on_suse.yml b/roles/ceph-common/tasks/installs/install_on_suse.yml index 50aa97e52..e185d56dd 100644 --- a/roles/ceph-common/tasks/installs/install_on_suse.yml +++ b/roles/ceph-common/tasks/installs/install_on_suse.yml @@ -5,13 +5,11 @@ - name: Check for supported installation method on suse fail: msg: "Unsupported installation method origin:{{ ceph_origin }} repo:{{ ceph_repository }}'" - when: - - ceph_origin != 'distro' or (ceph_origin == 'repository' and ceph_repository != 'obs') + when: ceph_origin != 'distro' or (ceph_origin == 'repository' and ceph_repository != 'obs') - name: include configure_suse_repository_installation.yml include_tasks: configure_suse_repository_installation.yml - when: - - ceph_origin == 'repository' + when: ceph_origin == 'repository' - name: install dependencies zypper: diff --git a/roles/ceph-common/tasks/installs/install_redhat_packages.yml b/roles/ceph-common/tasks/installs/install_redhat_packages.yml index d692c1758..28b56fd9a 100644 --- a/roles/ceph-common/tasks/installs/install_redhat_packages.yml +++ b/roles/ceph-common/tasks/installs/install_redhat_packages.yml @@ -5,8 +5,7 @@ state: present register: result until: result is succeeded - when: - - ansible_distribution == 'RedHat' + when: ansible_distribution == 'RedHat' - name: install centos dependencies yum: @@ -14,12 +13,11 @@ 
state: present register: result until: result is succeeded - when: - - ansible_distribution == 'CentOS' + when: ansible_distribution == 'CentOS' - name: install redhat ceph packages package: name: "{{ redhat_ceph_pkgs | unique }}" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" register: result - until: result is succeeded \ No newline at end of file + until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/prerequisite_rhcs_cdn_install.yml b/roles/ceph-common/tasks/installs/prerequisite_rhcs_cdn_install.yml index 7deeedd82..648a10e3b 100644 --- a/roles/ceph-common/tasks/installs/prerequisite_rhcs_cdn_install.yml +++ b/roles/ceph-common/tasks/installs/prerequisite_rhcs_cdn_install.yml @@ -2,17 +2,14 @@ - name: enable red hat storage monitor repository rhsm_repository: name: "rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms" - when: - - (mon_group_name in group_names or mgr_group_name in group_names) + when: (mon_group_name in group_names or mgr_group_name in group_names) - name: enable red hat storage osd repository rhsm_repository: name: "rhel-7-server-rhceph-{{ ceph_rhcs_version }}-osd-rpms" - when: - - osd_group_name in group_names + when: osd_group_name in group_names - name: enable red hat storage tools repository rhsm_repository: name: "rhel-7-server-rhceph-{{ ceph_rhcs_version }}-tools-rpms" - when: - - (rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names or iscsi_gw_group_name in group_names or client_group_name in group_names) \ No newline at end of file + when: (rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names or iscsi_gw_group_name in group_names or client_group_name in group_names) diff --git a/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install.yml b/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install.yml index b6a7728c3..0b7783a4c 100644 --- a/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install.yml +++ b/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install.yml @@ -12,8 +12,7 @@ path: "{{ ceph_rhcs_iso_path | dirname }}" state: directory recurse: yes - when: - - ceph_rhcs_iso_path | dirname != '/' + when: ceph_rhcs_iso_path | dirname != '/' - name: fetch the red hat storage iso from the ansible server for redhat systems copy: diff --git a/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install_debian.yml b/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install_debian.yml index 177abc672..ade0364ac 100644 --- a/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install_debian.yml +++ b/roles/ceph-common/tasks/installs/prerequisite_rhcs_iso_install_debian.yml @@ -12,8 +12,7 @@ path: "{{ ceph_rhcs_iso_path | dirname }}" state: directory recurse: yes - when: - - ceph_rhcs_iso_path | dirname != '/' + when: ceph_rhcs_iso_path | dirname != '/' - name: fetch the red hat storage iso from the ansible server for debian systems copy: diff --git a/roles/ceph-common/tasks/installs/redhat_community_repository.yml b/roles/ceph-common/tasks/installs/redhat_community_repository.yml index 435d10c72..332491d4a 100644 --- a/roles/ceph-common/tasks/installs/redhat_community_repository.yml +++ b/roles/ceph-common/tasks/installs/redhat_community_repository.yml @@ -4,8 +4,7 @@ name: yum-plugin-priorities register: result until: result is succeeded - tags: - - with_pkg + tags: with_pkg - name: configure red hat ceph community repository stable key rpm_key: @@ -38,4 +37,4 @@ file: ceph_stable priority: 2 
register: result - until: result is succeeded \ No newline at end of file + until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml b/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml index cfb5e481a..78cfca717 100644 --- a/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml +++ b/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml @@ -1,10 +1,8 @@ --- - name: include prerequisite_rhcs_iso_install.yml include_tasks: prerequisite_rhcs_iso_install.yml - when: - - ceph_repository_type == 'iso' + when: ceph_repository_type == 'iso' - name: include prerequisite_rhcs_cdn_install.yml include_tasks: prerequisite_rhcs_cdn_install.yml - when: - - ceph_repository_type == 'cdn' + when: ceph_repository_type == 'cdn' diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index fceb190b9..6c33f9b71 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -2,27 +2,22 @@ - name: include_tasks installs/install_on_redhat.yml include_tasks: installs/install_on_redhat.yml when: ansible_os_family == 'RedHat' - tags: - - package-install + tags: package-install - name: include_tasks installs/install_on_suse.yml include_tasks: installs/install_on_suse.yml when: ansible_os_family == 'Suse' - tags: - - package-install + tags: package-install - name: include installs/install_on_debian.yml include_tasks: installs/install_on_debian.yml - tags: - - package-install - when: - - ansible_os_family == 'Debian' + tags: package-install + when: ansible_os_family == 'Debian' - name: include_tasks installs/install_on_clear.yml include_tasks: installs/install_on_clear.yml when: ansible_os_family == 'ClearLinux' - tags: - - package-install + tags: package-install - name: get ceph version command: ceph --version @@ -37,12 +32,10 @@ # override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory - name: include release-rhcs.yml include_tasks: release-rhcs.yml - when: - - ceph_repository in ['rhcs', 'dev'] + when: ceph_repository in ['rhcs', 'dev'] or ceph_origin == 'distro' - tags: - - always + tags: always - name: set_fact ceph_release - override ceph_release with ceph_stable_release set_fact: @@ -50,8 +43,7 @@ when: - ceph_origin == 'repository' - ceph_repository not in ['dev', 'rhcs'] - tags: - - always + tags: always - name: include create_rbd_client_dir.yml include_tasks: create_rbd_client_dir.yml diff --git a/roles/ceph-common/tasks/release-rhcs.yml b/roles/ceph-common/tasks/release-rhcs.yml index d0ac6d3a4..eee58e074 100644 --- a/roles/ceph-common/tasks/release-rhcs.yml +++ b/roles/ceph-common/tasks/release-rhcs.yml @@ -2,29 +2,24 @@ - name: set_fact ceph_release jewel set_fact: ceph_release: jewel - when: - - ceph_version.split('.')[0] is version_compare('10', '==') + when: ceph_version.split('.')[0] is version_compare('10', '==') - name: set_fact ceph_release kraken set_fact: ceph_release: kraken - when: - - ceph_version.split('.')[0] is version_compare('11', '==') + when: ceph_version.split('.')[0] is version_compare('11', '==') - name: set_fact ceph_release luminous set_fact: ceph_release: luminous - when: - - ceph_version.split('.')[0] is version_compare('12', '==') + when: ceph_version.split('.')[0] is version_compare('12', '==') - name: set_fact ceph_release mimic set_fact: ceph_release: mimic - when: - - ceph_version.split('.')[0] is version_compare('13', '==') + when: ceph_version.split('.')[0] is version_compare('13', '==') - name: set_fact 
diff --git a/roles/ceph-common/tasks/release-rhcs.yml b/roles/ceph-common/tasks/release-rhcs.yml
index d0ac6d3a4..eee58e074 100644
--- a/roles/ceph-common/tasks/release-rhcs.yml
+++ b/roles/ceph-common/tasks/release-rhcs.yml
@@ -2,29 +2,24 @@
 - name: set_fact ceph_release jewel
   set_fact:
     ceph_release: jewel
-  when:
-    - ceph_version.split('.')[0] is version_compare('10', '==')
+  when: ceph_version.split('.')[0] is version_compare('10', '==')
 
 - name: set_fact ceph_release kraken
   set_fact:
     ceph_release: kraken
-  when:
-    - ceph_version.split('.')[0] is version_compare('11', '==')
+  when: ceph_version.split('.')[0] is version_compare('11', '==')
 
 - name: set_fact ceph_release luminous
   set_fact:
     ceph_release: luminous
-  when:
-    - ceph_version.split('.')[0] is version_compare('12', '==')
+  when: ceph_version.split('.')[0] is version_compare('12', '==')
 
 - name: set_fact ceph_release mimic
   set_fact:
     ceph_release: mimic
-  when:
-    - ceph_version.split('.')[0] is version_compare('13', '==')
+  when: ceph_version.split('.')[0] is version_compare('13', '==')
 
 - name: set_fact ceph_release nautilus
   set_fact:
     ceph_release: nautilus
-  when:
-    - ceph_version.split('.')[0] is version_compare('14', '==')
+  when: ceph_version.split('.')[0] is version_compare('14', '==')
diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml
index 05f4700ad..29f6071ad 100644
--- a/roles/ceph-config/tasks/main.yml
+++ b/roles/ceph-config/tasks/main.yml
@@ -1,8 +1,7 @@
 ---
 - name: include create_ceph_initial_dirs.yml
   include_tasks: create_ceph_initial_dirs.yml
-  when:
-    - containerized_deployment|bool
+  when: containerized_deployment|bool
 
 - name: config file operations related to OSDs
   when:
@@ -14,8 +13,7 @@
     - name: count number of osds for lvm scenario
       set_fact:
        num_osds: "{{ lvm_volumes | length | int }}"
-      when:
-        - lvm_volumes | default([]) | length > 0
+      when: lvm_volumes | default([]) | length > 0
 
     - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
       ceph_volume:
@@ -33,8 +31,7 @@
       environment:
         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
         PYTHONIOENCODING: utf-8
-      when:
-        - devices | default([]) | length > 0
+      when: devices | default([]) | length > 0
 
     - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report'
       set_fact:
@@ -65,8 +62,7 @@
 # ceph-common
 - name: config file operation for non-containerized scenarios
-  when:
-    - not containerized_deployment|bool
+  when: not containerized_deployment|bool
   block:
     - name: create ceph conf directory
       file:
@@ -102,8 +98,7 @@
         state: directory
         mode: "0755"
       delegate_to: localhost
-      when:
-        - ceph_conf_local
+      when: ceph_conf_local
 
     - name: "generate {{ cluster }}.conf configuration file locally"
       config_template:
@@ -120,8 +115,7 @@
         - ceph_conf_local
 
 - name: config file operations for containerized scenarios
-  when:
-    - containerized_deployment|bool
+  when: containerized_deployment|bool
   block:
     - name: create a local fetch directory if it does not exist
       file:
diff --git a/roles/ceph-container-common/tasks/fetch_image.yml b/roles/ceph-container-common/tasks/fetch_image.yml
index 040a0f493..f894111b3 100644
--- a/roles/ceph-container-common/tasks/fetch_image.yml
+++ b/roles/ceph-container-common/tasks/fetch_image.yml
@@ -183,8 +183,7 @@
   until: docker_image.rc == 0
   retries: "{{ docker_pull_retry }}"
   delay: 10
-  when:
-    - (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
+  when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
 
 - name: "inspecting {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
   command: "{{ container_binary }} inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
@@ -195,15 +194,13 @@
 - name: set_fact image_repodigest_after_pulling
   set_fact:
     image_repodigest_after_pulling: "{{ (image_inspect_after_pull.stdout | from_json)[0].RepoDigests[0].split('@')[1] }}"
-  when:
-    - image_inspect_after_pull.rc == 0
+  when: image_inspect_after_pull.rc == 0
 
 - name: set_fact ceph_mon_image_updated
   set_fact:
     ceph_mon_image_updated: "{{ ceph_mon_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
   changed_when: true
-  notify:
-    - restart ceph mons
+  notify: restart ceph mons
   when:
     - mon_group_name in group_names
     - ceph_mon_container_inspect_before_pull.get('rc') == 0
@@ -213,8 +210,7 @@
   set_fact:
     ceph_osd_image_updated: "{{ ceph_osd_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
   changed_when: true
-  notify:
-    - restart ceph osds
+  notify: restart ceph osds
   when:
     - osd_group_name in group_names
     - ceph_osd_container_inspect_before_pull.get('rc') == 0
@@ -224,8 +220,7 @@
   set_fact:
     ceph_mds_image_updated: "{{ ceph_mds_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
   changed_when: true
-  notify:
-    - restart ceph mdss
+  notify: restart ceph mdss
   when:
     - mds_group_name in group_names
     - ceph_mds_container_inspect_before_pull.get('rc') == 0
@@ -235,8 +230,7 @@
   set_fact:
     ceph_rgw_image_updated: "{{ ceph_rgw_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
   changed_when: true
-  notify:
-    - restart ceph rgws
+  notify: restart ceph rgws
   when:
     - rgw_group_name in group_names
     - ceph_rgw_container_inspect_before_pull.get('rc') == 0
@@ -246,8 +240,7 @@
   set_fact:
     ceph_mgr_image_updated: "{{ ceph_mgr_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
   changed_when: true
-  notify:
-    - restart ceph mgrs
+  notify: restart ceph mgrs
   when:
     - mgr_group_name in group_names
     - ceph_mgr_container_inspect_before_pull.get('rc') == 0
@@ -257,8 +250,7 @@
   set_fact:
     ceph_rbd_mirror_image_updated: "{{ ceph_rbd_mirror_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
   changed_when: true
-  notify:
-    - restart ceph rbdmirrors
+  notify: restart ceph rbdmirrors
   when:
     - rbdmirror_group_name in group_names
     - ceph_rbd_mirror_container_inspect_before_pull.get('rc') == 0
@@ -268,8 +260,7 @@
   set_fact:
     ceph_nfs_image_updated: "{{ ceph_nfs_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
   changed_when: true
-  notify:
-    - restart ceph nfss
+  notify: restart ceph nfss
   when:
     - nfs_group_name in group_names
     - ceph_nfs_container_inspect_before_pull.get('rc') == 0
@@ -280,25 +271,21 @@
     {{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
   delegate_to: localhost
-  when:
-    - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+  when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
   run_once: true
 
 - name: copy ceph dev image file
   copy:
     src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
     dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
-  when:
-    - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+  when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
 
 - name: load ceph dev image
   command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
-  when:
-    - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+  when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
 
 - name: remove tmp ceph dev image file
   file:
     name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
     state: absent
-  when:
-    - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+  when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
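`notify:` accepts the same shorthand: with a single handler, the scalar form is equivalent to the one-item list used before. A hedged sketch of how one of the collapsed notifications above pairs with its handler (the restart script path follows the pattern in ceph-handler, the variable names are illustrative):

    - name: set_fact ceph_mon_image_updated
      set_fact:
        ceph_mon_image_updated: "{{ digest_before != digest_after }}"   # illustrative variables
      changed_when: true
      notify: restart ceph mons        # same effect as a one-item list

    # elsewhere, in the handlers file
    - name: restart ceph mon daemon(s)
      command: /usr/bin/env bash /tmp/restart_mon_daemon.sh
      listen: "restart ceph mons"
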
diff --git a/roles/ceph-container-common/tasks/main.yml b/roles/ceph-container-common/tasks/main.yml
index 418d3594a..9062c072b 100644
--- a/roles/ceph-container-common/tasks/main.yml
+++ b/roles/ceph-container-common/tasks/main.yml
@@ -1,27 +1,23 @@
 ---
 - name: include pre_requisites/prerequisites.yml
   include_tasks: pre_requisites/prerequisites.yml
-  when:
-    - not is_atomic
+  when: not is_atomic
 
 - name: get docker version
   command: docker --version
   changed_when: false
   check_mode: no
   register: ceph_docker_version
-  when:
-    - container_binary == 'docker'
+  when: container_binary == 'docker'
 
 - name: set_fact ceph_docker_version ceph_docker_version.stdout.split
   set_fact:
     ceph_docker_version: "{{ ceph_docker_version.stdout.split(' ')[2] }}"
-  when:
-    - container_binary == 'docker'
+  when: container_binary == 'docker'
 
 - name: include fetch_image.yml
   include_tasks: fetch_image.yml
-  tags:
-    - fetch_container_image
+  tags: fetch_container_image
 
 - name: get ceph version
   command: >
@@ -37,4 +33,4 @@
     ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
 
 - name: include release.yml
-  include_tasks: release.yml
\ No newline at end of file
+  include_tasks: release.yml
diff --git a/roles/ceph-container-common/tasks/pre_requisites/prerequisites.yml b/roles/ceph-container-common/tasks/pre_requisites/prerequisites.yml
index 737c5c139..f64995b78 100644
--- a/roles/ceph-container-common/tasks/pre_requisites/prerequisites.yml
+++ b/roles/ceph-container-common/tasks/pre_requisites/prerequisites.yml
@@ -13,8 +13,7 @@
   when:
     - ansible_os_family == 'Debian'
     - container_package_name == 'docker-ce'
-  tags:
-    with_pkg
+  tags: with_pkg
 
 # ensure extras enabled for docker
 - name: enable extras on centos
@@ -32,8 +31,7 @@
   package:
     name: ['{{ container_package_name }}', '{{ container_binding_name }}']
     update_cache: true
-  tags:
-    with_pkg
+  tags: with_pkg
 
 - name: start container service
   service:
diff --git a/roles/ceph-container-common/tasks/release.yml b/roles/ceph-container-common/tasks/release.yml
index d0ac6d3a4..0f0980668 100644
--- a/roles/ceph-container-common/tasks/release.yml
+++ b/roles/ceph-container-common/tasks/release.yml
@@ -2,29 +2,24 @@
 - name: set_fact ceph_release jewel
   set_fact:
     ceph_release: jewel
-  when:
-    - ceph_version.split('.')[0] is version_compare('10', '==')
+  when: ceph_version.split('.')[0] is version_compare('10', '==')
 
 - name: set_fact ceph_release kraken
   set_fact:
     ceph_release: kraken
-  when:
-    - ceph_version.split('.')[0] is version_compare('11', '==')
+  when: ceph_version.split('.')[0] is version_compare('11', '==')
 
 - name: set_fact ceph_release luminous
   set_fact:
     ceph_release: luminous
-  when:
-    - ceph_version.split('.')[0] is version_compare('12', '==')
+  when: ceph_version.split('.')[0] is version_compare('12', '==')
 
 - name: set_fact ceph_release mimic
   set_fact:
     ceph_release: mimic
-  when:
-    - ceph_version.split('.')[0] is version_compare('13', '==')
+  when: ceph_version.split('.')[0] is version_compare('13', '==')
 
 - name: set_fact ceph_release nautilus
   set_fact:
     ceph_release: nautilus
-  when:
-    - ceph_version.split('.')[0] is version_compare('14', '==')
+  when: ceph_version.split('.')[0] is version_compare('14', '==')
diff --git a/roles/ceph-facts/tasks/facts.yml b/roles/ceph-facts/tasks/facts.yml
index ca1470064..74de769aa 100644
--- a/roles/ceph-facts/tasks/facts.yml
+++ b/roles/ceph-facts/tasks/facts.yml
@@ -30,14 +30,12 @@
 - name: set_fact monitor_name ansible_hostname
   set_fact:
     monitor_name: "{{ ansible_hostname }}"
-  when:
-    - not mon_use_fqdn
+  when: not mon_use_fqdn
 
 - name: set_fact monitor_name ansible_fqdn
   set_fact:
     monitor_name: "{{ ansible_fqdn }}"
-  when:
-    - mon_use_fqdn
+  when: mon_use_fqdn
 
 - name: set_fact docker_exec_cmd
   set_fact:
@@ -67,8 +65,7 @@
   set_fact:
     ceph_current_status:
       rc: 1
-  when:
-    - rolling_update or groups.get(mon_group_name, []) | length == 0
+  when: rolling_update or groups.get(mon_group_name, []) | length == 0
 
 - name: create a local fetch directory if it does not exist
   file:
@@ -77,21 +74,18 @@
   delegate_to: localhost
   changed_when: false
   become: false
-  when:
-    - (cephx or generate_fsid)
+  when: cephx or generate_fsid
 
 - name: get current fsid
   command: "{{ timeout_command }} {{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }} config get fsid"
   register: rolling_update_fsid
   delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
-  when:
-    - rolling_update
+  when: rolling_update
 
 - name: set_fact fsid
   set_fact:
     fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
-  when:
-    - rolling_update
+  when: rolling_update
 
 - name: set_fact ceph_current_status (convert to json)
   set_fact:
@@ -103,8 +97,7 @@
 - name: set_fact fsid from ceph_current_status
   set_fact:
     fsid: "{{ ceph_current_status.fsid }}"
-  when:
-    - ceph_current_status.fsid is defined
+  when: ceph_current_status.fsid is defined
 
 - name: fsid realted tasks
   when:
@@ -126,34 +119,29 @@
 - name: set_fact mds_name ansible_hostname
   set_fact:
     mds_name: "{{ ansible_hostname }}"
-  when:
-    - not mds_use_fqdn
+  when: not mds_use_fqdn
 
 - name: set_fact mds_name ansible_fqdn
   set_fact:
     mds_name: "{{ ansible_fqdn }}"
-  when:
-    - mds_use_fqdn
+  when: mds_use_fqdn
 
 - name: set_fact rbd_client_directory_owner ceph
   set_fact:
     rbd_client_directory_owner: ceph
-  when:
-    - rbd_client_directory_owner is not defined
+  when: rbd_client_directory_owner is not defined
       or not rbd_client_directory_owner
 
 - name: set_fact rbd_client_directory_group rbd_client_directory_group
   set_fact:
     rbd_client_directory_group: ceph
-  when:
-    - rbd_client_directory_group is not defined
+  when: rbd_client_directory_group is not defined
       or not rbd_client_directory_group
 
 - name: set_fact rbd_client_directory_mode 0770
   set_fact:
     rbd_client_directory_mode: "0770"
-  when:
-    - rbd_client_directory_mode is not defined
+  when: rbd_client_directory_mode is not defined
      or not rbd_client_directory_mode
 
 - name: resolve device link(s)
@@ -281,15 +269,13 @@
 - name: import_tasks set_radosgw_address.yml
   import_tasks: set_radosgw_address.yml
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, [])
+  when: inventory_hostname in groups.get(rgw_group_name, [])
 
 - name: set_fact rgw_instances
   set_fact:
     rgw_instances: "{{ rgw_instances|default([]) | union([{'instance_name': 'rgw' + item|string, 'radosgw_address': _radosgw_address, 'radosgw_frontend_port': radosgw_frontend_port|int + item|int}]) }}"
   with_sequence: start=0 end={{ radosgw_num_instances|int - 1 }}
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, [])
+  when: inventory_hostname in groups.get(rgw_group_name, [])
 
 - name: set ntp service name depending on OS family
   block:
diff --git a/roles/ceph-facts/tasks/set_monitor_address.yml b/roles/ceph-facts/tasks/set_monitor_address.yml
index 09a08bd4d..f92df4134 100644
--- a/roles/ceph-facts/tasks/set_monitor_address.yml
+++ b/roles/ceph-facts/tasks/set_monitor_address.yml
@@ -2,8 +2,7 @@
 - name: set_fact _monitor_address to monitor_address_block ipv4
   set_fact:
     _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv4_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | first }] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
+  with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
     - hostvars[item]['monitor_address_block'] is defined
@@ -13,8 +12,7 @@
 - name: set_fact _monitor_address to monitor_address_block ipv6
   set_fact:
     _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv6_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | last | ipwrap }] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
+  with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
     - hostvars[item]['monitor_address_block'] is defined
@@ -24,8 +22,7 @@
 - name: set_fact _monitor_address to monitor_address
   set_fact:
     _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ipwrap}] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
+  with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
     - hostvars[item]['monitor_address'] is defined
@@ -34,8 +31,7 @@
 - name: set_fact _monitor_address to monitor_interface - ipv4
   set_fact:
     _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
+  with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
     - ip_version == 'ipv4'
@@ -46,8 +42,7 @@
 - name: set_fact _monitor_address to monitor_interface - ipv6
   set_fact:
     _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
+  with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
     - ip_version == 'ipv6'
diff --git a/roles/ceph-facts/tasks/set_radosgw_address.yml b/roles/ceph-facts/tasks/set_radosgw_address.yml
index 6e4fb1cd5..1ff6f71fa 100644
--- a/roles/ceph-facts/tasks/set_radosgw_address.yml
+++ b/roles/ceph-facts/tasks/set_radosgw_address.yml
@@ -35,11 +35,9 @@
 - name: set_fact _radosgw_address to radosgw_interface - ipv4
   set_fact:
     _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
-  when:
-    - ip_version == 'ipv4'
+  when: ip_version == 'ipv4'
 
 - name: set_fact _radosgw_address to radosgw_interface - ipv6
   set_fact:
     _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] }}"
-  when:
-    - ip_version == 'ipv6'
+  when: ip_version == 'ipv6'
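The `with_items:` hunks above rely on the same rule: the keyword takes any expression that renders to a list, and because `with_items` flattens one level of nesting, wrapping the template in an extra one-item list never changed the result. A minimal sketch of the collapsed form (the group name is assumed to exist in the inventory):

    - name: run a task once per monitor host
      ping:
      delegate_to: "{{ item }}"
      with_items: "{{ groups.get('mons', []) }}"   # equivalent to the nested one-item list form
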
diff --git a/roles/ceph-handler/handlers/main.yml b/roles/ceph-handler/handlers/main.yml
index 14d01612d..9333b9cc2 100644
--- a/roles/ceph-handler/handlers/main.yml
+++ b/roles/ceph-handler/handlers/main.yml
@@ -5,8 +5,7 @@
 - name: update apt cache
   apt:
     update-cache: yes
-  when:
-    - ansible_os_family == 'Debian'
+  when: ansible_os_family == 'Debian'
   register: result
   until: result is succeeded
@@ -140,8 +139,7 @@
     group: root
     mode: 0750
   listen: "restart ceph mdss"
-  when:
-    - mds_group_name in group_names
+  when: mds_group_name in group_names
 
 - name: restart ceph mds daemon(s) - non container
   command: /usr/bin/env bash /tmp/restart_mds_daemon.sh
@@ -188,8 +186,7 @@
     group: root
     mode: 0750
   listen: "restart ceph rgws"
-  when:
-    - rgw_group_name in group_names
+  when: rgw_group_name in group_names
 
 - name: restart ceph rgw daemon(s) - non container
   command: /usr/bin/env bash /tmp/restart_rgw_daemon.sh
@@ -236,8 +233,7 @@
     group: root
     mode: 0750
   listen: "restart ceph nfss"
-  when:
-    - nfs_group_name in group_names
+  when: nfs_group_name in group_names
 
 - name: restart ceph nfs daemon(s) - non container
   command: /usr/bin/env bash /tmp/restart_nfs_daemon.sh
@@ -284,8 +280,7 @@
     group: root
     mode: 0750
   listen: "restart ceph rbdmirrors"
-  when:
-    - rbdmirror_group_name in group_names
+  when: rbdmirror_group_name in group_names
 
 - name: restart ceph rbd mirror daemon(s) - non container
   command: /usr/bin/env bash /tmp/restart_rbd_mirror_daemon.sh
@@ -332,8 +327,7 @@
     group: root
     mode: 0750
   listen: "restart ceph mgrs"
-  when:
-    - mgr_group_name in group_names
+  when: mgr_group_name in group_names
 
 - name: restart ceph mgr daemon(s) - non container
   command: /usr/bin/env bash /tmp/restart_mgr_daemon.sh
@@ -382,8 +376,7 @@
     group: root
     mode: 0750
   listen: "restart ceph tcmu-runner"
-  when:
-    - iscsi_gw_group_name in group_names
+  when: iscsi_gw_group_name in group_names
 
 - name: restart tcmu-runner
   command: /usr/bin/env bash /tmp/restart_tcmu_runner.sh
@@ -415,8 +408,7 @@
     group: root
     mode: 0750
   listen: "restart ceph rbd-target-gw"
-  when:
-    - iscsi_gw_group_name in group_names
+  when: iscsi_gw_group_name in group_names
 
 - name: restart rbd-target-gw
   command: /usr/bin/env bash /tmp/restart_rbd_target_gw.sh
@@ -448,8 +440,7 @@
     group: root
     mode: 0750
   listen: "restart ceph rbd-target-api"
-  when:
-    - iscsi_gw_group_name in group_names
+  when: iscsi_gw_group_name in group_names
 
 - name: restart rbd-target-api
   command: /usr/bin/env bash /tmp/restart_rbd_target_api.sh
diff --git a/roles/ceph-handler/tasks/check_running_cluster.yml b/roles/ceph-handler/tasks/check_running_cluster.yml
index 0418d2ffe..0f08a0ae3 100644
--- a/roles/ceph-handler/tasks/check_running_cluster.yml
+++ b/roles/ceph-handler/tasks/check_running_cluster.yml
@@ -1,10 +1,8 @@
 ---
 - name: include check_running_containers.yml
   include_tasks: check_running_containers.yml
-  when:
-    - containerized_deployment
+  when: containerized_deployment
 
 - name: include check_socket_non_container.yml
   include_tasks: check_socket_non_container.yml
-  when:
-    - not containerized_deployment
+  when: not containerized_deployment
diff --git a/roles/ceph-handler/tasks/check_running_containers.yml b/roles/ceph-handler/tasks/check_running_containers.yml
index c0590fe41..6720c8085 100644
--- a/roles/ceph-handler/tasks/check_running_containers.yml
+++ b/roles/ceph-handler/tasks/check_running_containers.yml
@@ -5,8 +5,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(mon_group_name, [])
+  when: inventory_hostname in groups.get(mon_group_name, [])
 
 - name: check for an osd container
   command: "{{ container_binary }} ps -q --filter='name=ceph-osd'"
@@ -14,8 +13,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(osd_group_name, [])
+  when: inventory_hostname in groups.get(osd_group_name, [])
 
 - name: check for a mds container
   command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
@@ -23,8 +21,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(mds_group_name, [])
+  when: inventory_hostname in groups.get(mds_group_name, [])
 
 - name: check for a rgw container
   command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
@@ -32,8 +29,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, [])
+  when: inventory_hostname in groups.get(rgw_group_name, [])
 
 - name: check for a mgr container
   command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
@@ -41,8 +37,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(mgr_group_name, [])
+  when: inventory_hostname in groups.get(mgr_group_name, [])
 
 - name: check for a rbd mirror container
   command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
@@ -50,8 +45,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(rbdmirror_group_name, [])
+  when: inventory_hostname in groups.get(rbdmirror_group_name, [])
 
 - name: check for a nfs container
   command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
@@ -59,8 +53,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(nfs_group_name, [])
+  when: inventory_hostname in groups.get(nfs_group_name, [])
 
 - name: check for a tcmu-runner container
   command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'"
@@ -68,8 +61,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+  when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
 
 - name: check for a rbd-target-api container
   command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'"
@@ -77,8 +69,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+  when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
 
 - name: check for a rbd-target-gw container
   command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'"
@@ -86,5 +77,4 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+  when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
diff --git a/roles/ceph-handler/tasks/check_socket_non_container.yml b/roles/ceph-handler/tasks/check_socket_non_container.yml
index 0afe3eaa8..689858026 100644
--- a/roles/ceph-handler/tasks/check_socket_non_container.yml
+++ b/roles/ceph-handler/tasks/check_socket_non_container.yml
@@ -5,8 +5,7 @@
   failed_when: false
   check_mode: no
   register: mon_socket_stat
-  when:
-    - inventory_hostname in groups.get(mon_group_name, [])
+  when: inventory_hostname in groups.get(mon_group_name, [])
 
 - name: check if the ceph mon socket is in-use
   command: fuser --silent {{ mon_socket_stat.stdout }}
@@ -34,8 +33,7 @@
   failed_when: false
   check_mode: no
   register: osd_socket_stat
-  when:
-    - inventory_hostname in groups.get(osd_group_name, [])
+  when: inventory_hostname in groups.get(osd_group_name, [])
 
 - name: check if the ceph osd socket is in-use
   command: fuser --silent {{ osd_socket_stat.stdout }}
@@ -63,8 +61,7 @@
   failed_when: false
   check_mode: no
   register: mds_socket_stat
-  when:
-    - inventory_hostname in groups.get(mds_group_name, [])
+  when: inventory_hostname in groups.get(mds_group_name, [])
 
 - name: check if the ceph mds socket is in-use
   command: fuser --silent {{ mds_socket_stat.stdout }}
@@ -92,8 +89,7 @@
   failed_when: false
   check_mode: no
   register: rgw_socket_stat
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, [])
+  when: inventory_hostname in groups.get(rgw_group_name, [])
 
 - name: check if the ceph rgw socket is in-use
   command: fuser --silent {{ rgw_socket_stat.stdout }}
@@ -121,8 +117,7 @@
   failed_when: false
   check_mode: no
   register: mgr_socket_stat
-  when:
-    - inventory_hostname in groups.get(mgr_group_name, [])
+  when: inventory_hostname in groups.get(mgr_group_name, [])
 
 - name: check if the ceph mgr socket is in-use
   command: fuser --silent {{ mgr_socket_stat.stdout }}
@@ -150,8 +145,7 @@
   failed_when: false
   check_mode: no
   register: rbd_mirror_socket_stat
-  when:
-    - inventory_hostname in groups.get(rbdmirror_group_name, [])
+  when: inventory_hostname in groups.get(rbdmirror_group_name, [])
 
 - name: check if the ceph rbd mirror socket is in-use
   command: fuser --silent {{ rbd_mirror_socket_stat.stdout }}
@@ -178,8 +172,7 @@
   failed_when: false
   check_mode: no
   register: nfs_socket_stat
-  when:
-    - inventory_hostname in groups.get(nfs_group_name, [])
+  when: inventory_hostname in groups.get(nfs_group_name, [])
 
 - name: check if the ceph nfs ganesha socket is in-use
   command: fuser --silent {{ nfs_socket_stat.stdout }}
@@ -206,8 +199,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+  when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
 
 - name: check for a rbd-target-api
   command: "pgrep rbd-target-api"
@@ -215,8 +207,7 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+  when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
 
 - name: check for a rbd-target-gw
   command: "pgrep name=rbd-target-gw"
@@ -224,5 +215,4 @@
   changed_when: false
   failed_when: false
   check_mode: no
-  when:
-    - inventory_hostname in groups.get(iscsi_gw_group_name, [])
+  when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
diff --git a/roles/ceph-infra/tasks/configure_firewall.yml b/roles/ceph-infra/tasks/configure_firewall.yml
index 6bfe58047..dc89664e4 100644
--- a/roles/ceph-infra/tasks/configure_firewall.yml
+++ b/roles/ceph-infra/tasks/configure_firewall.yml
@@ -7,13 +7,10 @@
   ignore_errors: true
   check_mode: no
   changed_when: false
-  tags:
-    - firewall
-  when:
-    - not containerized_deployment
+  tags: firewall
+  when: not containerized_deployment
 
-- when:
-    - (firewalld_pkg_query.get('rc', 1) == 0
+- when: (firewalld_pkg_query.get('rc', 1) == 0
        or is_atomic)
   block:
     - name: start firewalld
@@ -36,8 +33,7 @@
       when:
        - mon_group_name is defined
        - mon_group_name in group_names
-      tags:
-        - firewall
+      tags: firewall
 
     - name: open manager ports
       firewalld:
@@ -50,8 +46,7 @@
      when:
        - mgr_group_name is defined
        - mgr_group_name in group_names
-      tags:
-        - firewall
+      tags: firewall
 
    - name: open osd ports
      firewalld:
@@ -67,8 +62,7 @@
      when:
        - osd_group_name is defined
        - osd_group_name in group_names
-      tags:
-        - firewall
+      tags: firewall
 
    - name: open rgw ports
      firewalld:
@@ -81,8 +75,7 @@
      when:
        - rgw_group_name is defined
        - rgw_group_name in group_names
-      tags:
-        - firewall
+      tags: firewall
 
    - name: open mds ports
      firewalld:
@@ -95,8 +88,7 @@
      when:
        - mds_group_name is defined
        - mds_group_name in group_names
-      tags:
-        - firewall
+      tags: firewall
 
    - name: open nfs ports
      firewalld:
@@ -109,8 +101,7 @@
      when:
        - nfs_group_name is defined
        - nfs_group_name in group_names
-      tags:
-        - firewall
+      tags: firewall
 
    - name: open nfs ports (portmapper)
      firewalld:
@@ -123,8 +114,7 @@
      when:
        - nfs_group_name is defined
        - nfs_group_name in group_names
-      tags:
-        - firewall
+      tags: firewall
 
    - name: open rbdmirror ports
      firewalld:
@@ -137,8 +127,7 @@
      when:
        - rbdmirror_group_name is defined
        - rbdmirror_group_name in group_names
-      tags:
-        - firewall
+      tags: firewall
 
    - name: open iscsi target ports
      firewalld:
@@ -151,8 +140,7 @@
      when:
        - iscsi_gw_group_name is defined
        - iscsi_gw_group_name in group_names
-      tags:
-        - firewall
+      tags: firewall
 
    - name: open iscsi api ports
      firewalld:
@@ -165,7 +153,6 @@
      when:
        - iscsi_gw_group_name is defined
        - iscsi_gw_group_name in group_names
-      tags:
-        - firewall
+      tags: firewall
 
 - meta: flush_handlers
diff --git a/roles/ceph-infra/tasks/setup_ntp.yml b/roles/ceph-infra/tasks/setup_ntp.yml
index 485da72ac..7313b7b48 100644
--- a/roles/ceph-infra/tasks/setup_ntp.yml
+++ b/roles/ceph-infra/tasks/setup_ntp.yml
@@ -10,8 +10,7 @@
     state: present
   register: result
   until: result is succeeded
-  when:
-    - ntp_daemon_type == "ntpd"
+  when: ntp_daemon_type == "ntpd"
 
 - name: install chrony
   package:
@@ -19,8 +18,7 @@
     state: present
   register: result
   until: result is succeeded
-  when:
-    - ntp_daemon_type == "chronyd"
+  when: ntp_daemon_type == "chronyd"
 
 - name: enable the ntp daemon and disable the rest
   block:
@@ -29,13 +27,11 @@
       notify:
        - disable ntpd
        - disable chronyd
-      when:
-        - ntp_daemon_type == "timesyncd"
+      when: ntp_daemon_type == "timesyncd"
 
    - name: disable time sync using timesyncd if we are not using it
      command: timedatectl set-ntp no
-      when:
-        - ntp_daemon_type != "timesyncd"
+      when: ntp_daemon_type != "timesyncd"
 
    - name: enable ntpd
      service:
@@ -45,8 +41,7 @@
      notify:
        - disable chronyd
        - disable timesyncd
-      when:
-        - ntp_daemon_type == "ntpd"
+      when: ntp_daemon_type == "ntpd"
 
    - name: enable chronyd
      service:
@@ -56,5 +51,4 @@
      notify:
        - disable ntpd
        - disable timesyncd
-      when:
-        - ntp_daemon_type == "chronyd"
+      when: ntp_daemon_type == "chronyd"
diff --git a/roles/ceph-iscsi-gw/tasks/common.yml b/roles/ceph-iscsi-gw/tasks/common.yml
index 2c278b309..ce46d89dd 100644
--- a/roles/ceph-iscsi-gw/tasks/common.yml
+++ b/roles/ceph-iscsi-gw/tasks/common.yml
@@ -2,8 +2,7 @@
 - name: make sure gateway_ip_list is configured
   fail:
     msg: "you must set a list of IPs (comma separated) for gateway_ip_list"
-  when:
-    - gateway_ip_list == "0.0.0.0"
+  when: gateway_ip_list == "0.0.0.0"
 
 - name: copy admin key
   copy:
@@ -12,8 +11,7 @@
     owner: "root"
     group: "root"
     mode: "{{ ceph_keyring_permissions }}"
-  when:
-    - cephx
+  when: cephx
 
 - name: deploy gateway settings, used by the ceph_iscsi_config modules
   template:
@@ -44,5 +42,4 @@
   command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default(osd_pool_default_size) }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
-  when:
-    - rbd_pool_size | default(osd_pool_default_size) != ceph_osd_pool_default_size
+  when: rbd_pool_size | default(osd_pool_default_size) != ceph_osd_pool_default_size
diff --git a/roles/ceph-iscsi-gw/tasks/container/containerized.yml b/roles/ceph-iscsi-gw/tasks/container/containerized.yml
index 7610d2bde..5b38d2f03 100644
--- a/roles/ceph-iscsi-gw/tasks/container/containerized.yml
+++ b/roles/ceph-iscsi-gw/tasks/container/containerized.yml
@@ -11,8 +11,7 @@
   with_items:
     - tcmu-runner
     - rbd-target-gw
     - rbd-target-api
-  notify:
-    - restart ceph {{ item }}
+  notify: restart ceph {{ item }}
 
 - name: systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers
   systemd:
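The `notify: restart ceph {{ item }}` collapse above also shows that a scalar notification may be templated per loop iteration, so a single task can fan out to several handlers. A hedged sketch of the pattern (unit names copied from the hunk; the template paths are illustrative assumptions):

    - name: generate systemd unit files
      template:
        src: "{{ item }}.service.j2"
        dest: "/etc/systemd/system/{{ item }}.service"
      with_items:
        - tcmu-runner
        - rbd-target-gw
        - rbd-target-api
      notify: restart ceph {{ item }}   # resolves to a different handler name per item
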
diff --git a/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml b/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml
index f8beb519c..f4ebd1bc5 100644
--- a/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml
+++ b/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml
@@ -26,8 +26,7 @@
   become: False
   run_once: True
   with_items: "{{ crt_files_exist.results }}"
-  when:
-    - not item.stat.exists
+  when: not item.stat.exists
 
 - name: create pem
   shell: >
@@ -38,8 +37,7 @@
   run_once: True
   register: pem
   with_items: "{{ crt_files_exist.results }}"
-  when:
-    - not item.stat.exists
+  when: not item.stat.exists
 
 - name: create public key from pem
   shell: >
@@ -48,10 +46,8 @@
   delegate_to: localhost
   become: False
   run_once: True
-  when:
-    - pem.changed
-  tags:
-    - skip_ansible_lint
+  when: pem.changed
+  tags: skip_ansible_lint
 
 - name: copy crt file(s) to gateway nodes
   copy:
diff --git a/roles/ceph-iscsi-gw/tasks/main.yml b/roles/ceph-iscsi-gw/tasks/main.yml
index 77cfe618e..f93ece729 100644
--- a/roles/ceph-iscsi-gw/tasks/main.yml
+++ b/roles/ceph-iscsi-gw/tasks/main.yml
@@ -4,23 +4,19 @@
 
 - name: include non-container/prerequisites.yml
   include_tasks: non-container/prerequisites.yml
-  when:
-    - not containerized_deployment
+  when: not containerized_deployment
 
 # deploy_ssl_keys used the ansible controller to create self-signed crt/key/pub files
 # and transfers them to /etc/ceph directory on each controller. SSL certs are used by
 # the API for https support.
 - name: include deploy_ssl_keys.yml
   include_tasks: deploy_ssl_keys.yml
-  when:
-    - generate_crt|bool
+  when: generate_crt|bool
 
 - name: include non-container/configure_iscsi.yml
   include_tasks: non-container/configure_iscsi.yml
-  when:
-    - not containerized_deployment
+  when: not containerized_deployment
 
 - name: include containerized.yml
   include_tasks: container/containerized.yml
-  when:
-    - containerized_deployment
+  when: containerized_deployment
diff --git a/roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml b/roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml
index 26c809d58..d311bb508 100644
--- a/roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml
+++ b/roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml
@@ -1,7 +1,6 @@
 ---
 - name: red hat based systems tasks
-  when:
-    - ansible_os_family == 'RedHat'
+  when: ansible_os_family == 'RedHat'
   block:
     - name: when ceph_iscsi_config_dev is true
       when:
diff --git a/roles/ceph-mds/tasks/containerized.yml b/roles/ceph-mds/tasks/containerized.yml
index c59ceb14c..f8e220482 100644
--- a/roles/ceph-mds/tasks/containerized.yml
+++ b/roles/ceph-mds/tasks/containerized.yml
@@ -7,8 +7,7 @@
   set_fact:
     admin_keyring:
       - "/etc/ceph/{{ cluster }}.client.admin.keyring"
-  when:
-    - copy_admin_key
+  when: copy_admin_key
 
 - name: set_fact ceph_config_keys
   set_fact:
@@ -18,8 +17,7 @@
 - name: merge ceph_config_keys and admin_keyring
   set_fact:
     ceph_config_keys: "{{ ceph_config_keys + admin_keyring }}"
-  when:
-    - copy_admin_key
+  when: copy_admin_key
 
 - name: stat for ceph config and keys
   stat:
@@ -53,8 +51,7 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  notify:
-    - restart ceph mdss
+  notify: restart ceph mdss
 
 - name: systemd start mds container
   systemd:
diff --git a/roles/ceph-mds/tasks/create_mds_filesystems.yml b/roles/ceph-mds/tasks/create_mds_filesystems.yml
index 8e0aa2c6f..d87d343ea 100644
--- a/roles/ceph-mds/tasks/create_mds_filesystems.yml
+++ b/roles/ceph-mds/tasks/create_mds_filesystems.yml
@@ -40,15 +40,13 @@
   command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
   with_items: "{{ cephfs_pools | unique }}"
   changed_when: false
-  when:
-    - item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
+  when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
 
 - name: customize pool min_size
   command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
   with_items: "{{ cephfs_pools | unique }}"
   changed_when: false
-  when:
-    - (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
+  when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
 
 - name: check if ceph filesystem already exists
   command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
@@ -61,8 +59,7 @@
   command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - check_existing_cephfs.rc != 0
+  when: check_existing_cephfs.rc != 0
 
 - name: assign application to cephfs pools
   command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} cephfs"
@@ -71,12 +68,10 @@
     - "{{ cephfs_metadata }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - check_existing_cephfs.rc != 0
+  when: check_existing_cephfs.rc != 0
 
 - name: set max_mds
   command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - mds_max_mds > 1
+  when: mds_max_mds > 1
diff --git a/roles/ceph-mds/tasks/main.yml b/roles/ceph-mds/tasks/main.yml
index 9d19a59db..c7a6f4bfa 100644
--- a/roles/ceph-mds/tasks/main.yml
+++ b/roles/ceph-mds/tasks/main.yml
@@ -8,8 +8,7 @@
 - name: set_fact docker_exec_cmd
   set_fact:
     docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
-  when:
-    - containerized_deployment
+  when: containerized_deployment
 
 - name: include common.yml
   include_tasks: common.yml
diff --git a/roles/ceph-mgr/tasks/common.yml b/roles/ceph-mgr/tasks/common.yml
index 49907acae..d933a3e3d 100644
--- a/roles/ceph-mgr/tasks/common.yml
+++ b/roles/ceph-mgr/tasks/common.yml
@@ -24,8 +24,7 @@
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-  when:
-    - groups.get(mgr_group_name, []) | length == 0 # the key is present already since one of the mons created it in "create ceph mgr keyring(s)"
+  when: groups.get(mgr_group_name, []) | length == 0 # the key is present already since one of the mons created it in "create ceph mgr keyring(s)"
 
 - name: copy ceph keyring(s) if needed
   copy:
@@ -48,5 +47,4 @@
     owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     mode: "{{ ceph_keyring_permissions }}"
-  when:
-    - cephx
\ No newline at end of file
+  when: cephx
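Note that the pool hunks above only collapse single conditions; guards that compare a requested value against the cluster default stay on one line but keep their full expression, so `osd pool set` is still skipped when nothing would change. A sketch of that guard using the playbook's own variables (the surrounding task is abbreviated):

    - name: customize pool size
      command: "ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
      with_items: "{{ cephfs_pools | unique }}"
      changed_when: false
      when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
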
diff --git a/roles/ceph-mgr/tasks/main.yml b/roles/ceph-mgr/tasks/main.yml
index 6a92a2663..1acda2f56 100644
--- a/roles/ceph-mgr/tasks/main.yml
+++ b/roles/ceph-mgr/tasks/main.yml
@@ -2,8 +2,7 @@
 - name: set_fact docker_exec_cmd
   set_fact:
     docker_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
-  when:
-    - containerized_deployment
+  when: containerized_deployment
 
 - name: include common.yml
   include_tasks: common.yml
@@ -20,4 +19,4 @@
   when:
     - ceph_mgr_modules | length > 0
     - ((groups[mgr_group_name] | default([]) | length == 0 and inventory_hostname == groups[mon_group_name] | last) or
-       (groups[mgr_group_name] | default([]) | length > 0 and inventory_hostname == groups[mgr_group_name] | last))
\ No newline at end of file
+       (groups[mgr_group_name] | default([]) | length > 0 and inventory_hostname == groups[mgr_group_name] | last))
diff --git a/roles/ceph-mgr/tasks/mgr_modules.yml b/roles/ceph-mgr/tasks/mgr_modules.yml
index e9ee82087..057e517c3 100644
--- a/roles/ceph-mgr/tasks/mgr_modules.yml
+++ b/roles/ceph-mgr/tasks/mgr_modules.yml
@@ -27,12 +27,10 @@
   command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
   with_items: "{{ _ceph_mgr_modules.get('enabled_modules', []) }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - item not in ceph_mgr_modules
+  when: item not in ceph_mgr_modules
 
 - name: add modules to ceph-mgr
   command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
   with_items: "{{ ceph_mgr_modules }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
\ No newline at end of file
+  when: (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
diff --git a/roles/ceph-mgr/tasks/pre_requisite.yml b/roles/ceph-mgr/tasks/pre_requisite.yml
index 1dcd7c43b..81bc623fa 100644
--- a/roles/ceph-mgr/tasks/pre_requisite.yml
+++ b/roles/ceph-mgr/tasks/pre_requisite.yml
@@ -5,8 +5,7 @@
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   register: result
   until: result is succeeded
-  when:
-    - ansible_os_family in ['RedHat', 'Suse']
+  when: ansible_os_family in ['RedHat', 'Suse']
 
 - name: install ceph-mgr packages for debian
   apt:
@@ -15,5 +14,4 @@
     default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else '' }}"
   register: result
   until: result is succeeded
-  when:
-    - ansible_os_family == 'Debian'
+  when: ansible_os_family == 'Debian'
diff --git a/roles/ceph-mgr/tasks/start_mgr.yml b/roles/ceph-mgr/tasks/start_mgr.yml
index 3d7fa3625..8500651c9 100644
--- a/roles/ceph-mgr/tasks/start_mgr.yml
+++ b/roles/ceph-mgr/tasks/start_mgr.yml
@@ -25,10 +25,8 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  when:
-    - containerized_deployment
-  notify:
-    - restart ceph mgrs
+  when: containerized_deployment
+  notify: restart ceph mgrs
 
 - name: systemd start mgr
   systemd:
diff --git a/roles/ceph-mon/tasks/ceph_keys.yml b/roles/ceph-mon/tasks/ceph_keys.yml
index cb82ef6c0..2bacbebf9 100644
--- a/roles/ceph-mon/tasks/ceph_keys.yml
+++ b/roles/ceph-mon/tasks/ceph_keys.yml
@@ -17,8 +17,7 @@
   changed_when: false
 
 - name: tasks for MONs when cephx is enabled
-  when:
-    - cephx
+  when: cephx
   block:
     - name: fetch ceph initial keys
       ceph_key:
@@ -48,8 +47,7 @@
       environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      with_items:
-        - "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
+      with_items: "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
       run_once: True
       delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -58,22 +56,19 @@
        src: "{{ ceph_conf_key_directory }}/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
        dest: "{{ fetch_directory }}/{{ fsid }}/{{ ceph_conf_key_directory }}/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
        flat: yes
-      with_items:
-        - "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
+      with_items: "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
-- name: copy keys to the ansible server
-  fetch:
-    src: "{{ item }}"
-    dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
-    flat: yes
-  with_items:
-    - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
-    - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
-    - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-    - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring
-    - /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring
-    - /etc/ceph/{{ cluster }}.client.admin.keyring
-  when:
-    - cephx
-    - inventory_hostname == groups[mon_group_name] | last
+    - name: copy keys to the ansible server
+      fetch:
+        src: "{{ item }}"
+        dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
+        flat: yes
+      with_items:
+        - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
+        - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
+        - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
+        - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring
+        - /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring
+        - /etc/ceph/{{ cluster }}.client.admin.keyring
+      when: inventory_hostname == groups[mon_group_name] | last
diff --git a/roles/ceph-mon/tasks/crush_rules.yml b/roles/ceph-mon/tasks/crush_rules.yml
index 18428ecdd..adeb769c5 100644
--- a/roles/ceph-mon/tasks/crush_rules.yml
+++ b/roles/ceph-mon/tasks/crush_rules.yml
@@ -15,8 +15,7 @@
   command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush rule create-simple {{ item.name }} {{ item.root }} {{ item.type }}"
   with_items: "{{ crush_rules | unique }}"
   changed_when: false
-  when:
-    - inventory_hostname == groups.get(mon_group_name) | last
+  when: inventory_hostname == groups.get(mon_group_name) | last
 
 - name: get id for new default crush rule
   command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd -f json crush rule dump {{ item.name }}"
diff --git a/roles/ceph-mon/tasks/deploy_monitors.yml b/roles/ceph-mon/tasks/deploy_monitors.yml
index 4c027ea7b..a25eb60c4 100644
--- a/roles/ceph-mon/tasks/deploy_monitors.yml
+++ b/roles/ceph-mon/tasks/deploy_monitors.yml
@@ -108,8 +108,7 @@
     --keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
   args:
     creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
-  when:
-    - cephx
+  when: cephx
 
 - name: ceph monitor mkfs without keyring
   command: >
@@ -122,5 +121,4 @@
     --fsid {{ fsid }}
   args:
     creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
-  when:
-    - not cephx
+  when: not cephx
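The ceph_keys.yml hunk above is the one behavioural cleanup in this stretch: the "copy keys to the ansible server" task moves inside the "tasks for MONs when cephx is enabled" block, so its own `when:` can drop the now-redundant `cephx` test, because a condition set on a block applies to every task within it. A minimal sketch of that inheritance (paths and group name shortened for illustration):

    - name: tasks for MONs when cephx is enabled
      when: cephx
      block:
        - name: copy keys to the ansible server
          fetch:
            src: /etc/ceph/ceph.client.admin.keyring
            dest: fetch/
            flat: yes
          when: inventory_hostname == groups['mons'] | last   # cephx is implied by the enclosing block
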
diff --git a/roles/ceph-mon/tasks/main.yml b/roles/ceph-mon/tasks/main.yml
index 893f9ff57..f8b6ef3f2 100644
--- a/roles/ceph-mon/tasks/main.yml
+++ b/roles/ceph-mon/tasks/main.yml
@@ -2,8 +2,7 @@
 - name: set_fact docker_exec_cmd
   set_fact:
     docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
-  when:
-    - containerized_deployment
+  when: containerized_deployment
 
 - name: include deploy_monitors.yml
   include_tasks: deploy_monitors.yml
@@ -17,8 +16,7 @@
 - name: include_tasks ceph_keys.yml
   include_tasks: ceph_keys.yml
-  when:
-    - not switch_to_containers | default(False)
+  when: not switch_to_containers | default(False)
 
 - name: include secure_cluster.yml
   include_tasks: secure_cluster.yml
@@ -28,5 +26,4 @@
 
 - name: crush_rules.yml
   include_tasks: crush_rules.yml
-  when:
-    - crush_rule_config
+  when: crush_rule_config
diff --git a/roles/ceph-mon/tasks/start_monitor.yml b/roles/ceph-mon/tasks/start_monitor.yml
index b1be39945..f3ceadcab 100644
--- a/roles/ceph-mon/tasks/start_monitor.yml
+++ b/roles/ceph-mon/tasks/start_monitor.yml
@@ -27,8 +27,7 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  notify:
-    - restart ceph mons
+  notify: restart ceph mons
   when: containerized_deployment
 
 - name: start the monitor service
diff --git a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
index 6f48c411f..c0b5a2e13 100644
--- a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
+++ b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
@@ -2,8 +2,7 @@
 - name: set_fact docker_exec_cmd_nfs
   set_fact:
     docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
-  when:
-    - containerized_deployment
+  when: containerized_deployment
 
 - name: check if "{{ ceph_nfs_rgw_user }}" exists
   command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
@@ -12,8 +11,7 @@
   changed_when: false
   failed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - nfs_obj_gw
+  when: nfs_obj_gw
 
 - name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
   command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
diff --git a/roles/ceph-nfs/tasks/ganesha_selinux_fix.yml b/roles/ceph-nfs/tasks/ganesha_selinux_fix.yml
index 1c3c0ba7d..b07b7c06e 100644
--- a/roles/ceph-nfs/tasks/ganesha_selinux_fix.yml
+++ b/roles/ceph-nfs/tasks/ganesha_selinux_fix.yml
@@ -12,8 +12,7 @@
     state: present
   register: result
   until: result is succeeded
-  when:
-    - selinuxstatus.stdout != 'Disabled'
+  when: selinuxstatus.stdout != 'Disabled'
 
 - name: test if ganesha_t is already permissive
   shell: |
diff --git a/roles/ceph-nfs/tasks/main.yml b/roles/ceph-nfs/tasks/main.yml
index b68eb26a1..6e0099482 100644
--- a/roles/ceph-nfs/tasks/main.yml
+++ b/roles/ceph-nfs/tasks/main.yml
@@ -2,21 +2,18 @@
 - name: set_fact docker_exec_cmd
   set_fact:
     docker_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
-  when:
-    - containerized_deployment
+  when: containerized_deployment
 
 - name: include common.yml
   include_tasks: common.yml
 
 - name: include pre_requisite_non_container.yml
   include_tasks: pre_requisite_non_container.yml
-  when:
-    - not containerized_deployment
+  when: not containerized_deployment
 
 - name: include pre_requisite_container.yml
   include_tasks: pre_requisite_container.yml
-  when:
-    - containerized_deployment
+  when: containerized_deployment
 
 - name: include create_rgw_nfs_user.yml
   import_tasks: create_rgw_nfs_user.yml
diff --git a/roles/ceph-nfs/tasks/pre_requisite_container.yml b/roles/ceph-nfs/tasks/pre_requisite_container.yml
index 1f2e02a96..5206c1285 100644
--- a/roles/ceph-nfs/tasks/pre_requisite_container.yml
+++ b/roles/ceph-nfs/tasks/pre_requisite_container.yml
@@ -3,8 +3,7 @@
   set_fact:
     admin_keyring:
       - "/etc/ceph/{{ cluster }}.client.admin.keyring"
-  when:
-    - copy_admin_key
+  when: copy_admin_key
 
 - name: set_fact ceph_config_keys
   set_fact:
@@ -14,8 +13,7 @@
 - name: merge ceph_config_keys and admin_keyring
   set_fact:
     ceph_config_keys: "{{ ceph_config_keys + admin_keyring }}"
-  when:
-    - copy_admin_key
+  when: copy_admin_key
 
 - name: stat for config and keys
   stat:
@@ -39,8 +37,7 @@
   with_together:
     - "{{ ceph_config_keys }}"
     - "{{ statconfig.results }}"
-  when:
-    - item.1.stat.exists
+  when: item.1.stat.exists
 
 - name: create dbus service file
   become: true
@@ -50,10 +47,8 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  when:
-    - ceph_nfs_dynamic_exports
+  when: ceph_nfs_dynamic_exports
 
 - name: reload dbus configuration
   command: "killall -SIGHUP dbus-daemon"
-  when:
-    - ceph_nfs_dynamic_exports
+  when: ceph_nfs_dynamic_exports
diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml
index 9b00c1377..5b5fb7056 100644
--- a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml
+++ b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml
@@ -1,13 +1,11 @@
 ---
 - name: include red hat based system related tasks
   include_tasks: pre_requisite_non_container_red_hat.yml
-  when:
-    - ansible_os_family == 'RedHat'
+  when: ansible_os_family == 'RedHat'
 
 - name: include debian based system related tasks
   include_tasks: pre_requisite_non_container_debian.yml
-  when:
-    - ansible_os_family == 'Debian'
+  when: ansible_os_family == 'Debian'
 
 - name: install nfs rgw/cephfs gateway - suse
   zypper:
@@ -41,12 +39,10 @@
     - { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
     - { name: "/var/log/ceph", create: true }
     - { name: "/var/run/ceph", create: true }
-  when:
-    - item.create|bool
+  when: item.create|bool
 
 - name: cephx related tasks
-  when:
-    - cephx
+  when: cephx
   block:
     - name: copy bootstrap cephx keys
       copy:
@@ -55,14 +51,13 @@
        owner: "ceph"
        group: "ceph"
        mode: "0600"
       with_items:
         - { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
-      when:
-        - item.copy_key|bool
+      when: item.copy_key|bool
 
 - name: nfs object gateway related tasks
-  when:
-    - nfs_obj_gw
+  when: nfs_obj_gw
   block:
     - name: create rados gateway keyring
       command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
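Unlike `when:`, `tags:` and `notify:`, a loop source must still be a list: feeding `with_items` a bare mapping fails when the loop iterates it, so the bootstrap-keyring task above keeps its one-item list of dicts instead of being collapsed. A minimal sketch of the form that must be preserved (fields copied from the hunk):

    - name: copy bootstrap cephx keys
      copy:
        src: "{{ fetch_directory }}/{{ fsid }}{{ item.name }}"
        dest: "{{ item.name }}"
        owner: ceph
        group: ceph
        mode: "0600"
      with_items:
        - { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
      when: item.copy_key | bool
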
diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml
index 6c9e8d18d..db718f308 100644
--- a/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml
+++ b/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml
@@ -1,11 +1,10 @@
 ---
 - name: debian based systems - repo handling
-  when:
-    - ceph_origin == 'repository'
+  when: ceph_origin == 'repository'
   block:
     - name: stable repos specific tasks
       when:
        - nfs_ganesha_stable
        - ceph_repository == 'community'
       block:
         - name: add nfs-ganesha stable repository
@@ -22,8 +20,7 @@
           retries: 5
           delay: 2
           until: update_ganesha_apt_cache | success
-          when:
-            - add_ganesha_apt_repo | changed
+          when: add_ganesha_apt_repo | changed
 
     - name: debian based systems - dev repos specific tasks
       when:
@@ -90,13 +87,11 @@
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   register: result
   until: result is succeeded
-  when:
-    - nfs_file_gw
+  when: nfs_file_gw
 
 - name: install red hat storage nfs obj gateway
   apt:
     name: nfs-ganesha-rgw
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   register: result
   until: result is succeeded
-  when:
-    - nfs_obj_gw
+  when: nfs_obj_gw
diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml
index 719812bed..8d80a036b 100644
--- a/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml
+++ b/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml
@@ -1,7 +1,6 @@
 ---
 - name: red hat based systems - repo handling
-  when:
-    - ceph_origin == 'repository'
+  when: ceph_origin == 'repository'
   block:
     - name: add nfs-ganesha stable repository
       yum_repository:
@@ -42,8 +41,7 @@
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   register: result
   until: result is succeeded
-  when:
-    - nfs_file_gw
+  when: nfs_file_gw
 
 - name: install redhat nfs-ganesha-rgw and ceph-radosgw packages
   package:
@@ -51,5 +49,4 @@
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   register: result
   until: result is succeeded
-  when:
-    - nfs_obj_gw
+  when: nfs_obj_gw
diff --git a/roles/ceph-nfs/tasks/start_nfs.yml b/roles/ceph-nfs/tasks/start_nfs.yml
index 6536881ea..9ea501a0e 100644
--- a/roles/ceph-nfs/tasks/start_nfs.yml
+++ b/roles/ceph-nfs/tasks/start_nfs.yml
@@ -2,8 +2,7 @@
 - name: set_fact docker_exec_cmd_nfs
   set_fact:
     docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
-  when:
-    - containerized_deployment
+  when: containerized_deployment
 
 - name: check if rados index object exists
   shell: "{{ docker_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
@@ -11,8 +10,7 @@
   failed_when: false
   register: rados_index_exists
   check_mode: no
-  when:
-    - ceph_nfs_rados_backend
+  when: ceph_nfs_rados_backend
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
@@ -41,8 +39,7 @@
     group: "root"
     mode: "0644"
     config_type: ini
-  notify:
-    - restart ceph nfss
+  notify: restart ceph nfss
 
 - name: create exports directory
   file:
@@ -51,8 +48,7 @@
     owner: "root"
     group: "root"
     mode: "0755"
-  when:
-    - ceph_nfs_dynamic_exports
+  when: ceph_nfs_dynamic_exports
 
 - name: create exports dir index file
   copy:
@@ -62,8 +58,7 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  when:
-    - ceph_nfs_dynamic_exports
+  when: ceph_nfs_dynamic_exports
 
 - name: generate systemd unit file
   become: true
@@ -73,10 +68,8 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  when:
-    - containerized_deployment
-  notify:
-    - restart ceph nfss
+  when: containerized_deployment
+  notify: restart ceph nfss
 
 - name: systemd start nfs container
   systemd:
diff --git a/roles/ceph-osd/tasks/common.yml b/roles/ceph-osd/tasks/common.yml
index daf1f4819..2d5ff45ce 100644
--- a/roles/ceph-osd/tasks/common.yml
+++ b/roles/ceph-osd/tasks/common.yml
@@ -6,8 +6,7 @@
     owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     mode: "0755"
-  when:
-    - cephx
+  when: cephx
   with_items:
     - /var/lib/ceph/bootstrap-osd/
     - /var/lib/ceph/osd/
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index 0abbc00de..a68ec560c 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -20,18 +20,15 @@
   when:
     - containerized_deployment
     - ceph_osd_numactl_opts != ""
-  tags:
-    - with_pkg
+  tags: with_pkg
 
 - name: install lvm2
   package:
     name: lvm2
   register: result
   until: result is succeeded
-  when:
-    - not is_atomic
-  tags:
-    - with_pkg
+  when: not is_atomic
+  tags: with_pkg
 
 - name: include_tasks common.yml
   include_tasks: common.yml
diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml
index 2fa2f79be..ba8682d37 100644
--- a/roles/ceph-osd/tasks/openstack_config.yml
+++ b/roles/ceph-osd/tasks/openstack_config.yml
@@ -39,8 +39,7 @@
     - "{{ created_pools.results }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - item.1.get('rc', 0) != 0
+  when: item.1.get('rc', 0) != 0
 
 - name: customize pool size
   command: >
@@ -49,8 +48,7 @@
   with_items: "{{ openstack_pools | unique }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
-  when:
-    - item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
+  when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
 
 - name: customize pool min_size
   command: >
@@ -59,16 +57,14 @@
   with_items: "{{ openstack_pools | unique }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
-  when:
-    - (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
+  when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
 
 - name: assign application to pool(s)
   command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
   with_items: "{{ openstack_pools | unique }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - item.application is defined
+  when: item.application is defined
 
 - name: create openstack cephx key(s)
   ceph_key:
@@ -107,4 +103,4 @@
   when:
     - cephx
     - openstack_config
-    - item.0 != groups[mon_group_name]
\ No newline at end of file
+    - item.0 != groups[mon_group_name]
diff --git a/roles/ceph-osd/tasks/start_osds.yml b/roles/ceph-osd/tasks/start_osds.yml
index 27b357b7f..a9e0fb85b 100644
--- a/roles/ceph-osd/tasks/start_osds.yml
+++ b/roles/ceph-osd/tasks/start_osds.yml
@@ -1,7 +1,6 @@
 ---
 - name: container specific tasks
-  when:
-    - containerized_deployment
+  when: containerized_deployment
   block:
     - name: umount ceph disk (if on openstack)
       mount:
@@ -9,8 +8,7 @@
        src: /dev/vdb
        fstype: ext3
        state: unmounted
-      when:
-        - ceph_docker_on_openstack
+      when: ceph_docker_on_openstack
 
    - name: generate ceph osd docker run script
      become: true
@@ -21,8 +19,7 @@
        group: "root"
        mode: "0744"
        setype: "bin_t"
-      notify:
-        - restart ceph osds
+      notify: restart ceph osds
 
 # this is for ceph-disk, the ceph-disk command is gone so we have to list /var/lib/ceph
 - name: get osd ids
@@ -49,10 +46,8 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  notify:
-    - restart ceph osds
-  when:
-    - containerized_deployment
+  notify: restart ceph osds
+  when: containerized_deployment
 
 - name: systemd start osd
   systemd:
diff --git a/roles/ceph-osd/tasks/system_tuning.yml b/roles/ceph-osd/tasks/system_tuning.yml
index 6a825fd61..4087317be 100644
--- a/roles/ceph-osd/tasks/system_tuning.yml
+++ b/roles/ceph-osd/tasks/system_tuning.yml
@@ -23,8 +23,7 @@
     group: "root"
     mode: "0755"
   register: "tmpfiles_d"
-  when:
-    - disable_transparent_hugepage
+  when: disable_transparent_hugepage
 
 - name: disable transparent hugepage
   template:
@@ -35,8 +34,7 @@
     mode: "0644"
     force: "yes"
     validate: "systemd-tmpfiles --create %s"
-  when:
-    - disable_transparent_hugepage
+  when: disable_transparent_hugepage
 
 - name: get default vm.min_free_kbytes
   command: sysctl -b vm.min_free_kbytes
diff --git a/roles/ceph-rbd-mirror/tasks/common.yml b/roles/ceph-rbd-mirror/tasks/common.yml
index a48a40eeb..a9a7e32c0 100644
--- a/roles/ceph-rbd-mirror/tasks/common.yml
+++ b/roles/ceph-rbd-mirror/tasks/common.yml
@@ -18,8 +18,7 @@
     -o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
   args:
     creates: /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
-  when:
-    - not containerized_deployment
+  when: not containerized_deployment
 
 - name: set rbd-mirror key permissions
   file:
@@ -27,5 +26,4 @@
     owner: "ceph"
     group: "ceph"
     mode: "{{ ceph_keyring_permissions }}"
-  when:
-    - not containerized_deployment
\ No newline at end of file
+  when: not containerized_deployment
diff --git a/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml b/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml
index 68432bd1d..d863127e2 100644
--- a/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml
+++ b/roles/ceph-rbd-mirror/tasks/docker/start_docker_rbd_mirror.yml
@@ -8,8 +8,7 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  notify:
-    - restart ceph rbdmirrors
+  notify: restart ceph rbdmirrors
 
 - name: systemd start rbd mirror container
   systemd:
diff --git a/roles/ceph-rbd-mirror/tasks/main.yml b/roles/ceph-rbd-mirror/tasks/main.yml
index 7b8036cfb..22480c0bb 100644
--- a/roles/ceph-rbd-mirror/tasks/main.yml
+++ b/roles/ceph-rbd-mirror/tasks/main.yml
@@ -2,23 +2,19 @@
 - name: set_fact docker_exec_cmd
   set_fact:
     docker_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
-  when:
-    - containerized_deployment
+  when: containerized_deployment
 
 - name: include pre_requisite.yml
   include_tasks: pre_requisite.yml
-  when:
-    - not containerized_deployment
+  when: not containerized_deployment
 
 - name: include common.yml
   include_tasks: common.yml
-  when:
-    - cephx
+  when: cephx
 
 - name: include start_rbd_mirror.yml
   include_tasks: start_rbd_mirror.yml
-  when:
-    - not containerized_deployment
+  when: not containerized_deployment
 
 - name: include configure_mirroring.yml
   include_tasks: configure_mirroring.yml
@@ -28,5 +24,4 @@
 
 - name: include docker/main.yml
   include_tasks: docker/main.yml
-  when:
-    - containerized_deployment
+  when: containerized_deployment
diff --git a/roles/ceph-rbd-mirror/tasks/pre_requisite.yml b/roles/ceph-rbd-mirror/tasks/pre_requisite.yml
index 7a97097a3..09b27aa56 100644
--- a/roles/ceph-rbd-mirror/tasks/pre_requisite.yml
+++ b/roles/ceph-rbd-mirror/tasks/pre_requisite.yml
@@ -7,5 +7,4 @@
     state: present
   register: result
   until: result is succeeded
-  tags:
-    - package-install
\ No newline at end of file
+  tags: package-install
diff --git a/roles/ceph-rgw/tasks/common.yml b/roles/ceph-rgw/tasks/common.yml
index 268a0c0bb..b40573e84 100644
--- a/roles/ceph-rgw/tasks/common.yml
+++ b/roles/ceph-rgw/tasks/common.yml
@@ -6,8 +6,7 @@
     owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     mode: "0755"
-  with_items:
-    - "{{ rbd_client_admin_socket_path }}"
+  with_items: "{{ rbd_client_admin_socket_path }}"
 
 - name: create rados gateway instance directories
   file:
@@ -17,8 +16,7 @@
     group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     mode: "0755"
   with_items: "{{ rgw_instances }}"
-  when:
-    - rgw_instances is defined
+  when: rgw_instances is defined
 
 - name: copy ceph keyring(s) if needed
   copy:
diff --git a/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml b/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml
index e48d0b906..ddc262436 100644
--- a/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml
+++ b/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml
@@ -19,8 +19,7 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  notify:
-    - restart ceph rgws
+  notify: restart ceph rgws
 
 - name: systemd start rgw container
   systemd:
diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml
index c785aff83..43248517c 100644
--- a/roles/ceph-rgw/tasks/main.yml
+++ b/roles/ceph-rgw/tasks/main.yml
@@ -23,8 +23,7 @@
   when: rgw_multisite
 
 - name: rgw pool related tasks
-  when:
-    - rgw_create_pools is defined
+  when: rgw_create_pools is defined
   block:
     - name: create rgw pools if rgw_create_pools is defined
       command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
@@ -43,5 +42,4 @@
       run_once: true
       register: result
       until: result is succeeded
-      when:
-        - item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
+      when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
diff --git a/roles/ceph-rgw/tasks/multisite/destroy.yml b/roles/ceph-rgw/tasks/multisite/destroy.yml
index 82342e269..aac0e86bc 100644
--- a/roles/ceph-rgw/tasks/multisite/destroy.yml
+++ b/roles/ceph-rgw/tasks/multisite/destroy.yml
@@ -12,8 +12,7 @@
   failed_when: false
   register: rgw_remove_zone_from_zonegroup
   changed_when: rgw_remove_zone_from_zonegroup.rc == 0
-  notify:
-    - update period
+  notify: update period
 
 - name: delete the zone
   command: radosgw-admin zone delete --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }}
@@ -44,5 +43,4 @@
   when:
     - rgw_zone is defined
     - rgw_zonegroup is defined
-  notify:
-    - restart rgw
+  notify: restart rgw
diff --git a/roles/ceph-rgw/tasks/multisite/main.yml b/roles/ceph-rgw/tasks/multisite/main.yml
index 93a08e677..40b884f8d 100644
--- a/roles/ceph-rgw/tasks/multisite/main.yml
+++ b/roles/ceph-rgw/tasks/multisite/main.yml
@@ -22,5 +22,4 @@
     section: "client.rgw.{{ ansible_hostname }}"
     option: "rgw_zone"
     value: "{{ rgw_zone }}"
-  notify:
-    - restart rgw
+  notify: restart rgw
diff --git a/roles/ceph-rgw/tasks/multisite/master.yml b/roles/ceph-rgw/tasks/multisite/master.yml
index 26449f3f3..5c1836ece 100644
--- a/roles/ceph-rgw/tasks/multisite/master.yml
+++ b/roles/ceph-rgw/tasks/multisite/master.yml
@@ -3,37 +3,30 @@
   command: "{{ docker_exec_cmd }} radosgw-admin realm create --rgw-realm={{ rgw_realm }} --default"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
-  when:
-    - "'No such file or directory' in realmcheck.stderr"
+  when: "'No such file or directory' in realmcheck.stderr"
 
 - name: create the zonegroup
   command: "{{ docker_exec_cmd }} radosgw-admin zonegroup create --rgw-zonegroup={{ rgw_zonegroup }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --master --default"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
-  when:
-    - "'No such file or directory' in zonegroupcheck.stderr"
+  when: "'No such file or directory' in zonegroupcheck.stderr"
 
 - name: create the zone
   command: "{{ docker_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default --master"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
-  when:
-    - "'No such file or directory' in zonecheck.stderr"
+  when: "'No such file or directory' in zonecheck.stderr"
 
 - name: create the zone user
   command: "{{ docker_exec_cmd }} radosgw-admin user create --uid={{ rgw_zone_user }} --display-name=\"Zone User\" --access-key={{ system_access_key }} --secret={{ system_secret_key }} --system"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
-  when:
-    - "'could not fetch user info: no user info saved' in usercheck.stderr"
-  notify:
-    - update period
+  when: "'could not fetch user info: no user info saved' in usercheck.stderr"
+  notify: update period
 
 - name: add other endpoints to the zone
   command: "{{ docker_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
-  when:
-    - rgw_multisite_endpoints_list is defined
-  notify:
-    - update period
+  when: rgw_multisite_endpoints_list is defined
+  notify: update period
diff --git a/roles/ceph-rgw/tasks/multisite/secondary.yml b/roles/ceph-rgw/tasks/multisite/secondary.yml
index 24a44cf46..53ee83bd9 100644
--- a/roles/ceph-rgw/tasks/multisite/secondary.yml
+++ b/roles/ceph-rgw/tasks/multisite/secondary.yml
@@ -3,15 +3,13 @@
   command: "{{ docker_exec_cmd }} radosgw-admin realm pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
-  when:
-    - "'No such file or directory' in realmcheck.stderr"
+  when: "'No such file or directory' in realmcheck.stderr"
 
 - name: fetch the period
   command: "{{ docker_exec_cmd }} radosgw-admin period pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
-  when:
-    - "'No such file or directory' in realmcheck.stderr"
+  when: "'No such file or directory' in realmcheck.stderr"
 
 - name: set default realm
   command: "{{ docker_exec_cmd }} radosgw-admin realm default --rgw-realm={{ rgw_realm }}"
@@ -29,16 +27,12 @@
   command: "{{ docker_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
-  when:
-    - "'No such file or directory' in zonecheck.stderr"
-  notify:
-    - update period
+  when: "'No such file or directory' in zonecheck.stderr"
+  notify: update period
 
 - name: add other endpoints to the zone
   command: "{{ docker_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
-  when:
-    - rgw_multisite_endpoints_list is defined
-  notify:
-    - update period
+  when: rgw_multisite_endpoints_list is defined
+  notify: update period
diff --git a/roles/ceph-rgw/tasks/openstack-keystone.yml b/roles/ceph-rgw/tasks/openstack-keystone.yml
index 2579f1a19..3af8405da 100644
--- a/roles/ceph-rgw/tasks/openstack-keystone.yml
+++ b/roles/ceph-rgw/tasks/openstack-keystone.yml
@@ -5,8 +5,7 @@
     state: present
   register: result
   until: result is succeeded
-  when:
-    - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
+  when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr ==
'dnf' - name: install libnss3-tools on debian package: @@ -14,8 +13,7 @@ state: present register: result until: result is succeeded - when: - - ansible_pkg_mgr == 'apt' + when: ansible_pkg_mgr == 'apt' - name: create nss directory for keystone certificates file: @@ -33,5 +31,4 @@ with_items: - "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | certutil -d {{ radosgw_nss_db_path }} -A -n ca -t 'TCu,Cu,Tuw'" - "openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | certutil -A -d {{ radosgw_nss_db_path }} -n signing_cert -t 'P,P,P'" - tags: - - skip_ansible_lint + tags: skip_ansible_lint diff --git a/roles/ceph-rgw/tasks/pre_requisite.yml b/roles/ceph-rgw/tasks/pre_requisite.yml index 6219d2799..96d08a18c 100644 --- a/roles/ceph-rgw/tasks/pre_requisite.yml +++ b/roles/ceph-rgw/tasks/pre_requisite.yml @@ -5,8 +5,7 @@ creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring changed_when: false with_items: "{{ rgw_instances }}" - when: - - cephx + when: cephx - name: set rados gateway instance key permissions file: @@ -15,5 +14,4 @@ group: "ceph" mode: "0600" with_items: "{{ rgw_instances }}" - when: - - cephx + when: cephx diff --git a/roles/ceph-rgw/tasks/start_radosgw.yml b/roles/ceph-rgw/tasks/start_radosgw.yml index 0419c1247..39102e82f 100644 --- a/roles/ceph-rgw/tasks/start_radosgw.yml +++ b/roles/ceph-rgw/tasks/start_radosgw.yml @@ -3,8 +3,7 @@ file: state: directory path: "/etc/systemd/system/ceph-radosgw@.service.d/" - when: - - ceph_rgw_systemd_overrides is defined + when: ceph_rgw_systemd_overrides is defined - name: add ceph-rgw systemd service overrides config_template: @@ -12,8 +11,7 @@ dest: "/etc/systemd/system/ceph-radosgw@.service.d/ceph-radosgw-systemd-overrides.conf" config_overrides: "{{ ceph_rgw_systemd_overrides | default({}) }}" config_type: "ini" - when: - - ceph_rgw_systemd_overrides is defined + when: ceph_rgw_systemd_overrides is defined - name: start rgw instance service: diff --git a/roles/ceph-validate/tasks/check_eth_mon.yml b/roles/ceph-validate/tasks/check_eth_mon.yml index ce42be48b..5f805473c 100644 --- a/roles/ceph-validate/tasks/check_eth_mon.yml +++ b/roles/ceph-validate/tasks/check_eth_mon.yml @@ -2,14 +2,12 @@ - name: "fail if {{ monitor_interface }} does not exist on {{ inventory_hostname }}" fail: msg: "{{ monitor_interface }} does not exist on {{ inventory_hostname }}" - when: - - monitor_interface not in ansible_interfaces + when: monitor_interface not in ansible_interfaces - name: "fail if {{ monitor_interface }} is not active on {{ inventory_hostname }}" fail: msg: "{{ monitor_interface }} is not active on {{ inventory_hostname }}" - when: - - not hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['active'] + when: not hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['active'] - name: "fail if {{ monitor_interface }} does not have any ip v4 address on {{ inventory_hostname }}" fail: diff --git a/roles/ceph-validate/tasks/check_eth_rgw.yml b/roles/ceph-validate/tasks/check_eth_rgw.yml index d9fff4860..73db6078e 100644 --- a/roles/ceph-validate/tasks/check_eth_rgw.yml +++ b/roles/ceph-validate/tasks/check_eth_rgw.yml @@ -2,14 +2,12 @@ - name: "fail if {{ radosgw_interface }} does not exist on {{ inventory_hostname }}" fail: msg: "{{ radosgw_interface }} does not exist on {{ inventory_hostname }}" - when: - - radosgw_interface not in ansible_interfaces + when: radosgw_interface not in ansible_interfaces - name: "fail 
if {{ radosgw_interface }} is not active on {{ inventory_hostname }}" fail: msg: "{{ radosgw_interface }} is not active on {{ inventory_hostname }}" - when: - - hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['active'] == "false" + when: hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['active'] == "false" - name: "fail if {{ radosgw_interface }} does not have any ip v4 address on {{ inventory_hostname }}" fail: diff --git a/roles/ceph-validate/tasks/check_ipaddr_mon.yml b/roles/ceph-validate/tasks/check_ipaddr_mon.yml index e5cb1074f..59f7426aa 100644 --- a/roles/ceph-validate/tasks/check_ipaddr_mon.yml +++ b/roles/ceph-validate/tasks/check_ipaddr_mon.yml @@ -2,5 +2,4 @@ - name: "fail if {{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}" fail: msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}" - when: - - hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[inventory_hostname]['monitor_address_block']) | length == 0 + when: hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[inventory_hostname]['monitor_address_block']) | length == 0 diff --git a/roles/ceph-validate/tasks/check_system.yml b/roles/ceph-validate/tasks/check_system.yml index 5041896b7..4a538f9a8 100644 --- a/roles/ceph-validate/tasks/check_system.yml +++ b/roles/ceph-validate/tasks/check_system.yml @@ -2,20 +2,17 @@ - name: fail on unsupported system fail: msg: "System not supported {{ ansible_system }}" - when: - - ansible_system not in ['Linux'] + when: ansible_system not in ['Linux'] - name: fail on unsupported architecture fail: msg: "Architecture not supported {{ ansible_architecture }}" - when: - - ansible_architecture not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64'] + when: ansible_architecture not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64'] - name: fail on unsupported distribution fail: msg: "Distribution not supported {{ ansible_os_family }}" - when: - - ansible_os_family not in ['Debian', 'RedHat', 'ClearLinux', 'Suse'] + when: ansible_os_family not in ['Debian', 'RedHat', 'ClearLinux', 'Suse'] - name: red hat based systems tasks when: @@ -25,12 +22,10 @@ - name: fail on unsupported distribution for red hat ceph storage fail: msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Ceph Storage, only RHEL >= 7.3" - when: - - ansible_distribution_version | version_compare('7.3', '<') + when: ansible_distribution_version | version_compare('7.3', '<') - name: subscription manager related tasks - when: - - ceph_repository_type == 'cdn' + when: ceph_repository_type == 'cdn' block: - name: determine if node is registered with subscription-manager command: subscription-manager identity @@ -42,8 +37,7 @@ - name: fail on unregistered red hat rhcs linux fail: msg: "You must register your machine with subscription-manager" - when: - - subscription.rc != '0' + when: subscription.rc != '0' - name: fail on unsupported distribution for ubuntu cloud archive fail: @@ -62,8 +56,7 @@ - name: fail on unsupported ansible version (1.X) fail: msg: "Ansible version must be >= 2.7.x, please update!" 
- when: - - ansible_version.major|int < 2 + when: ansible_version.major|int < 2 - name: fail on unsupported ansible version fail: @@ -75,16 +68,14 @@ - name: fail if systemd is not present fail: msg: "Systemd must be present" - when: - - ansible_service_mgr != 'systemd' + when: ansible_service_mgr != 'systemd' - name: check if iscsi gateways is target on supported distros and versions block: - name: fail on unsupported distribution for iscsi gateways fail: msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora" - when: - - ansible_distribution not in ['RedHat', 'CentOS', 'Fedora'] + when: ansible_distribution not in ['RedHat', 'CentOS', 'Fedora'] - name: fail on unsupported distribution version for iscsi gateways fail: @@ -92,6 +83,5 @@ when: - ansible_distribution_version < '7.4' - ansible_distribution in ['RedHat', 'CentOS'] - when: - - iscsi_gw_group_name in group_names + when: iscsi_gw_group_name in group_names diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml index 1c3ea4bfc..30bf99697 100644 --- a/roles/ceph-validate/tasks/main.yml +++ b/roles/ceph-validate/tasks/main.yml @@ -11,14 +11,12 @@ - not use_fqdn_yes_i_am_sure - name: debian based systems tasks - when: - - ansible_os_family == 'Debian' + when: ansible_os_family == 'Debian' block: - name: fail if local scenario is enabled on debian fail: msg: "'local' installation scenario not supported on Debian systems" - when: - - ceph_origin == 'local' + when: ceph_origin == 'local' - name: verify that ceph_rhcs_cdn_debian_repo url is valid for red hat storage fail: msg: "ceph_rhcs_cdn_debian_repo url is invalid, please set your customername:customerpasswd" @@ -95,8 +93,7 @@ - name: include check_iscsi.yml include_tasks: check_iscsi.yml - when: - - iscsi_gw_group_name in group_names + when: iscsi_gw_group_name in group_names - name: warn about radosgw_civetweb_num_threads option deprecation debug: diff --git a/site-container.yml.sample b/site-container.yml.sample index d10a66a05..516116951 100644 --- a/site-container.yml.sample +++ b/site-container.yml.sample @@ -28,8 +28,7 @@ # pre-tasks for following import - - name: gather facts setup: - when: - - not delegate_facts_host | bool + when: not delegate_facts_host | bool - name: gather and delegate facts setup: @@ -37,21 +36,18 @@ delegate_facts: True with_items: "{{ groups['all'] }}" run_once: true - when: - - delegate_facts_host | bool + when: delegate_facts_host | bool - name: check if it is atomic host stat: path: /run/ostree-booted register: stat_ostree - tags: - - always + tags: always - name: set_fact is_atomic set_fact: is_atomic: '{{ stat_ostree.stat.exists }}' - tags: - - always + tags: always - name: check if podman binary is present stat: @@ -397,8 +393,7 @@ name: ceph-handler - import_role: name: ceph-container-common - when: - - inventory_hostname == groups.get('clients', ['']) | first + when: inventory_hostname == groups.get('clients', ['']) | first - import_role: name: ceph-config tags: ['ceph_update_config'] diff --git a/site.yml.sample b/site.yml.sample index 2290a3b70..9aa1923c8 100644 --- a/site.yml.sample +++ b/site.yml.sample @@ -18,8 +18,7 @@ any_errors_fatal: true become: true - tags: - - always + tags: always vars: delegate_facts_host: True @@ -32,8 +31,7 @@ - name: gather facts setup: - when: - - not delegate_facts_host | bool + when: not delegate_facts_host | bool - name: gather and delegate facts setup: @@ -41,8 +39,7 @@ delegate_facts: True with_items: "{{ groups['all'] }}" run_once: true - 
when: - - delegate_facts_host | bool + when: delegate_facts_host | bool - name: install required packages for fedora > 23 raw: sudo dnf -y install python2-dnf libselinux-python ntp @@ -56,14 +53,12 @@ stat: path: /run/ostree-booted register: stat_ostree - tags: - - always + tags: always - name: set_fact is_atomic set_fact: is_atomic: '{{ stat_ostree.stat.exists }}' - tags: - - always + tags: always tasks: - import_role: diff --git a/tests/functional/dev_setup.yml b/tests/functional/dev_setup.yml index 30136eb2f..d75a6e5ab 100644 --- a/tests/functional/dev_setup.yml +++ b/tests/functional/dev_setup.yml @@ -2,8 +2,7 @@ - hosts: localhost gather_facts: false become: no - tags: - - vagrant_setup + tags: vagrant_setup tasks: @@ -32,4 +31,4 @@ - name: print contents of {{ group_vars_path }} command: "cat {{ group_vars_path }}" - when: dev_setup \ No newline at end of file + when: dev_setup diff --git a/tests/functional/lvm_setup.yml b/tests/functional/lvm_setup.yml index 250507983..e6deb9d8f 100644 --- a/tests/functional/lvm_setup.yml +++ b/tests/functional/lvm_setup.yml @@ -13,13 +13,11 @@ stat: path: /run/ostree-booted register: stat_ostree - tags: - - always + tags: always - name: set_fact is_atomic set_fact: is_atomic: '{{ stat_ostree.stat.exists }}' - tags: - - always + tags: always # Some images may not have lvm2 installed - name: install lvm2 package: @@ -27,8 +25,7 @@ state: present register: result until: result is succeeded - when: - - not is_atomic + when: not is_atomic - name: create physical volume command: pvcreate /dev/sdb failed_when: false diff --git a/tests/functional/rgw_multisite.yml b/tests/functional/rgw_multisite.yml index 45502c560..b354f9ac0 100644 --- a/tests/functional/rgw_multisite.yml +++ b/tests/functional/rgw_multisite.yml @@ -21,8 +21,7 @@ state: present register: result until: result is succeeded - when: - - not is_atomic + when: not is_atomic - name: generate and upload a random 10Mb file - containerized deployment command: > diff --git a/tests/functional/rhcs_setup.yml b/tests/functional/rhcs_setup.yml index abbb4030f..a7b87aecb 100644 --- a/tests/functional/rhcs_setup.yml +++ b/tests/functional/rhcs_setup.yml @@ -2,8 +2,7 @@ - hosts: localhost gather_facts: false become: yes - tags: - - vagrant_setup + tags: vagrant_setup tasks: - name: change centos/7 vagrant box name to rhel7 @@ -106,8 +105,7 @@ baseurl: "{{ repo_url }}/MON/x86_64/os/" gpgcheck: no enabled: yes - when: - - not is_atomic + when: not is_atomic - hosts: osds gather_facts: false @@ -121,8 +119,7 @@ baseurl: "{{ repo_url }}/OSD/x86_64/os/" gpgcheck: no enabled: yes - when: - - not is_atomic + when: not is_atomic - name: set MTU on eth2 command: "ifconfig eth2 mtu 1400 up" @@ -139,5 +136,4 @@ baseurl: "{{ repo_url }}/Tools/x86_64/os/" gpgcheck: no enabled: yes - when: - - not is_atomic + when: not is_atomic diff --git a/tests/functional/setup.yml b/tests/functional/setup.yml index 52f8ebc5c..f956c2b22 100644 --- a/tests/functional/setup.yml +++ b/tests/functional/setup.yml @@ -21,8 +21,7 @@ state: present register: result until: result is succeeded - when: - - not is_atomic + when: not is_atomic - name: centos based systems - configure repos block: