---
# Main task list for deploying Ceph OSDs.
# Flow: detect add-osd mode -> tune system -> install deps -> set noup ->
# prepare OSDs (lvm / lvm-batch) -> start OSDs -> wait until all are up ->
# crush rules / openstack pools -> unset noup.

# add_osd is true when this play targets only a subset of the osd group,
# i.e. we are adding OSDs to an existing cluster rather than deploying it.
- name: set_fact add_osd
  set_fact:
    add_osd: "{{ groups[osd_group_name] | length != ansible_play_hosts_all | length }}"

- name: include_tasks system_tuning.yml
  include_tasks: system_tuning.yml

- name: install dependencies
  package:
    name: parted
    state: present
  register: result
  until: result is succeeded
  when:
    - not containerized_deployment | bool
    - ansible_os_family != 'ClearLinux'

- name: install numactl when needed
  package:
    name: numactl
  register: result
  until: result is succeeded
  when:
    - containerized_deployment | bool
    - ceph_osd_numactl_opts | length > 0
  tags: with_pkg

- name: include_tasks common.yml
  include_tasks: common.yml

# Prevent new OSDs from being marked up while they are being prepared,
# so PGs don't start peering against half-deployed daemons. Skipped on
# rolling updates, which manage the flag themselves.
- name: set noup flag
  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set noup"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: true
  changed_when: false
  when: not rolling_update | default(False) | bool

- name: include container_options_facts.yml
  include_tasks: container_options_facts.yml

# OSD preparation scenarios: explicit lvm_volumes vs. ceph-volume batch
# over raw devices. Both are skipped during rolling updates.
- name: include_tasks scenarios/lvm.yml
  include_tasks: scenarios/lvm.yml
  when:
    - lvm_volumes | length > 0
    - not rolling_update | default(False) | bool

- name: include_tasks scenarios/lvm-batch.yml
  include_tasks: scenarios/lvm-batch.yml
  when:
    - devices | length > 0
    - not rolling_update | default(False) | bool

- name: include_tasks start_osds.yml
  include_tasks: start_osds.yml

# Poll `ceph -s` on the first monitor until every registered OSD reports up.
- name: wait for all osd to be up
  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
  register: wait_for_all_osds_up
  retries: "{{ nb_retry_wait_osd_up }}"
  delay: "{{ delay_wait_osd_up }}"
  changed_when: false
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: true
  until:
    - (wait_for_all_osds_up.stdout | from_json)["osdmap"]["num_osds"] | int > 0
    - (wait_for_all_osds_up.stdout | from_json)["osdmap"]["num_osds"] == (wait_for_all_osds_up.stdout | from_json)["osdmap"]["num_up_osds"]

- name: include crush_rules.yml
  include_tasks: crush_rules.yml
  when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(false) | bool

# Convert the legacy flat openstack_keys syntax (mon_cap/osd_cap/... keys)
# into the structured 'caps' dict expected by the ceph_key module.
- name: set_fact openstack_keys_tmp - preserve backward compatibility after the introduction of the ceph_keys module
  set_fact:
    openstack_keys_tmp: "{{ openstack_keys_tmp | default([]) + [{'key': item.key, 'name': item.name, 'caps': {'mon': item.mon_cap, 'osd': item.osd_cap | default(''), 'mds': item.mds_cap | default(''), 'mgr': item.mgr_cap | default('')}, 'mode': item.mode}] }}"
  with_items: "{{ openstack_keys }}"
  when:
    - not add_osd | bool
    - openstack_config | bool
    # it's enough to assume we are running an old-fashioned syntax simply by
    # checking the presence of mon_cap since every key needs this cap
    - item.get('mon_cap', None)

- name: set_fact keys - override keys_tmp with keys
  set_fact:
    openstack_keys: "{{ openstack_keys_tmp }}"
  when:
    - not add_osd | bool
    - openstack_keys_tmp is defined

# Only the last host in the play clears the flag, once every OSD is up.
- name: unset noup flag
  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  changed_when: false
  when:
    - not rolling_update | default(False) | bool
    - inventory_hostname == ansible_play_hosts_all | last

# Create the pools listed in openstack_pools
- name: include openstack_config.yml
  include_tasks: openstack_config.yml
  when:
    - not add_osd | bool
    - openstack_config | bool
    - inventory_hostname == groups[osd_group_name] | last