diff --git a/infrastructure-playbooks/add-osd.yml b/infrastructure-playbooks/add-osd.yml index 19aa6fc15..d21c5c021 100644 --- a/infrastructure-playbooks/add-osd.yml +++ b/infrastructure-playbooks/add-osd.yml @@ -121,3 +121,10 @@ delegate_to: "{{ groups['mons'][0] }}" run_once: True changed_when: False + + - name: warn user about deprecation + debug: + msg: | + Playbook has completed. + However, note that it will be deprecated in a future release. + You can achieve the same goal using the main playbook with --limit \ No newline at end of file diff --git a/roles/ceph-handler/tasks/handler_osds.yml b/roles/ceph-handler/tasks/handler_osds.yml index 17354a2c2..390b84b2c 100644 --- a/roles/ceph-handler/tasks/handler_osds.yml +++ b/roles/ceph-handler/tasks/handler_osds.yml @@ -3,6 +3,11 @@ set_fact: _osd_handler_called: True +- name: unset noup flag + command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup" + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: False + # This does not just restart OSDs but everything else too. Unfortunately # at this time the ansible role does not have an OSD id list to use # for restarting them specifically. 
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index a3038fcc6..81975049a 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -1,4 +1,8 @@ --- +- name: set_fact add_osd + set_fact: + add_osd: "{{ groups[osd_group_name] | length != ansible_play_hosts_all | length }}" + - name: include_tasks system_tuning.yml include_tasks: system_tuning.yml @@ -33,6 +37,13 @@ - name: include_tasks common.yml include_tasks: common.yml +- name: set noup flag + command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set noup" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: True + changed_when: False + when: not rolling_update | default(False) | bool + - name: include container_options_facts.yml include_tasks: container_options_facts.yml @@ -72,7 +83,7 @@ openstack_keys_tmp: "{{ openstack_keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap|default(''), 'mds': item.mds_cap|default(''), 'mgr': item.mgr_cap|default('') } , 'mode': item.mode } ] }}" with_items: "{{ openstack_keys }}" when: - - not add_osd|default(False) | bool + - not add_osd | bool - openstack_config | bool - item.get('mon_cap', None) # it's enough to assume we are running an old-fashionned syntax simply by checking the presence of mon_cap since every key needs this cap @@ -81,13 +92,21 @@ set_fact: openstack_keys: "{{ openstack_keys_tmp }}" when: - - not add_osd|default(False) | bool + - not add_osd | bool - openstack_keys_tmp is defined +- name: unset noup flag + command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup" + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: False + when: + - not rolling_update | default(False) | bool + - inventory_hostname == ansible_play_hosts_all | last + # Create the pools listed in openstack_pools - name: include openstack_config.yml include_tasks: openstack_config.yml when: - - not 
add_osd|default(False) | bool + - not add_osd | bool - openstack_config | bool - inventory_hostname == groups[osd_group_name] | last