ceph-ansible/roles/ceph-osd/tasks/main.yml

---
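# add_osd is true when the hosts in this play do not cover the whole OSD
# group (e.g. a run limited to newly added OSD nodes).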
- name: set_fact add_osd
  set_fact:
    add_osd: "{{ groups[osd_group_name] | length != ansible_play_hosts_all | length }}"
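
# Build the "<container_binary> exec <ceph-mon container>" prefix used to run
# ceph CLI commands on the monitors; the fact is set on every monitor host so
# later tasks can delegate to the first one.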
- name: set_fact container_exec_cmd
  set_fact:
    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
  with_items: "{{ groups.get(mon_group_name, []) }}"
  delegate_to: "{{ item }}"
  delegate_facts: true
  run_once: true
  when: containerized_deployment | bool

- name: include_tasks system_tuning.yml
  include_tasks: system_tuning.yml

- name: install dependencies
  package:
    name: parted
    state: present
  register: result
  until: result is succeeded
  when:
    - not containerized_deployment | bool
    - ansible_os_family != 'ClearLinux'
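
# numactl is only needed on the host when containerized OSDs are started with
# ceph_osd_numactl_opts set.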
- name: install numactl when needed
  package:
    name: numactl
  register: result
  until: result is succeeded
  when:
    - containerized_deployment | bool
    - ceph_osd_numactl_opts | length > 0
  tags: with_pkg

- name: include_tasks common.yml
  include_tasks: common.yml
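
# Set the noup flag so freshly prepared OSDs are not marked up one by one;
# it is removed again once start_osds.yml has brought them all up.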
- name: set noup flag
  command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd set noup"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: True
  changed_when: False
  when: not rolling_update | default(False) | bool

- name: include container_options_facts.yml
  include_tasks: container_options_facts.yml
  when: containerized_deployment | bool
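
# OSDs are prepared with ceph-volume: scenarios/lvm.yml consumes the explicit
# lvm_volumes definitions, scenarios/lvm-batch.yml passes raw devices to
# "ceph-volume lvm batch".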
- name: include_tasks scenarios/lvm.yml
  include_tasks: scenarios/lvm.yml
  when:
    - lvm_volumes|length > 0
    - not rolling_update|default(False) | bool

- name: include_tasks scenarios/lvm-batch.yml
  include_tasks: scenarios/lvm-batch.yml
  when:
    - devices|length > 0
    - not rolling_update|default(False) | bool

- name: include_tasks start_osds.yml
  include_tasks: start_osds.yml

- name: unset noup flag
  command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd unset noup"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  changed_when: False
  when:
    - not rolling_update | default(False) | bool
    - inventory_hostname == ansible_play_hosts_all | last
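
# Poll "ceph -s" on the first monitor until the osdmap reports at least one
# OSD and every registered OSD is up; only the last host in the play runs
# this check.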
- name: wait for all osd to be up
  command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} -s -f json"
  register: wait_for_all_osds_up
  retries: "{{ nb_retry_wait_osd_up }}"
  delay: "{{ delay_wait_osd_up }}"
  changed_when: false
  delegate_to: "{{ groups[mon_group_name][0] }}"
  until:
    - (wait_for_all_osds_up.stdout | from_json)["osdmap"]["osdmap"]["num_osds"] | int > 0
    - (wait_for_all_osds_up.stdout | from_json)["osdmap"]["osdmap"]["num_osds"] == (wait_for_all_osds_up.stdout | from_json)["osdmap"]["osdmap"]["num_up_osds"]
  when:
    - inventory_hostname == ansible_play_hosts_all | last
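
# Apply the configured CRUSH rules when crush_rule_config is enabled on the
# monitors.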
- name: include crush_rules.yml
  include_tasks: crush_rules.yml
  when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(false) | bool
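
# Translate the legacy openstack_keys syntax (mon_cap/osd_cap/mds_cap/mgr_cap)
# into the caps dict expected by the ceph_key module.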
- name: set_fact openstack_keys_tmp - preserve backward compatibility after the introduction of the ceph_keys module
  set_fact:
    openstack_keys_tmp: "{{ openstack_keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap|default(''), 'mds': item.mds_cap|default(''), 'mgr': item.mgr_cap|default('') } , 'mode': item.mode } ] }}"
  with_items: "{{ openstack_keys }}"
  when:
    - not add_osd | bool
    - openstack_config | bool
    - item.get('mon_cap', None)
    # it's enough to assume we are running an old-fashioned syntax simply by checking the presence of mon_cap since every key needs this cap
- name: set_fact keys - override keys_tmp with keys
  set_fact:
    openstack_keys: "{{ openstack_keys_tmp }}"
  when:
    - not add_osd | bool
    - openstack_keys_tmp is defined

# Create the pools listed in openstack_pools
- name: include openstack_config.yml
  include_tasks: openstack_config.yml
  when:
    - not add_osd | bool
    - not rolling_update | default(False) | bool
    - openstack_config | bool
    - inventory_hostname == groups[osd_group_name] | last