osd: commonize start_osd code

Since the introduction of `ceph-volume`, there is no need to split these tasks.

Let's refactor this part of the code so it is clearer.

Incidentally, this was breaking rolling_update.yml when `openstack_config:
true` was set, because nothing in the ceph-osd role ensured the OSDs were
started (`openstack_config.yml` contains a check that all OSDs are up, which
was therefore failing), and the OSDs on the last OSD node ended up not being
started at all.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/3325/head
Guillaume Abrioux 2018-11-07 11:45:29 +01:00 committed by Sébastien Han
parent 3ac6619fb9
commit f7fcc012e9
4 changed files with 54 additions and 94 deletions
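
For context, the `openstack_config.yml` check referred to in the commit message polls the cluster until every OSD reports up. The task below is only a rough sketch of that kind of check, not the actual task from the playbook; the flat `num_osds`/`num_up_osds` keys assumed for the `ceph osd stat -f json` output, as well as the retry values, are illustrative.

- name: wait for all osds to be up (illustrative sketch)
  command: ceph --cluster ceph osd stat -f json
  register: osd_stat
  changed_when: false
  retries: 10
  delay: 10
  # Assumed output shape: {"num_osds": N, "num_up_osds": M, ...}; the real key
  # layout differs between Ceph releases.
  until: >
    (osd_stat.stdout | from_json).num_osds | int > 0 and
    (osd_stat.stdout | from_json).num_osds == (osd_stat.stdout | from_json).num_up_osds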


@@ -1,3 +0,0 @@
----
-- name: include start_docker_osd.yml
-  include_tasks: start_docker_osd.yml


@@ -1,66 +0,0 @@
----
-# For openstack VMs modify the mount point below depending on if the Openstack
-# VM deploy tool defaults to mounting ephemeral disks
-- name: umount ceph disk (if on openstack)
-  mount:
-    name: /mnt
-    src: /dev/vdb
-    fstype: ext3
-    state: unmounted
-  when:
-    - ceph_docker_on_openstack
-
-- name: test if the container image has the disk_list function
-  command: docker run --rm --entrypoint=stat {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list.sh
-  changed_when: false
-  failed_when: false
-  register: disk_list
-  when:
-    - osd_scenario != 'lvm'
-
-- name: generate ceph osd docker run script
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
-    dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
-    owner: "root"
-    group: "root"
-    mode: "0744"
-  notify:
-    - restart ceph osds
-
-- name: generate systemd unit file
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-osd.service.j2"
-    dest: /etc/systemd/system/ceph-osd@.service
-    owner: "root"
-    group: "root"
-    mode: "0644"
-  notify:
-    - restart ceph osds
-
-- name: collect osd ids
-  shell: >
-    docker run --rm
-    --privileged=true
-    -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket
-    -v /etc/ceph:/etc/ceph:z
-    -v /dev:/dev
-    --entrypoint=ceph-volume
-    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-    lvm list --format json | python -c 'import sys, json; print("\n".join(json.load(sys.stdin).keys()))'
-  changed_when: false
-  failed_when: false
-  register: ceph_osd_ids
-  when:
-    - containerized_deployment
-    - osd_scenario == 'lvm'
-
-- name: systemd start osd container
-  systemd:
-    name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' else item }}
-    state: started
-    enabled: yes
-    daemon_reload: yes
-  with_items: "{{ devices if osd_scenario != 'lvm' else ceph_osd_ids.stdout_lines }}"
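
The deleted `collect osd ids` task above turns the output of `ceph-volume lvm list --format json` into OSD ids by piping it through an inline Python one-liner that prints the top-level JSON keys; the reworked `start_osds.yml` further down does the same with Jinja's `from_json` filter. Below is a minimal sketch of that parsing, runnable on its own; the play and the `sample` payload are made up for illustration.

- hosts: localhost
  gather_facts: false
  vars:
    # Made-up payload mimicking the shape of `ceph-volume lvm list --format json`:
    # a JSON object keyed by OSD id.
    sample: '{"0": [{"type": "block"}], "1": [{"type": "block"}]}'
  tasks:
    - name: print osd ids extracted from a ceph-volume style payload
      debug:
        msg: "{{ (sample | from_json).keys() | list }}"   # -> ['0', '1']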


@@ -66,14 +66,6 @@
 - name: include_tasks start_osds.yml
   include_tasks: start_osds.yml
-  when:
-    - not containerized_deployment
-    - osd_scenario != 'lvm'
-
-- name: include_tasks docker/main.yml
-  include_tasks: docker/main.yml
-  when:
-    - containerized_deployment
 
 - name: set_fact openstack_keys_tmp - preserve backward compatibility after the introduction of the ceph_keys module
   set_fact:


@@ -1,15 +1,60 @@
 ---
-- name: get osd id
-  shell: |
-    ls /var/lib/ceph/osd/ | sed 's/.*-//'
-  changed_when: false
-  failed_when: false
-  check_mode: no
-  register: osd_id
-  until: osd_id.stdout_lines|length == devices|unique|length
-  retries: 10
-  when:
-    - osd_scenario != 'lvm'
+- block:
+  # For openstack VMs modify the mount point below depending on if the Openstack
+  # VM deploy tool defaults to mounting ephemeral disks
+  - name: umount ceph disk (if on openstack)
+    mount:
+      name: /mnt
+      src: /dev/vdb
+      fstype: ext3
+      state: unmounted
+    when:
+      - ceph_docker_on_openstack
+
+  - name: generate ceph osd docker run script
+    become: true
+    template:
+      src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
+      dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
+      owner: "root"
+      group: "root"
+      mode: "0744"
+    notify:
+      - restart ceph osds
+  when:
+    - containerized_deployment
+
+- name: set_fact docker_exec_start_osd
+  set_fact:
+    docker_exec_start_osd: "{{ 'docker run --rm --privileged=true -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /etc/ceph:/etc/ceph:z -v /dev:/dev --entrypoint=ceph-volume ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph-volume' }}"

+- name: collect osd ids
+  shell: >
+    {{ docker_exec_start_osd }} lvm list --format json
+  changed_when: false
+  failed_when: false
+  register: ceph_osd_ids
+
+- name: generate systemd unit file
+  become: true
+  template:
+    src: "{{ role_path }}/templates/ceph-osd.service.j2"
+    dest: /etc/systemd/system/ceph-osd@.service
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  notify:
+    - restart ceph osds
+  when:
+    - containerized_deployment
+
+- name: systemd start osd
+  systemd:
+    name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' else item }}
+    state: started
+    enabled: yes
+    daemon_reload: yes
+  with_items: "{{ devices if osd_scenario != 'lvm' else (ceph_osd_ids.stdout | from_json).keys() }}"
 
 - name: ensure systemd service override directory exists
   file:

@@ -27,12 +72,4 @@
     config_type: "ini"
   when:
     - ceph_osd_systemd_overrides is defined
     - ansible_service_mgr == 'systemd'
-
-- name: ensure osd daemons are started
-  service:
-    name: ceph-osd@{{ item }}
-    state: started
-    enabled: true
-  with_items: "{{ (osd_id|default({})).stdout_lines|default([]) }}"
-  changed_when: false
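
The new `systemd start osd` task derives unit names either from raw device paths (non-lvm scenarios, stripping the `/dev/` prefix with `regex_replace`) or directly from the ceph-volume OSD ids collected earlier. Below is a small illustrative play showing how the device branch maps paths to unit names; the device list is made up.

- hosts: localhost
  gather_facts: false
  tasks:
    - name: show systemd unit names derived from device paths
      debug:
        msg: "ceph-osd@{{ item | regex_replace('/dev/', '') }}"
      with_items:
        - /dev/sda      # -> ceph-osd@sda
        - /dev/nvme0n1  # -> ceph-osd@nvme0n1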