ceph-facts: only get fsid when monitors are present

When running the rolling_update playbook with an inventory without
monitor nodes defined (like external scenario) then we can't retrieve
the cluster fsid from the running monitor.
In this scenario we have to pass this information manually (group_vars
or host_vars).

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1877426

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit f63022dfec)
pull/5754/head
Dimitri Savineau 2020-09-10 10:12:13 -04:00 committed by Guillaume Abrioux
parent dd05d8ba90
commit 0c0a930374
3 changed files with 32 additions and 6 deletions

View File

@@ -861,25 +861,33 @@
    command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release octopus"
    delegate_to: "{{ groups[mon_group_name][0] }}"
    run_once: True
    when:
      - containerized_deployment | bool
      - groups.get(mon_group_name, []) | length > 0

- name: non container | disallow pre-octopus OSDs and enable all new octopus-only functionality
  command: "ceph --cluster {{ cluster }} osd require-osd-release octopus"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: True
  when:
    - not containerized_deployment | bool
    - groups.get(mon_group_name, []) | length > 0

- name: container | enable msgr2 protocol
  command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} mon enable-msgr2"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: True
  when:
    - containerized_deployment | bool
    - groups.get(mon_group_name, []) | length > 0

- name: non container | enable msgr2 protocol
  command: "ceph --cluster {{ cluster }} mon enable-msgr2"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: True
  when:
    - not containerized_deployment | bool
    - groups.get(mon_group_name, []) | length > 0

- import_role:
    name: ceph-handler

View File

@@ -137,12 +137,16 @@
  register: rolling_update_fsid
  delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
  until: rolling_update_fsid is succeeded
  when:
    - rolling_update | bool
    - groups.get(mon_group_name, []) | length > 0

- name: set_fact fsid
  set_fact:
    fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
  when:
    - rolling_update | bool
    - groups.get(mon_group_name, []) | length > 0

- name: set_fact ceph_current_status (convert to json)
  set_fact:

View File

@@ -64,4 +64,18 @@ commands=
bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf"
ansible-playbook -vv -i {changedir}/inventory/external_clients-hosts {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
generate_fsid=false \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf"
vagrant destroy --force