ceph-facts: only get fsid when monitors are present
When running the rolling_update playbook with an inventory that has no
monitor nodes defined (like the external scenario), we can't retrieve
the cluster fsid from a running monitor.
In this scenario we have to pass this information manually via
group_vars or host_vars (see the example below).
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1877426
Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit f63022dfec)
pull/5754/head
parent dd05d8ba90
commit 0c0a930374
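For an inventory without monitor nodes, the fsid can be supplied via group_vars or host_vars. A minimal sketch, assuming the fsid of the running external cluster is known (the values are illustrative and mirror the ones used in the tox scenario at the end of this diff; the file path is just one possible location):

    # group_vars/all.yml -- hypothetical location, any group_vars/host_vars file works
    fsid: 40358a87-ab6e-4bdc-83db-1d909147861c    # fsid of the already running external cluster
    generate_fsid: false                          # don't generate a new fsid for this deployment
    external_cluster_mon_ips: 192.168.31.10,192.168.31.11,192.168.31.12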
@@ -861,25 +861,33 @@
       command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release octopus"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: True
-      when: containerized_deployment | bool
+      when:
+        - containerized_deployment | bool
+        - groups.get(mon_group_name, []) | length > 0

     - name: non container | disallow pre-octopus OSDs and enable all new octopus-only functionality
       command: "ceph --cluster {{ cluster }} osd require-osd-release octopus"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: True
-      when: not containerized_deployment | bool
+      when:
+        - not containerized_deployment | bool
+        - groups.get(mon_group_name, []) | length > 0

     - name: container | enable msgr2 protocol
       command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} mon enable-msgr2"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: True
-      when: containerized_deployment | bool
+      when:
+        - containerized_deployment | bool
+        - groups.get(mon_group_name, []) | length > 0

     - name: non container | enable msgr2 protocol
       command: "ceph --cluster {{ cluster }} mon enable-msgr2"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: True
-      when: not containerized_deployment | bool
+      when:
+        - not containerized_deployment | bool
+        - groups.get(mon_group_name, []) | length > 0

     - import_role:
         name: ceph-handler
@@ -137,12 +137,16 @@
   register: rolling_update_fsid
   delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
   until: rolling_update_fsid is succeeded
-  when: rolling_update | bool
+  when:
+    - rolling_update | bool
+    - groups.get(mon_group_name, []) | length > 0

 - name: set_fact fsid
   set_fact:
     fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
-  when: rolling_update | bool
+  when:
+    - rolling_update | bool
+    - groups.get(mon_group_name, []) | length > 0

 - name: set_fact ceph_current_status (convert to json)
   set_fact:
@@ -64,4 +64,18 @@ commands=

   bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf"
+
+  ansible-playbook -vv -i {changedir}/inventory/external_clients-hosts {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
+      ireallymeanit=yes \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
+      external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
+      generate_fsid=false \
+      ceph_docker_registry_auth=True \
+      ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+      ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+  "
+
+  bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf"

   vagrant destroy --force
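For context, the external_clients-hosts inventory referenced above describes the external scenario from the commit message: client nodes only, no mons group. A hypothetical sketch of such an inventory (host names are made up), which is exactly the case the new groups.get(mon_group_name, []) | length > 0 guard covers:

    # inventory/external_clients-hosts -- illustrative only
    [clients]
    client0
    client1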