defaults: fix rgw_hostname

A couple of things were wrong in the initial commit:

* the check ceph_release_num[ceph_release] >= ceph_release_num['luminous'] can
never work here, since the ceph_release fact is only set later, by the roles
that run afterwards (either ceph-common or ceph-docker-common sets it)

* we can easily re-use the output of the initial "is ceph running already?"
check instead of running the same query a second time; that is more elegant.

* set the fact rgw_hostname on rgw nodes only

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1618678
Signed-off-by: Sébastien Han <seb@redhat.com>
Branch: pull/3050/head
Author: Sébastien Han
Date: 2018-08-21 20:50:31 +02:00
Parent: 0d448da695
Commit: 6d7fa99ff7
4 changed files with 28 additions and 40 deletions

Changed file 1 of 4

@@ -9,7 +9,6 @@
     msg: "/var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring not found"
   when:
     - not initial_mon_keyring.stat.exists
-    - ceph_current_fsid.rc == 0

 - name: get existing initial mon keyring if it already exists but not monitor_keyring.conf in {{ fetch_directory }}
   shell: |
@@ -17,7 +16,6 @@
   register: monitor_keyring
   when:
     - not monitor_keyring_conf.stat.exists
-    - ceph_current_fsid.rc == 0

 - name: test existing initial mon keyring
   command: ceph --connect-timeout 3 --cluster {{ cluster }} --keyring /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring -n mon. fsid

Changed file 2 of 4

@@ -89,9 +89,9 @@
   run_once: true
   when:
     - cephx
-    - not monitor_keyring_conf.stat.exists
-    - ceph_current_fsid.rc == 0
     - mon_group_name in group_names
+    - not monitor_keyring_conf.stat.exists
+    - ceph_current_status.fsid is defined

 - name: include create_rbd_client_dir.yml
   include: create_rbd_client_dir.yml

Changed file 3 of 4

@@ -89,7 +89,7 @@
     # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
     # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
     - osd_socket_stat.rc == 0
-    - ceph_current_fsid.rc == 0
+    - ceph_current_status.fsid is defined
     - handler_health_osd_check
     - hostvars[item]['_osd_handler_called'] | default(False)
   with_items: "{{ groups[osd_group_name] }}"

Changed file 4 of 4

@@ -32,11 +32,11 @@
 # because it blindly picks a mon, which may be down because
 # of the rolling update
 - name: is ceph running already?
-  command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} fsid"
+  command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
   changed_when: false
   failed_when: false
   check_mode: no
-  register: ceph_current_fsid
+  register: ceph_current_status
   run_once: true
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
@@ -54,9 +54,9 @@

 # set this as a default when performing a rolling_update
 # so the rest of the tasks here will succeed
-- name: set_fact ceph_current_fsid rc 1
+- name: set_fact ceph_current_status rc 1
   set_fact:
-    ceph_current_fsid:
+    ceph_current_status:
       rc: 1
   when:
     - rolling_update or groups.get(mon_group_name, []) | length == 0
@@ -71,11 +71,18 @@
   when:
     - (cephx or generate_fsid)

-- name: set_fact fsid ceph_current_fsid.stdout
+- name: set_fact ceph_current_status (convert to json)
   set_fact:
-    fsid: "{{ ceph_current_fsid.stdout }}"
+    ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
   when:
-    - ceph_current_fsid.get('rc', 1) == 0
+    - not rolling_update
+    - ceph_current_status.rc == 0
+
+- name: set_fact fsid from ceph_current_status
+  set_fact:
+    fsid: "{{ ceph_current_status.fsid }}"
+  when:
+    - ceph_current_status.fsid is defined

 # Set ceph_release to ceph_stable by default
 - name: set_fact ceph_release ceph_stable_release
@@ -91,7 +98,7 @@
   become: false
   when:
     - generate_fsid
-    - ceph_current_fsid.rc != 0
+    - ceph_current_status.fsid is undefined

 - name: reuse cluster fsid when cluster is already running
   local_action:
@@ -100,7 +107,7 @@
     creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   become: false
   when:
-    - ceph_current_fsid.get('rc', 1) == 0
+    - ceph_current_status.fsid is defined

 - name: read cluster fsid if it already exists
   local_action:
@@ -218,36 +225,19 @@
     - containerized_deployment
     - ceph_docker_image | search("rhceph")

-- block:
-    - name: get current cluster status (if already running)
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
-      changed_when: false
-      failed_when: false
-      check_mode: no
-      run_once: true
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      when:
-        - not rolling_update
-        - groups.get(mon_group_name, []) | length > 0
-      register: ceph_current_status
-
-    - name: set_fact ceph_current_status (convert to json)
-      set_fact:
-        ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
-
-    - name: set_fact rgw_hostname
-      set_fact:
-        rgw_hostname: "{% for key in ceph_current_status['servicemap']['services']['rgw']['daemons'].keys() %}{% if key == ansible_fqdn %}{{ key }}{% endif %}{% endfor %}"
-      when: ceph_current_status['servicemap']['services']['rgw'] is defined
+- name: set_fact rgw_hostname - fqdn
+  set_fact:
+    rgw_hostname: "{% for key in ceph_current_status['servicemap']['services']['rgw']['daemons'].keys() %}{% if key == ansible_fqdn %}{{ key }}{% endif %}{% endfor %}"
   when:
-    - ceph_current_fsid.get('rc', 1) == 0
-    - inventory_hostname in groups.get(rgw_group_name, [])
-    # no servicemap before luminous
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
+    - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
+    - ceph_current_status['servicemap'] is defined
+    - ceph_current_status['servicemap']['services'] is defined
+    - ceph_current_status['servicemap']['services']['rgw'] is defined # that's the way to cover ceph_release_num[ceph_release] >= ceph_release_num['luminous']
     - ansible_hostname != ansible_fqdn

-- name: set_fact rgw_hostname
+- name: set_fact rgw_hostname - no fqdn
   set_fact:
     rgw_hostname: "{{ ansible_hostname }}"
   when:
+    - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
    - rgw_hostname is undefined
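
For reference, the rgw_hostname tasks above key off the structure of the "ceph -s -f json" output: the fsid at the top level and the daemon names under servicemap.services.rgw.daemons. The following Python sketch mirrors that selection logic; the status document, fsid, and hostnames in it are illustrative placeholders rather than real cluster data, and the final fallback is a simplification of the "rgw_hostname - no fqdn" task (the playbook gates on rgw_hostname being undefined instead).

# Rough sketch of what the new tasks do with the registered command output.
import json

# Abridged, illustrative stand-in for the stdout of
# "ceph --cluster {{ cluster }} -s -f json"; values are placeholders.
raw_status = json.dumps({
    "fsid": "deadbeef-0000-4000-8000-000000000000",
    "servicemap": {
        "services": {
            "rgw": {
                "daemons": {
                    "rgw0.example.com": {},
                    "rgw1.example.com": {},
                }
            }
        }
    },
})

ansible_fqdn = "rgw0.example.com"   # placeholder for the node's FQDN
ansible_hostname = "rgw0"           # placeholder for the short hostname

# Equivalent of "set_fact ceph_current_status (convert to json)"
ceph_current_status = json.loads(raw_status)

# Equivalent of "set_fact fsid from ceph_current_status"
fsid = ceph_current_status.get("fsid")

# Equivalent of the Jinja loop in "set_fact rgw_hostname - fqdn":
# keep the rgw daemon key that matches this node's FQDN, if any.
daemons = (
    ceph_current_status.get("servicemap", {})
    .get("services", {})
    .get("rgw", {})
    .get("daemons", {})
)
rgw_hostname = "".join(key for key in daemons if key == ansible_fqdn)

# Simplified equivalent of "set_fact rgw_hostname - no fqdn":
# fall back to the short hostname when no daemon key matched.
if not rgw_hostname:
    rgw_hostname = ansible_hostname

print(fsid, rgw_hostname)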