ceph-ansible/roles/ceph-facts/tasks/facts.yml

---
- name: check if it is atomic host
  stat:
    path: /run/ostree-booted
  register: stat_ostree
- name: set_fact is_atomic
  set_fact:
    is_atomic: "{{ stat_ostree.stat.exists }}"
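# container_binary.yml sets the container_binary fact (podman or docker),
# which the containerized tasks below rely on.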
- name: import_tasks container_binary.yml
  import_tasks: container_binary.yml
# In case ansible_python_interpreter is set by the user,
# ansible will not discover python and discovered_interpreter_python
# will not be set
- name: set_fact discovered_interpreter_python
  set_fact:
    discovered_interpreter_python: "{{ ansible_python_interpreter }}"
  when: ansible_python_interpreter is defined
# If ansible_python_interpreter is not defined, Ansible discovers the
# interpreter and stores it in ansible_facts; promote it to the
# discovered_interpreter_python fact, which is needed later in this
# playbook and elsewhere.
- name: set_fact discovered_interpreter_python if not previously set
  set_fact:
    discovered_interpreter_python: "{{ ansible_facts['discovered_interpreter_python'] }}"
  when:
    - discovered_interpreter_python is not defined
    - ansible_facts['discovered_interpreter_python'] is defined
# Set ceph_release to ceph_stable_release by default
- name: set_fact ceph_release ceph_stable_release
  set_fact:
    ceph_release: "{{ ceph_stable_release }}"
- name: set_fact monitor_name ansible_facts['hostname']
  set_fact:
    monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}"
  delegate_to: "{{ item }}"
  delegate_facts: true
  with_items: "{{ groups.get(mon_group_name, []) }}"
  run_once: true
  when: groups.get(mon_group_name, []) | length > 0
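# Find a monitor that is actually running so that later cluster commands
# can be delegated to it (the first mon in the group may be down).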
- name: find a running monitor
  when: groups.get(mon_group_name, []) | length > 0
  block:
    - name: set_fact container_exec_cmd
      set_fact:
        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}"
      when:
        - containerized_deployment | bool
    - name: find a running mon container
      command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
      register: find_running_mon_container
      failed_when: false
      run_once: true
      delegate_to: "{{ item }}"
      with_items: "{{ groups.get(mon_group_name, []) }}"
      when:
        - containerized_deployment | bool
    - name: check for a ceph mon socket
      shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok
      changed_when: false
      failed_when: false
      check_mode: no
      register: mon_socket_stat
      run_once: true
      delegate_to: "{{ item }}"
      with_items: "{{ groups.get(mon_group_name, []) }}"
      when:
        - not containerized_deployment | bool
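    # A socket file can be left behind by a dead mon; only sockets listed
    # in /proc/net/unix are actually open.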
    - name: check if the ceph mon socket is in-use
      command: grep -q {{ item.stdout }} /proc/net/unix
      changed_when: false
      failed_when: false
      check_mode: no
      register: mon_socket
      run_once: true
      delegate_to: "{{ hostvars[item.item]['inventory_hostname'] }}"
      with_items: "{{ mon_socket_stat.results }}"
      when:
        - not containerized_deployment | bool
        - item.rc == 0
    - name: set_fact running_mon - non_container
      set_fact:
        running_mon: "{{ hostvars[item.item.item]['inventory_hostname'] }}"
      with_items: "{{ mon_socket.results }}"
      run_once: true
      when:
        - not containerized_deployment | bool
        - item.rc is defined
        - item.rc == 0
    - name: set_fact running_mon - container
      set_fact:
        running_mon: "{{ item.item }}"
      run_once: true
      with_items: "{{ find_running_mon_container.results }}"
      when:
        - containerized_deployment | bool
        - item.stdout_lines | default([]) | length > 0
    - name: set_fact _container_exec_cmd
      set_fact:
        _container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_facts']['hostname'] }}"
      when:
        - containerized_deployment | bool
# this task shouldn't run in a rolling_update situation
# because it blindly picks a mon, which may be down because
# of the rolling update
- name: get current fsid if cluster is already running
  command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fsid"
  changed_when: false
  failed_when: false
  check_mode: no
  register: current_fsid
  run_once: true
  delegate_to: "{{ groups[mon_group_name][0] if running_mon is undefined else running_mon }}"
  when:
    - not rolling_update | bool
# set this as a default when performing a rolling_update
# so the rest of the tasks here will succeed
- name: set_fact current_fsid rc 1
  set_fact:
    current_fsid:
      rc: 1
  when: rolling_update | bool or groups.get(mon_group_name, []) | length == 0
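# During a rolling update the mon admin socket has been seen to transiently
# refuse commands even after the monitor rejoined the quorum, hence the
# retry via `until` below.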
- name: get current fsid
  command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid"
  register: rolling_update_fsid
facts: add a retry on get current fsid task sometimes it can happen the following task fails: ``` TASK [ceph-facts : get current fsid] ******************************************* task path: /home/jenkins-build/build/workspace/ceph-ansible-prs-dev-centos-container-update/roles/ceph-facts/tasks/facts.yml:78 Wednesday 19 June 2019 18:12:49 +0000 (0:00:00.203) 0:02:39.995 ******** fatal: [mon2 -> mon1]: FAILED! => changed=true cmd: - timeout - --foreground - -s - KILL - 600s - docker - exec - ceph-mon-mon1 - ceph - --cluster - ceph - daemon - mon.mon1 - config - get - fsid delta: '0:00:00.239339' end: '2019-06-19 18:12:49.812099' msg: non-zero return code rc: 22 start: '2019-06-19 18:12:49.572760' stderr: 'admin_socket: exception getting command descriptions: [Errno 2] No such file or directory' stderr_lines: <omitted> stdout: '' stdout_lines: <omitted> ``` not sure exactly why since just before this task, mon1 seems to be well UP otherwise it wouldn't have passed the task `waiting for the containerized monitor to join the quorum`. As a quick fix/workaround, let's add a retry which allows us to get around this situation: ``` TASK [ceph-facts : get current fsid] ******************************************* task path: /home/jenkins-build/build/workspace/ceph-ansible-scenario/roles/ceph-facts/tasks/facts.yml:78 Thursday 20 June 2019 15:35:07 +0000 (0:00:00.201) 0:03:47.288 ********* FAILED - RETRYING: get current fsid (3 retries left). changed: [mon2 -> mon1] => changed=true attempts: 2 cmd: - timeout - --foreground - -s - KILL - 600s - docker - exec - ceph-mon-mon1 - ceph - --cluster - ceph - daemon - mon.mon1 - config - get - fsid delta: '0:00:00.290252' end: '2019-06-20 15:35:13.960188' rc: 0 start: '2019-06-20 15:35:13.669936' stderr: '' stderr_lines: <omitted> stdout: |- { "fsid": "153e159d-7ade-42a7-842c-4d04348b901e" } stdout_lines: <omitted> ``` Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
2019-06-20 20:45:07 +08:00
  until: rolling_update_fsid is succeeded
  when:
    - rolling_update | bool
    - groups.get(mon_group_name, []) | length > 0
- name: set_fact fsid
  set_fact:
    fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
  when:
    - rolling_update | bool
    - groups.get(mon_group_name, []) | length > 0
- name: set_fact fsid from current_fsid
  set_fact:
    fsid: "{{ current_fsid.stdout }}"
  run_once: true
  when: current_fsid.rc == 0
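# No running cluster was found: generate a new fsid once on the first
# monitor, then set it as a fact on every host in the play.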
- name: fsid related tasks
  when:
    - generate_fsid | bool
    - current_fsid.rc != 0
    - not rolling_update | bool
  block:
    - name: generate cluster fsid
      command: "{{ hostvars[groups[mon_group_name][0]]['discovered_interpreter_python'] }} -c 'import uuid; print(str(uuid.uuid4()))'"
      register: cluster_uuid
      delegate_to: "{{ groups[mon_group_name][0] }}"
      run_once: true
    - name: set_fact fsid
      set_fact:
        fsid: "{{ cluster_uuid.stdout }}"
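# Resolve any /dev/disk/by-* symlinks in the devices lists to their
# canonical /dev/* names, then drop the original symlink entries.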
- name: resolve device link(s)
  command: readlink -f {{ item }}
  changed_when: false
  check_mode: no
  with_items: "{{ devices }}"
  register: devices_prepare_canonicalize
  when:
    - devices is defined
    - inventory_hostname in groups.get(osd_group_name, [])
    - not osd_auto_discovery | default(False) | bool
- name: set_fact build devices from resolved symlinks
  set_fact:
    devices: "{{ devices | default([]) + [ item.stdout ] }}"
  with_items: "{{ devices_prepare_canonicalize.results }}"
  when:
    - devices is defined
    - inventory_hostname in groups.get(osd_group_name, [])
    - not osd_auto_discovery | default(False) | bool
- name: set_fact build final devices list
  set_fact:
    devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
  when:
    - devices is defined
    - inventory_hostname in groups.get(osd_group_name, [])
    - not osd_auto_discovery | default(False) | bool
- name: resolve dedicated_device link(s)
  command: readlink -f {{ item }}
  changed_when: false
  check_mode: no
  with_items: "{{ dedicated_devices }}"
  register: dedicated_devices_prepare_canonicalize
  when:
    - dedicated_devices is defined
    - inventory_hostname in groups.get(osd_group_name, [])
    - not osd_auto_discovery | default(False) | bool
- name: set_fact build dedicated_devices from resolved symlinks
  set_fact:
    dedicated_devices: "{{ dedicated_devices | default([]) + [ item.stdout ] }}"
  with_items: "{{ dedicated_devices_prepare_canonicalize.results }}"
  when:
    - dedicated_devices is defined
    - inventory_hostname in groups.get(osd_group_name, [])
    - not osd_auto_discovery | default(False) | bool
- name: set_fact build final dedicated_devices list
  set_fact:
    dedicated_devices: "{{ dedicated_devices | reject('search','/dev/disk') | list | unique }}"
  when:
    - dedicated_devices is defined
    - inventory_hostname in groups.get(osd_group_name, [])
    - not osd_auto_discovery | default(False) | bool
- name: resolve bluestore_wal_device link(s)
  command: readlink -f {{ item }}
  changed_when: false
  check_mode: no
  with_items: "{{ bluestore_wal_devices }}"
  register: bluestore_wal_devices_prepare_canonicalize
  when:
    - bluestore_wal_devices is defined
    - inventory_hostname in groups.get(osd_group_name, [])
    - not osd_auto_discovery | default(False) | bool
- name: set_fact build bluestore_wal_devices from resolved symlinks
  set_fact:
    bluestore_wal_devices: "{{ bluestore_wal_devices | default([]) + [ item.stdout ] }}"
  with_items: "{{ bluestore_wal_devices_prepare_canonicalize.results }}"
  when:
    - bluestore_wal_devices is defined
    - inventory_hostname in groups.get(osd_group_name, [])
    - not osd_auto_discovery | default(False) | bool
- name: set_fact build final bluestore_wal_devices list
  set_fact:
    bluestore_wal_devices: "{{ bluestore_wal_devices | reject('search','/dev/disk') | list | unique }}"
  when:
    - bluestore_wal_devices is defined
    - inventory_hostname in groups.get(osd_group_name, [])
    - not osd_auto_discovery | default(False) | bool
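# With osd_auto_discovery, build the devices list from the host facts,
# skipping devices that are removable, have no sectors, already carry
# partitions or holders, or match osd_auto_discovery_exclude.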
- name: set_fact devices generate device list when osd_auto_discovery
  set_fact:
    devices: "{{ (devices | default([]) + [ item.key | regex_replace('^', '/dev/') ]) | unique }}"
  with_dict: "{{ ansible_facts['devices'] }}"
  when:
    - osd_auto_discovery | default(False) | bool
    - inventory_hostname in groups.get(osd_group_name, [])
    - ansible_facts['devices'] is defined
    - item.value.removable == "0"
    - item.value.sectors != "0"
    - item.value.partitions|count == 0
    - item.value.holders|count == 0
    - item.key is not match osd_auto_discovery_exclude
- name: backward compatibility tasks related
  when:
    - (inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, []))
    - groups.get(mon_group_name, []) | length > 0
  block:
    - name: get ceph current status
      command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
      changed_when: false
      failed_when: false
      check_mode: no
      register: ceph_current_status
      run_once: true
      delegate_to: "{{ groups[mon_group_name][0] if running_mon is undefined else running_mon }}"
    - name: set_fact ceph_current_status
      set_fact:
        ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
      run_once: true
      when: ceph_current_status.rc == 0
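    # If the running cluster registered this rgw daemon under its FQDN,
    # keep using the FQDN; otherwise fall back to the short hostname.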
    - name: set_fact rgw_hostname
      set_fact:
        rgw_hostname: "{% set _value = ansible_facts['hostname'] -%}
                      {% for key in (ceph_current_status['services']['rgw']['daemons'] | list) -%}
                      {% if key == ansible_facts['fqdn'] -%}
                      {% set _value = key -%}
                      {% endif -%}
                      {% endfor -%}
                      {{ _value }}"
      when:
        - ceph_current_status['services'] is defined
        - ceph_current_status['services']['rgw'] is defined
- name: check if the ceph conf exists
  stat:
    path: '/etc/ceph/{{ cluster }}.conf'
  register: ceph_conf
- name: set default osd_pool_default_crush_rule fact
  set_fact:
    osd_pool_default_crush_rule: "{{ ceph_osd_pool_default_crush_rule }}"
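# The next two tasks are YAML-anchored (&) so they can be reused (<< / *)
# below against a running monitor when there is no local ceph.conf.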
- name: get default crush rule value from ceph configuration
  block:
    - &read-osd-pool-default-crush-rule
      name: read osd pool default crush rule
      command: grep 'osd pool default crush rule' /etc/ceph/{{ cluster }}.conf
      register: crush_rule_variable
      changed_when: false
      check_mode: no
      failed_when: crush_rule_variable.rc not in (0, 1)
    - &set-osd-pool-default-crush-rule-fact
      name: set osd_pool_default_crush_rule fact
      set_fact:
        osd_pool_default_crush_rule: "{{ crush_rule_variable.stdout.split(' = ')[1] }}"
      when: crush_rule_variable.rc == 0
  when: ceph_conf.stat.exists | bool
- name: get default crush rule value from running monitor ceph configuration
  block:
    - <<: *read-osd-pool-default-crush-rule
      delegate_to: "{{ running_mon }}"
    - *set-osd-pool-default-crush-rule-fact
  when:
    - running_mon is defined
    - not ceph_conf.stat.exists | bool
- name: import_tasks set_monitor_address.yml
  import_tasks: set_monitor_address.yml
  when: groups.get(mon_group_name, []) | length > 0
- name: import_tasks set_radosgw_address.yml
  import_tasks: set_radosgw_address.yml
  when: inventory_hostname in groups.get(rgw_group_name, [])
- name: set_fact use_new_ceph_iscsi package or old ceph-iscsi-config/cli
  set_fact:
    use_new_ceph_iscsi: "{{ (gateway_ip_list == '0.0.0.0' and gateway_iqn | length == 0 and client_connections | length == 0 and rbd_devices | length == 0) | bool | ternary(true, false) }}"
  when: iscsi_gw_group_name in group_names
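# ceph_run_cmd is either the plain `ceph` binary or, for containerized
# deployments, a `<container_binary> run` wrapper with the ceph config,
# data and log directories bind-mounted in.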
- name: set_fact ceph_run_cmd
  set_fact:
    ceph_run_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
  delegate_to: "{{ item }}"
  delegate_facts: true
  run_once: true
  with_items:
    - "{{ groups[mon_group_name] if groups[mon_group_name] | default([]) | length > 0 else [] }}"
    - "{{ groups[mds_group_name] if groups[mds_group_name] | default([]) | length > 0 else [] }}"
    - "{{ groups[client_group_name] if groups[client_group_name] | default([]) | length > 0 else [] }}"
- name: set_fact ceph_admin_command
  set_fact:
    ceph_admin_command: "{{ hostvars[item]['ceph_run_cmd'] }} -n client.admin -k /etc/ceph/{{ cluster }}.client.admin.keyring"
  delegate_to: "{{ item }}"
  delegate_facts: true
  run_once: true
  with_items:
    - "{{ groups[mon_group_name] if groups[mon_group_name] | default([]) | length > 0 else [] }}"
    - "{{ groups[mds_group_name] if groups[mds_group_name] | default([]) | length > 0 else [] }}"
    - "{{ groups[client_group_name] if groups[client_group_name] | default([]) | length > 0 else [] }}"