mirror of https://github.com/ceph/ceph-ansible.git
ansible: use 'bool' filter on boolean conditionals
Running ceph-ansible emits many ``[DEPRECATION WARNING]`` messages like this one:

```
[DEPRECATION WARNING]: evaluating containerized_deployment as a bare variable, this behaviour
will go away and you might need to add |bool to the expression in the future. Also see
CONDITIONAL_BARE_VARS configuration toggle.. This feature will be removed in version 2.12.
Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.
```

This change appends ``| bool`` to the affected variables. In a few places the coding style also changes from ``variable|bool`` to ``variable | bool`` (with spaces around the pipe).

Closes: #4022
Signed-off-by: L3D <l3d@c3woc.de>
pull/4058/head
parent 518ab794fb
commit ab54fe20ec
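The pattern behind the whole patch is the same everywhere: a bare variable in ``when:`` triggers the warning, while piping it through Jinja2's ``bool`` filter evaluates it explicitly. A minimal sketch of the before/after form (the task below is hypothetical and only illustrates the pattern; it is not part of this patch):

```yaml
# Hypothetical example -- illustrates the pattern only, not taken from this patch.
- name: restart a containerized service
  systemd:
    name: "ceph-mon@{{ ansible_hostname }}"
    state: restarted
  # Old form: bare variable in the conditional triggers the deprecation warning
  # when: containerized_deployment
  # New form: cast explicitly through Jinja2's 'bool' filter
  when: containerized_deployment | bool
```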
@@ -365,7 +365,7 @@
  - wait for server to boot
  - remove data
  when:
- - reboot_osd_node
+ - reboot_osd_node | bool
  - remove_osd_mountpoints.failed is defined

  - name: wipe table on dm-crypt devices

@@ -633,7 +633,7 @@
  state: absent
  when:
  - ansible_pkg_mgr == 'yum'
- - purge_all_packages == true
+ - purge_all_packages | bool

  - name: purge remaining ceph packages with dnf
  dnf:

@@ -641,7 +641,7 @@
  state: absent
  when:
  - ansible_pkg_mgr == 'dnf'
- - purge_all_packages == true
+ - purge_all_packages | bool

  - name: purge remaining ceph packages with apt
  apt:

@@ -649,7 +649,7 @@
  state: absent
  when:
  - ansible_pkg_mgr == 'apt'
- - purge_all_packages == true
+ - purge_all_packages | bool

  - name: remove config
  file:
@@ -84,7 +84,7 @@
  file:
  path: /etc/profile.d/ceph-aliases.sh
  state: absent
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: set mon_host_count
  set_fact:

@@ -114,7 +114,7 @@
  delegate_to: "{{ item }}"
  with_items: "{{ groups[mon_group_name] }}"
  when:
- - cephx
+ - cephx | bool
  - inventory_hostname == groups[mon_group_name][0]

  - name: create potentially missing keys (rbd and rbd-mirror)

@@ -133,7 +133,7 @@
  CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
  CEPH_CONTAINER_BINARY: "{{ container_binary }}"
  when:
- - cephx
+ - cephx | bool
  - inventory_hostname == groups[mon_group_name][0]

  # NOTE: we mask the service so the RPM can't restart it

@@ -145,7 +145,7 @@
  enabled: no
  masked: yes
  ignore_errors: True
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  # NOTE: we mask the service so the RPM can't restart it
  # after the package gets upgraded

@@ -156,7 +156,7 @@
  enabled: no
  masked: yes
  ignore_errors: True
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  # only mask the service for mgr because it must be upgraded
  # after ALL monitors, even when collocated

@@ -175,7 +175,7 @@
  delegate_to: "{{ mon_host }}"
  when:
  - inventory_hostname == groups[mon_group_name][0]
- - not containerized_deployment
+ - not containerized_deployment | bool

  - name: set containerized osd flags
  command: >

@@ -186,16 +186,16 @@
  delegate_to: "{{ mon_host }}"
  when:
  - inventory_hostname == groups[mon_group_name][0]
- - containerized_deployment
+ - containerized_deployment | bool

  - import_role:
  name: ceph-handler
  - import_role:
  name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
  - import_role:
  name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
  - import_role:
  name: ceph-config
  - import_role:
@@ -206,7 +206,7 @@
  name: ceph-mon@{{ monitor_name }}
  state: started
  enabled: yes
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: start ceph mgr
  systemd:

@@ -214,7 +214,7 @@
  state: started
  enabled: yes
  ignore_errors: True # if no mgr collocated with mons
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: restart containerized ceph mon
  systemd:

@@ -222,7 +222,7 @@
  state: restarted
  enabled: yes
  daemon_reload: yes
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: non container | waiting for the monitor to join the quorum...
  command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" -s --format json

@@ -233,7 +233,7 @@
  hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
  retries: "{{ health_mon_check_retries }}"
  delay: "{{ health_mon_check_delay }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: container | waiting for the containerized monitor to join the quorum...
  command: >

@@ -245,7 +245,7 @@
  hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
  retries: "{{ health_mon_check_retries }}"
  delay: "{{ health_mon_check_delay }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: upgrade ceph mgr nodes when implicitly collocated on monitors
  vars:

@@ -274,10 +274,10 @@
  name: ceph-handler
  - import_role:
  name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
  - import_role:
  name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
  - import_role:
  name: ceph-config
  - import_role:

@@ -310,10 +310,10 @@
  name: ceph-handler
  - import_role:
  name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
  - import_role:
  name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
  - import_role:
  name: ceph-config
  - import_role:

@@ -334,19 +334,18 @@
  shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
  register: osd_ids
  changed_when: false
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: get osd unit names - container
  shell: systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([a-z0-9]+).service"
  register: osd_names
  changed_when: false
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: set num_osds for container
  set_fact:
  num_osds: "{{ osd_names.stdout_lines|default([])|length }}"
- when:
- - containerized_deployment
+ when: containerized_deployment | bool

  - name: stop ceph osd
  systemd:
@@ -355,13 +354,12 @@
  enabled: no
  masked: yes
  with_items: "{{ osd_ids.stdout_lines }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: set num_osds for non container
  set_fact:
  num_osds: "{{ osd_ids.stdout_lines|default([])|length }}"
- when:
- - not containerized_deployment
+ when: not containerized_deployment | bool

  - import_role:
  name: ceph-defaults

@@ -371,10 +369,10 @@
  name: ceph-handler
  - import_role:
  name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
  - import_role:
  name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
  - import_role:
  name: ceph-config
  - import_role:

@@ -384,7 +382,7 @@
  shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
  register: osd_ids
  changed_when: false
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: start ceph osd
  systemd:

@@ -393,7 +391,7 @@
  enabled: yes
  masked: no
  with_items: "{{ osd_ids.stdout_lines }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: restart containerized ceph osd
  systemd:

@@ -403,7 +401,7 @@
  masked: no
  daemon_reload: yes
  with_items: "{{ osd_names.stdout_lines }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: scan ceph-disk osds with ceph-volume if deploying nautilus
  command: "ceph-volume --cluster={{ cluster }} simple scan"

@@ -411,7 +409,7 @@
  CEPH_VOLUME_DEBUG: 1
  when:
  - ceph_release in ["nautilus", "octopus"]
- - not containerized_deployment
+ - not containerized_deployment | bool

  - name: activate scanned ceph-disk osds and migrate to ceph-volume if deploying nautilus
  command: "ceph-volume --cluster={{ cluster }} simple activate --all"

@@ -419,12 +417,12 @@
  CEPH_VOLUME_DEBUG: 1
  when:
  - ceph_release in ["nautilus", "octopus"]
- - not containerized_deployment
+ - not containerized_deployment | bool

  - name: set_fact container_exec_cmd_osd
  set_fact:
  container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: get osd versions
  command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
@@ -478,7 +476,7 @@
  - name: set_fact container_exec_cmd_osd
  set_fact:
  container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: unset osd flags
  command: "{{ container_exec_cmd_update_osd|default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"

@@ -519,7 +517,7 @@
  state: stopped
  enabled: no
  masked: yes
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - import_role:
  name: ceph-defaults

@@ -529,10 +527,10 @@
  name: ceph-handler
  - import_role:
  name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
  - import_role:
  name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
  - import_role:
  name: ceph-config
  - import_role:

@@ -544,7 +542,7 @@
  state: started
  enabled: yes
  masked: no
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: restart ceph mds
  systemd:

@@ -553,7 +551,7 @@
  enabled: yes
  masked: no
  daemon_reload: yes
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: upgrade ceph rgws cluster

@@ -584,16 +582,16 @@
  enabled: no
  masked: yes
  with_items: "{{ rgw_instances }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - import_role:
  name: ceph-handler
  - import_role:
  name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
  - import_role:
  name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
  - import_role:
  name: ceph-config
  - import_role:

@@ -607,7 +605,7 @@
  masked: no
  daemon_reload: yes
  with_items: "{{ rgw_instances }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: upgrade ceph rbd mirror node

@@ -632,10 +630,10 @@
  name: ceph-handler
  - import_role:
  name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
  - import_role:
  name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
  - import_role:
  name: ceph-config
  - import_role:

@@ -647,7 +645,7 @@
  state: started
  enabled: yes
  masked: no
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: restart containerized ceph rbd mirror
  systemd:

@@ -656,7 +654,7 @@
  enabled: yes
  masked: no
  daemon_reload: yes
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: upgrade ceph nfs node
@@ -676,7 +674,7 @@
  enabled: no
  masked: yes
  failed_when: false
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - import_role:
  name: ceph-defaults

@@ -686,10 +684,10 @@
  name: ceph-handler
  - import_role:
  name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
  - import_role:
  name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
  - import_role:
  name: ceph-config
  - import_role:

@@ -702,8 +700,8 @@
  enabled: yes
  masked: no
  when:
- - not containerized_deployment
- - ceph_nfs_enable_service
+ - not containerized_deployment | bool
+ - ceph_nfs_enable_service | bool

  - name: systemd restart nfs container
  systemd:

@@ -713,8 +711,8 @@
  masked: no
  daemon_reload: yes
  when:
- - ceph_nfs_enable_service
- - containerized_deployment
+ - ceph_nfs_enable_service | bool
+ - containerized_deployment | bool

  - name: upgrade ceph iscsi gateway node

@@ -740,7 +738,7 @@
  - rbd-target-api
  - rbd-target-gw
  - tcmu-runner
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - import_role:
  name: ceph-defaults

@@ -750,10 +748,10 @@
  name: ceph-handler
  - import_role:
  name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
  - import_role:
  name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
  - import_role:
  name: ceph-config
  - import_role:

@@ -769,7 +767,7 @@
  - tcmu-runner
  - rbd-target-api
  - rbd-target-gw
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: upgrade ceph client node

@@ -787,10 +785,10 @@
  name: ceph-handler
  - import_role:
  name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
  - import_role:
  name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
  - import_role:
  name: ceph-config
  - import_role:

@@ -809,25 +807,25 @@
  command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph osd require-osd-release nautilus"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: True
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: non container | disallow pre-nautilus OSDs and enable all new nautilus-only functionality
  command: "ceph --cluster {{ cluster }} osd require-osd-release nautilus"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: True
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: container | enable msgr2 protocol
  command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph mon enable-msgr2"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: True
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: non container | enable msgr2 protocol
  command: "ceph --cluster {{ cluster }} mon enable-msgr2"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  run_once: True
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - import_role:
  name: ceph-handler

@@ -848,7 +846,7 @@
  - name: set_fact container_exec_cmd_status
  set_fact:
  container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: show ceph status
  command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
@@ -77,7 +77,7 @@
  - name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
  set_fact:
  container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: exit playbook, if can not connect to the cluster
  command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"

@@ -65,7 +65,7 @@
  - name: set_fact container_exec_cmd build docker exec command (containerized)
  set_fact:
  container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: exit playbook, if can not connect to the cluster
  command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"

@@ -73,7 +73,7 @@
  with_items:
  osds_dir_stat.results
  when:
- - osds_dir_stat is defined
+ - osds_dir_stat is defined | bool
  - item.stat.exists == false

  - name: install sgdisk(gdisk)

@@ -61,7 +61,7 @@
  - name: set_fact container_exec_cmd build docker exec command (containerized)
  set_fact:
  container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: exit playbook, if can not connect to the cluster
  command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"

@@ -89,14 +89,14 @@
  with_items: "{{ osd_hosts }}"
  delegate_to: "{{ item }}"
  failed_when: false
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: fail when admin key is not present
  fail:
  msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
  with_items: "{{ ceph_admin_key.results }}"
  when:
- - not containerized_deployment
+ - not containerized_deployment | bool
  - item.stat.exists == false

  # NOTE(leseb): using '>' is the only way I could have the command working

@@ -110,7 +110,7 @@
  - "{{ osd_to_replace.split(',') }}"
  register: osd_to_replace_disks
  delegate_to: "{{ item.0 }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: zapping osd(s) - container
  shell: >

@@ -122,7 +122,7 @@
  - "{{ osd_hosts }}"
  - "{{ osd_to_replace_disks.results }}"
  delegate_to: "{{ item.0 }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: zapping osd(s) - non container
  command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}

@@ -131,7 +131,7 @@
  - "{{ osd_hosts }}"
  - "{{ osd_to_replace_disks.results }}"
  delegate_to: "{{ item.0 }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: destroying osd(s)
  command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap

@@ -140,7 +140,7 @@
  - "{{ osd_hosts }}"
  - "{{ osd_to_replace.split(',') }}"
  delegate_to: "{{ item.0 }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool

  - name: replace osd(s) - prepare - non container
  command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)
@@ -56,7 +56,7 @@ EXAMPLES = '''
  location: "{{ hostvars[item]['osd_crush_location'] }}"
  containerized: "{{ container_exec_cmd }}"
  with_items: "{{ groups[osd_group_name] }}"
- when: crush_rule_config
+ when: crush_rule_config | bool
  '''

  RETURN = '''# '''

@@ -20,7 +20,7 @@
  name: "{{ item }}"
  groups: _filtered_clients
  with_items: "{{ groups[client_group_name] }}"
- when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment)
+ when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment | bool)

  - name: set_fact delegated_node
  set_fact:

@@ -50,7 +50,7 @@
  - "{{ ceph_nfs_ceph_user | default([]) }}"
  delegate_to: "{{ delegated_node }}"
  when:
- - cephx
+ - cephx | bool
  - keys | length > 0
  - inventory_hostname == groups.get('_filtered_clients') | first

@@ -61,13 +61,13 @@
  register: slurp_client_keys
  delegate_to: "{{ delegated_node }}"
  when:
- - cephx
+ - cephx | bool
  - keys | length > 0
  - inventory_hostname == groups.get('_filtered_clients') | first

  - name: pool related tasks
  when:
- - condition_copy_admin_key
+ - condition_copy_admin_key | bool
  - inventory_hostname == groups.get('_filtered_clients', []) | first
  block:
  - name: list existing pool(s)

@@ -4,4 +4,4 @@

  - name: include create_users_keys.yml
  include_tasks: create_users_keys.yml
- when: user_config
+ when: user_config | bool

@@ -7,5 +7,5 @@
  group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
  mode: "{{ ceph_keyring_permissions }}"
  when:
- - cephx
- - copy_admin_key
+ - cephx | bool
+ - copy_admin_key | bool

@@ -9,4 +9,4 @@
  with_items:
  - "{{ rbd_client_admin_socket_path }}"
  - "{{ rbd_client_log_path }}"
- when: rbd_client_directories
+ when: rbd_client_directories | bool

@@ -3,25 +3,25 @@
  file:
  path: /tmp
  state: directory
- when: use_installer
+ when: use_installer | bool

  - name: use mktemp to create name for rundep
  command: "mktemp /tmp/rundep.XXXXXXXX"
  register: rundep_location
- when: use_installer
+ when: use_installer | bool

  - name: copy rundep
  copy:
  src: "{{ ansible_dir }}/rundep"
  dest: "{{ item }}"
  with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}"
- when: use_installer
+ when: use_installer | bool

  - name: install ceph dependencies
  script: "{{ ansible_dir }}/rundep_installer.sh {{ item }}"
  become: true
  with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}"
- when: use_installer
+ when: use_installer | bool

  - name: ensure rsync is installed
  package:
@@ -1,14 +1,14 @@
  ---
  - name: include create_ceph_initial_dirs.yml
  include_tasks: create_ceph_initial_dirs.yml
- when: containerized_deployment|bool
+ when: containerized_deployment | bool

  - name: config file operations related to OSDs
  when:
  - inventory_hostname in groups.get(osd_group_name, [])
  # the rolling_update.yml playbook sets num_osds to the number of currently
  # running osds
- - not rolling_update
+ - not rolling_update | bool
  block:
  - name: count number of osds for lvm scenario
  set_fact:

@@ -62,7 +62,7 @@

  # ceph-common
  - name: config file operation for non-containerized scenarios
- when: not containerized_deployment|bool
+ when: not containerized_deployment | bool
  block:
  - name: create ceph conf directory
  file:

@@ -98,7 +98,7 @@
  state: directory
  mode: "0755"
  delegate_to: localhost
- when: ceph_conf_local
+ when: ceph_conf_local | bool

  - name: "generate {{ cluster }}.conf configuration file locally"
  config_template:

@@ -112,10 +112,10 @@
  config_type: ini
  when:
  - inventory_hostname in groups[mon_group_name]
- - ceph_conf_local
+ - ceph_conf_local | bool

  - name: config file operations for containerized scenarios
- when: containerized_deployment|bool
+ when: containerized_deployment | bool
  block:
  - name: create a local fetch directory if it does not exist
  file:

@@ -126,7 +126,7 @@
  become: false
  run_once: true
  when:
- - (cephx or generate_fsid)
+ - (cephx or generate_fsid) | bool
  - ((inventory_hostname in groups.get(mon_group_name, [])) or
  (groups.get(nfs_group_name, []) | length > 0) and inventory_hostname == groups.get(nfs_group_name, [])[0])

@@ -183,7 +183,7 @@
  until: docker_image.rc == 0
  retries: "{{ docker_pull_retry }}"
  delay: 10
- when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image | bool)

  - name: "inspecting {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
  command: "{{ container_binary }} inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"

@@ -278,15 +278,15 @@
  copy:
  src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
  dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)

  - name: load ceph dev image
  command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)

  - name: remove tmp ceph dev image file
  file:
  name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
  state: absent
- when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
@@ -19,7 +19,7 @@
  enabled: yes
  when:
  - ansible_distribution == 'CentOS'
- - ceph_docker_enable_centos_extra_repo
+ - ceph_docker_enable_centos_extra_repo | bool
  tags:
  with_pkg

@@ -2,7 +2,7 @@
  - name: set_fact container_exec_cmd
  set_fact:
  container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool

  - name: disable SSL for dashboard
  command: "{{ container_exec_cmd }} ceph config set mgr mgr/dashboard/ssl false"

@@ -22,7 +22,7 @@
  group: root
  mode: 0440
  when:
- - dashboard_crt
+ - dashboard_crt | bool
  - dashboard_protocol == "https"

  - name: copy dashboard SSL certificate key

@@ -33,7 +33,7 @@
  group: root
  mode: 0440
  when:
- - dashboard_key
+ - dashboard_key | bool
  - dashboard_protocol == "https"

  - name: generate a Self Signed OpenSSL certificate for dashboard

@@ -42,7 +42,7 @@
  openssl req -new -nodes -x509 -subj '/O=IT/CN=ceph-dashboard' -days 3650 -keyout /etc/ceph/ceph-dashboard.key -out /etc/ceph/ceph-dashboard.crt -extensions v3_ca
  when:
  - dashboard_protocol == "https"
- - not dashboard_key or not dashboard_crt
+ - not dashboard_key | bool or not dashboard_crt | bool

  - name: import dashboard certificate file
  command: "{{ container_exec_cmd }} ceph config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt"

@@ -131,25 +131,25 @@
  command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-host {{ dashboard_rgw_api_host }}"
  changed_when: false
  delegate_to: "{{ groups[mon_group_name][0] }}"
- when: dashboard_rgw_api_host
+ when: dashboard_rgw_api_host | bool

  - name: set the rgw port
  command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-port {{ dashboard_rgw_api_port }}"
  changed_when: false
  delegate_to: "{{ groups[mon_group_name][0] }}"
- when: dashboard_rgw_api_port
+ when: dashboard_rgw_api_port | bool

  - name: set the rgw scheme
  command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-scheme {{ dashboard_rgw_api_scheme }}"
  changed_when: false
  delegate_to: "{{ groups[mon_group_name][0] }}"
- when: dashboard_rgw_api_scheme
+ when: dashboard_rgw_api_scheme | bool

  - name: set the rgw admin resource
  command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}"
  changed_when: false
  delegate_to: "{{ groups[mon_group_name][0] }}"
- when: dashboard_rgw_api_admin_resource
+ when: dashboard_rgw_api_admin_resource | bool

  - name: disable ssl verification for rgw
  command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-ssl-verify False"
@@ -29,18 +29,18 @@
  - name: set_fact monitor_name ansible_hostname
  set_fact:
  monitor_name: "{{ ansible_hostname }}"
- when: not mon_use_fqdn
+ when: not mon_use_fqdn | bool

  - name: set_fact monitor_name ansible_fqdn
  set_fact:
  monitor_name: "{{ ansible_fqdn }}"
- when: mon_use_fqdn
+ when: mon_use_fqdn | bool

  - name: set_fact container_exec_cmd
  set_fact:
  container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
  when:
- - containerized_deployment
+ - containerized_deployment | bool
  - groups.get(mon_group_name, []) | length > 0

  # this task shouldn't run in a rolling_update situation

@@ -55,7 +55,7 @@
  run_once: true
  delegate_to: "{{ groups[mon_group_name][0] }}"
  when:
- - not rolling_update
+ - not rolling_update | bool
  - groups.get(mon_group_name, []) | length > 0

  # set this as a default when performing a rolling_update

@@ -73,24 +73,24 @@
  delegate_to: localhost
  changed_when: false
  become: false
- when: cephx or generate_fsid
+ when: cephx | bool or generate_fsid | bool

  - name: get current fsid
  command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }} config get fsid"
  register: rolling_update_fsid
  delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
- when: rolling_update
+ when: rolling_update | bool

  - name: set_fact fsid
  set_fact:
  fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
- when: rolling_update
+ when: rolling_update | bool

  - name: set_fact ceph_current_status (convert to json)
  set_fact:
  ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
  when:
- - not rolling_update
+ - not rolling_update | bool
  - ceph_current_status.rc == 0

  - name: set_fact fsid from ceph_current_status

@@ -100,9 +100,9 @@

  - name: fsid realted tasks
  when:
- - generate_fsid
+ - generate_fsid | bool
  - ceph_current_status.fsid is undefined
- - not rolling_update
+ - not rolling_update | bool
  block:
  - name: generate cluster fsid
  shell: python -c 'import uuid; print(str(uuid.uuid4()))'

@@ -117,12 +117,12 @@
  - name: set_fact mds_name ansible_hostname
  set_fact:
  mds_name: "{{ ansible_hostname }}"
- when: not mds_use_fqdn
+ when: not mds_use_fqdn | bool

  - name: set_fact mds_name ansible_fqdn
  set_fact:
  mds_name: "{{ ansible_fqdn }}"
- when: mds_use_fqdn
+ when: mds_use_fqdn | bool

  - name: set_fact rbd_client_directory_owner ceph
  set_fact:

@@ -151,7 +151,7 @@
  when:
  - devices is defined
  - inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
+ - not osd_auto_discovery | default(False) | bool

  - name: set_fact build devices from resolved symlinks
  set_fact:

@@ -160,7 +160,7 @@
  when:
  - devices is defined
  - inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
+ - not osd_auto_discovery | default(False) | bool

  - name: set_fact build final devices list
  set_fact:

@@ -168,14 +168,14 @@
  when:
  - devices is defined
  - inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
+ - not osd_auto_discovery | default(False) | bool

  - name: set_fact devices generate device list when osd_auto_discovery
  set_fact:
  devices: "{{ devices | default([]) + [ item.key | regex_replace('^', '/dev/') ] }}"
  with_dict: "{{ ansible_devices }}"
  when:
- - osd_auto_discovery|default(False)
+ - osd_auto_discovery | default(False) | bool
  - ansible_devices is defined
  - item.value.removable == "0"
  - item.value.sectors != "0"
@@ -187,28 +187,28 @@
  set_fact:
  ceph_uid: 64045
  when:
- - not containerized_deployment
+ - not containerized_deployment | bool
  - ansible_os_family == 'Debian'

  - name: set_fact ceph_uid for red hat or suse based system - non container
  set_fact:
  ceph_uid: 167
  when:
- - not containerized_deployment
+ - not containerized_deployment | bool
  - ansible_os_family in ['RedHat', 'Suse']

  - name: set_fact ceph_uid for debian based system - container
  set_fact:
  ceph_uid: 64045
  when:
- - containerized_deployment
+ - containerized_deployment | bool
  - ceph_docker_image_tag | string is search("ubuntu")

  - name: set_fact ceph_uid for red hat based system - container
  set_fact:
  ceph_uid: 167
  when:
- - containerized_deployment
+ - containerized_deployment | bool
  - (ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
  or (ansible_distribution == 'RedHat'))

@@ -216,7 +216,7 @@
  set_fact:
  ceph_uid: 167
  when:
- - containerized_deployment
+ - containerized_deployment | bool
  - ceph_docker_image is search("rhceph")

  - name: set_fact rgw_hostname
@@ -52,7 +52,7 @@
dest: "/etc/grafana/ceph-dashboard.crt"
mode: 0640
when:
- grafana_crt
- grafana_crt | bool
- dashboard_protocol == "https"

- name: copy grafana SSL certificate key
@@ -61,7 +61,7 @@
dest: "/etc/grafana/ceph-dashboard.key"
mode: 0440
when:
- grafana_key
- grafana_key | bool
- dashboard_protocol == "https"

- name: generate a Self Signed OpenSSL certificate for dashboard
@@ -70,7 +70,7 @@
openssl req -new -nodes -x509 -subj '/O=IT/CN=ceph-grafana' -days 3650 -keyout /etc/grafana/ceph-dashboard.key -out /etc/grafana/ceph-dashboard.crt -extensions v3_ca
when:
- dashboard_protocol == "https"
- not grafana_key or not grafana_crt
- not grafana_key | bool or not grafana_crt | bool

- name: set owner/group on /etc/grafana
file:

@@ -1,6 +1,6 @@
---
- name: handlers
when: not rolling_update
when: not rolling_update | bool
block:
- name: update apt cache
apt:
@@ -27,7 +27,7 @@
listen: "restart ceph mons"
when:
- mon_group_name in group_names
- not rolling_update
- not rolling_update | bool

- name: restart ceph mon daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_mon_daemon.sh
@@ -35,10 +35,10 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mon_group_name in group_names
- not containerized_deployment
- not containerized_deployment | bool
- hostvars[item]['_mon_handler_called'] | default(False)
- hostvars[item]['_mon_handler_called'] | default(False) | bool
- mon_socket_stat.rc == 0
- not rolling_update
- not rolling_update | bool
with_items: "{{ groups[mon_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
@@ -49,11 +49,11 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mon_group_name in group_names
- containerized_deployment
- containerized_deployment | bool
- ceph_mon_container_stat.get('rc') == 0
- hostvars[item]['_mon_handler_called'] | default(False)
- hostvars[item]['_mon_handler_called'] | default(False) | bool
- ceph_mon_container_stat.get('stdout_lines', [])|length != 0
- not rolling_update
- not rolling_update | bool
with_items: "{{ groups[mon_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
@@ -84,21 +84,21 @@
listen: "restart ceph osds"
when:
- osd_group_name in group_names
- not rolling_update
- not rolling_update | bool

- name: restart ceph osds daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_osd_daemon.sh
listen: "restart ceph osds"
when:
- osd_group_name in group_names
- not containerized_deployment
- not containerized_deployment | bool
- not rolling_update
- not rolling_update | bool
# We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
# except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
- osd_socket_stat.rc == 0
- ceph_current_status.fsid is defined
- handler_health_osd_check
- handler_health_osd_check | bool
- hostvars[item]['_osd_handler_called'] | default(False)
- hostvars[item]['_osd_handler_called'] | default(False) | bool
with_items: "{{ groups[osd_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
@@ -110,12 +110,12 @@
# We do not want to run these checks on initial deployment (`socket_osd_container_stat.results[n].rc == 0`)
# except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
- osd_group_name in group_names
- containerized_deployment
- containerized_deployment | bool
- not rolling_update
- not rolling_update | bool
- inventory_hostname == groups.get(osd_group_name) | last
- ceph_osd_container_stat.get('rc') == 0
- ceph_osd_container_stat.get('stdout_lines', [])|length != 0
- handler_health_osd_check
- handler_health_osd_check | bool
- hostvars[item]['_osd_handler_called'] | default(False)
with_items: "{{ groups[osd_group_name] }}"
delegate_to: "{{ item }}"
@@ -147,8 +147,8 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mds_group_name in group_names
- not containerized_deployment
- not containerized_deployment | bool
- hostvars[item]['_mds_handler_called'] | default(False)
- hostvars[item]['_mds_handler_called'] | default(False) | bool
- mds_socket_stat.rc == 0
with_items: "{{ groups[mds_group_name] }}"
delegate_to: "{{ item }}"
@@ -160,9 +160,9 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mds_group_name in group_names
- containerized_deployment
- containerized_deployment | bool
- ceph_mds_container_stat.get('rc') == 0
- hostvars[item]['_mds_handler_called'] | default(False)
- hostvars[item]['_mds_handler_called'] | default(False) | bool
- ceph_mds_container_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[mds_group_name] }}"
delegate_to: "{{ item }}"
@@ -194,8 +194,8 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- rgw_group_name in group_names
- not containerized_deployment
- not containerized_deployment | bool
- hostvars[item]['_rgw_handler_called'] | default(False)
- hostvars[item]['_rgw_handler_called'] | default(False) | bool
- rgw_socket_stat.rc == 0
with_items: "{{ groups[rgw_group_name] }}"
delegate_to: "{{ item }}"
@@ -207,9 +207,9 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- rgw_group_name in group_names
- containerized_deployment
- containerized_deployment | bool
- ceph_rgw_container_stat.get('rc') == 0
- hostvars[item]['_rgw_handler_called'] | default(False)
- hostvars[item]['_rgw_handler_called'] | default(False) | bool
- ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[rgw_group_name] }}"
delegate_to: "{{ item }}"
@@ -241,8 +241,8 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- nfs_group_name in group_names
- not containerized_deployment
- not containerized_deployment | bool
- hostvars[item]['_nfs_handler_called'] | default(False)
- hostvars[item]['_nfs_handler_called'] | default(False) | bool
- nfs_socket_stat.rc == 0
with_items: "{{ groups[nfs_group_name] }}"
delegate_to: "{{ item }}"
@@ -254,9 +254,9 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- nfs_group_name in group_names
- containerized_deployment
- containerized_deployment | bool
- ceph_nfs_container_stat.get('rc') == 0
- hostvars[item]['_nfs_handler_called'] | default(False)
- hostvars[item]['_nfs_handler_called'] | default(False) | bool
- ceph_nfs_container_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[nfs_group_name] }}"
delegate_to: "{{ item }}"
@@ -288,8 +288,8 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- rbdmirror_group_name in group_names
- not containerized_deployment
- not containerized_deployment | bool
- hostvars[item]['_rbdmirror_handler_called'] | default(False)
- hostvars[item]['_rbdmirror_handler_called'] | default(False) | bool
- rbd_mirror_socket_stat.rc == 0
with_items: "{{ groups[rbdmirror_group_name] }}"
delegate_to: "{{ item }}"
@@ -301,9 +301,9 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- rbdmirror_group_name in group_names
- containerized_deployment
- containerized_deployment | bool
- ceph_rbd_mirror_container_stat.get('rc') == 0
- hostvars[item]['_rbdmirror_handler_called'] | default(False)
- hostvars[item]['_rbdmirror_handler_called'] | default(False) | bool
- ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[rbdmirror_group_name] }}"
delegate_to: "{{ item }}"
@@ -335,10 +335,10 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mgr_group_name in group_names
- not containerized_deployment
- not containerized_deployment | bool
- hostvars[item]['_mgr_handler_called'] | default(False)
- hostvars[item]['_mgr_handler_called'] | default(False) | bool
- mgr_socket_stat.rc == 0
- not rolling_update
- not rolling_update | bool
with_items: "{{ groups[mgr_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
@@ -349,11 +349,11 @@
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mgr_group_name in group_names
- containerized_deployment
- containerized_deployment | bool
- ceph_mgr_container_stat.get('rc') == 0
- hostvars[item]['_mgr_handler_called'] | default(False)
- hostvars[item]['_mgr_handler_called'] | default(False) | bool
- ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
- not rolling_update
- not rolling_update | bool
with_items: "{{ groups[mgr_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
@@ -384,7 +384,7 @@
when:
- iscsi_gw_group_name in group_names
- ceph_tcmu_runner_stat.get('rc') == 0
- hostvars[item]['_tcmu_runner_handler_called'] | default(False)
- hostvars[item]['_tcmu_runner_handler_called'] | default(False) | bool
- ceph_tcmu_runner_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
@@ -416,7 +416,7 @@
when:
- iscsi_gw_group_name in group_names
- ceph_rbd_target_gw_stat.get('rc') == 0
- hostvars[item]['_rbd_target_gw_handler_called'] | default(False)
- hostvars[item]['_rbd_target_gw_handler_called'] | default(False) | bool
- ceph_rbd_target_gw_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
@@ -448,7 +448,7 @@
when:
- iscsi_gw_group_name in group_names
- ceph_rbd_target_api_stat.get('rc') == 0
- hostvars[item]['_rbd_target_api_handler_called'] | default(False)
- hostvars[item]['_rbd_target_api_handler_called'] | default(False) | bool
- ceph_rbd_target_api_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"

@@ -1,8 +1,8 @@
---
- name: include check_running_containers.yml
include_tasks: check_running_containers.yml
when: containerized_deployment
when: containerized_deployment | bool

- name: include check_socket_non_container.yml
include_tasks: check_socket_non_container.yml
when: not containerized_deployment
when: not containerized_deployment | bool

@@ -8,10 +8,10 @@
check_mode: no
changed_when: false
tags: firewall
when: not containerized_deployment
when: not containerized_deployment | bool

- when: (firewalld_pkg_query.get('rc', 1) == 0
or is_atomic)
or is_atomic | bool)
block:
- name: start firewalld
service:

@@ -2,11 +2,11 @@
- name: include_tasks configure_firewall.yml
include_tasks: configure_firewall.yml
when:
- configure_firewall
- configure_firewall | bool
- ansible_os_family in ['RedHat', 'Suse']
tags: configure_firewall

- name: include_tasks setup_ntp.yml
include_tasks: setup_ntp.yml
when: ntp_service_enabled
when: ntp_service_enabled | bool
tags: configure_ntp

@@ -2,7 +2,7 @@
# Installation of NTP daemons needs to be a separate task since installations
# can't happen on Atomic
- name: install the ntp daemon
when: not is_atomic
when: not is_atomic | bool
block:
- name: install ntpd
package:

@@ -11,7 +11,7 @@
owner: "root"
group: "root"
mode: "{{ ceph_keyring_permissions }}"
when: cephx
when: cephx | bool

- name: deploy gateway settings, used by the ceph_iscsi_config modules
template:
@@ -22,7 +22,7 @@
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: check if a rbd pool exists
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls --format json"

@@ -4,19 +4,19 @@

- name: include non-container/prerequisites.yml
include_tasks: non-container/prerequisites.yml
when: not containerized_deployment
when: not containerized_deployment | bool

# deploy_ssl_keys used the ansible controller to create self-signed crt/key/pub files
# and transfers them to /etc/ceph directory on each controller. SSL certs are used by
# the API for https support.
- name: include deploy_ssl_keys.yml
include_tasks: deploy_ssl_keys.yml
when: generate_crt|bool
when: generate_crt | bool

- name: include non-container/configure_iscsi.yml
include_tasks: non-container/configure_iscsi.yml
when: not containerized_deployment
when: not containerized_deployment | bool

- name: include containerized.yml
include_tasks: container/containerized.yml
when: containerized_deployment
when: containerized_deployment | bool

@@ -6,7 +6,7 @@
when:
- ceph_origin == 'repository'
- ceph_repository == 'dev'
- ceph_iscsi_config_dev
- ceph_iscsi_config_dev | bool
block:
- name: set_fact ceph_iscsi_repos
set_fact:

@@ -3,20 +3,20 @@
include_tasks: create_mds_filesystems.yml
when:
- inventory_hostname == groups[mds_group_name] | first
- not rolling_update
- not rolling_update | bool

- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: include common.yml
include_tasks: common.yml

- name: non_containerized.yml
include_tasks: non_containerized.yml
when: not containerized_deployment
when: not containerized_deployment | bool

- name: containerized.yml
include_tasks: containerized.yml
when: containerized_deployment
when: containerized_deployment | bool

@@ -64,7 +64,7 @@
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
when: cephx
when: cephx | bool

- name: copy ceph keyring(s) if needed
copy:
@@ -74,7 +74,7 @@
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
when:
- cephx
- cephx | bool
- groups.get(mgr_group_name, []) | length > 0
- copy_admin_key | bool

@@ -84,4 +84,4 @@
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
when: cephx
when: cephx | bool

@@ -2,14 +2,14 @@
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: include common.yml
include_tasks: common.yml

- name: include pre_requisite.yml
include_tasks: pre_requisite.yml
when: not containerized_deployment
when: not containerized_deployment | bool

- name: include start_mgr.yml
include_tasks: start_mgr.yml

@@ -25,7 +25,7 @@
owner: "root"
group: "root"
mode: "0644"
when: containerized_deployment
when: containerized_deployment | bool
notify: restart ceph mgrs

- name: systemd start mgr

@@ -17,7 +17,7 @@
changed_when: false

- name: tasks for MONs when cephx is enabled
when: cephx
when: cephx | bool
block:
- name: fetch ceph initial keys
ceph_key:

@@ -8,7 +8,7 @@
register: config_crush_hierarchy
when:
- inventory_hostname == groups.get(mon_group_name) | last
- create_crush_tree
- create_crush_tree | bool
- hostvars[item]['osd_crush_location'] is defined

- name: create configured crush rules

@@ -48,7 +48,7 @@
cp /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
/etc/ceph/{{ cluster }}.mon.keyring
changed_when: false
when: containerized_deployment
when: containerized_deployment | bool

- name: create (and fix ownership of) monitor directory
file:
@@ -75,7 +75,7 @@
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: create_custom_admin_secret
when:
- cephx
- cephx | bool
- admin_secret != 'admin_secret'

- name: set_fact ceph-authtool container command
@@ -88,7 +88,7 @@
/var/lib/ceph/tmp/{{ cluster }}.mon..keyring --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
when:
- not create_custom_admin_secret.get('skipped')
- cephx
- cephx | bool
- admin_secret != 'admin_secret'

- name: set_fact ceph-mon container command
@@ -107,7 +107,7 @@
--keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when: cephx
when: cephx | bool

- name: ceph monitor mkfs without keyring
command: >
@@ -120,4 +120,4 @@
--fsid {{ fsid }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when: not cephx
when: not cephx | bool

@@ -2,29 +2,28 @@
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: include deploy_monitors.yml
include_tasks: deploy_monitors.yml
when:
# we test for both container and non-container
- (mon_socket_stat is defined and mon_socket_stat.get('rc') != 0) or (ceph_mon_container_stat is defined and ceph_mon_container_stat.get('stdout_lines', [])|length == 0)
- not switch_to_containers | default(False)
- not switch_to_containers | default(False) | bool

- name: include start_monitor.yml
include_tasks: start_monitor.yml

- name: include_tasks ceph_keys.yml
include_tasks: ceph_keys.yml
when: not switch_to_containers | default(False)
when: not switch_to_containers | default(False) | bool

- name: include secure_cluster.yml
include_tasks: secure_cluster.yml
when:
- secure_cluster
- secure_cluster | bool
- inventory_hostname == groups[mon_group_name] | first

- name: crush_rules.yml
include_tasks: crush_rules.yml
when: crush_rule_config
when: crush_rule_config | bool

@@ -4,7 +4,7 @@
state: directory
path: "/etc/systemd/system/ceph-mon@.service.d/"
when:
- not containerized_deployment
- not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- ansible_service_mgr == 'systemd'

@@ -15,7 +15,7 @@
config_overrides: "{{ ceph_mon_systemd_overrides | default({}) }}"
config_type: "ini"
when:
- not containerized_deployment
- not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- ansible_service_mgr == 'systemd'

@@ -28,7 +28,7 @@
group: "root"
mode: "0644"
notify: restart ceph mons
when: containerized_deployment
when: containerized_deployment | bool

- name: start the monitor service
systemd:

@@ -10,6 +10,6 @@
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
- cephx
- cephx | bool
- item.copy_key|bool
- item.copy_key | bool
- groups.get(mon_group_name, []) | length > 0

@@ -2,7 +2,7 @@
- name: set_fact container_exec_cmd_nfs
set_fact:
container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: check if "{{ ceph_nfs_rgw_user }}" exists
command: "{{ container_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
@@ -11,7 +11,7 @@
changed_when: false
failed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when: nfs_obj_gw
when: nfs_obj_gw | bool

- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
command: "{{ container_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
@@ -20,7 +20,7 @@
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- nfs_obj_gw
- nfs_obj_gw | bool
- rgwuser_exists.get('rc', 1) != 0

- name: set_fact ceph_nfs_rgw_access_key
@@ -28,7 +28,7 @@
ceph_nfs_rgw_access_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['access_key'] if rgwuser_exists.get('rc', 1) != 0 else (rgwuser_exists.stdout | from_json)['keys'][0]['access_key'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- nfs_obj_gw
- nfs_obj_gw | bool
- ceph_nfs_rgw_access_key is not defined

- name: set_fact ceph_nfs_rgw_secret_key
@@ -36,5 +36,5 @@
ceph_nfs_rgw_secret_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['secret_key'] if rgwuser_exists.get('rc', 1) != 0 else (rgwuser_exists.stdout | from_json)['keys'][0]['secret_key'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- nfs_obj_gw
- nfs_obj_gw | bool
- ceph_nfs_rgw_secret_key is not defined

@@ -2,18 +2,18 @@
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: include common.yml
include_tasks: common.yml

- name: include pre_requisite_non_container.yml
include_tasks: pre_requisite_non_container.yml
when: not containerized_deployment
when: not containerized_deployment | bool

- name: include pre_requisite_container.yml
include_tasks: pre_requisite_container.yml
when: containerized_deployment
when: containerized_deployment | bool

- name: include create_rgw_nfs_user.yml
import_tasks: create_rgw_nfs_user.yml
@@ -23,7 +23,7 @@
- name: include ganesha_selinux_fix.yml
import_tasks: ganesha_selinux_fix.yml
when:
- not containerized_deployment
- not containerized_deployment | bool
- ansible_os_family == 'RedHat'

- name: copy rgw keyring when deploying internal ganesha with external ceph cluster

@@ -5,7 +5,7 @@
set_fact:
admin_keyring:
- "/etc/ceph/{{ cluster }}.client.admin.keyring"
when: copy_admin_key
when: copy_admin_key | bool

- name: set_fact ceph_config_keys
set_fact:
@@ -15,7 +15,7 @@
- name: merge ceph_config_keys and admin_keyring
set_fact:
ceph_config_keys: "{{ ceph_config_keys + admin_keyring }}"
when: copy_admin_key
when: copy_admin_key | bool

- name: stat for config and keys
stat:
@@ -55,4 +55,4 @@

- name: reload dbus configuration
command: "killall -SIGHUP dbus-daemon"
when: ceph_nfs_dynamic_exports
when: ceph_nfs_dynamic_exports | bool

@@ -39,11 +39,11 @@
- { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
- { name: "/var/log/ceph", create: true }
- { name: "/var/run/ceph", create: true }
when: item.create|bool
when: item.create | bool

- name: cephx related tasks
when:
- cephx
- cephx | bool
- groups.get(mon_group_name, []) | length > 0
block:
- name: copy bootstrap cephx keys
@@ -55,10 +55,10 @@
mode: "0600"
with_items:
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
when: item.copy_key|bool
when: item.copy_key | bool

- name: nfs object gateway related tasks
when: nfs_obj_gw
when: nfs_obj_gw | bool
block:
- name: create rados gateway keyring
command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring

@@ -4,7 +4,7 @@
block:
- name: stable repos specific tasks
when:
- nfs_ganesha_stable
- nfs_ganesha_stable | bool
- ceph_repository == 'community'
block:
- name: add nfs-ganesha stable repository
@@ -25,7 +25,7 @@

- name: debian based systems - dev repos specific tasks
when:
- nfs_ganesha_dev
- nfs_ganesha_dev | bool
- ceph_repository == 'dev'
block:
- name: fetch nfs-ganesha development repository
@@ -62,14 +62,14 @@
allow_unauthenticated: yes
register: result
until: result is succeeded
when: nfs_obj_gw
when: nfs_obj_gw | bool
- name: install nfs rgw/cephfs gateway - debian
apt:
name: nfs-ganesha-ceph
allow_unauthenticated: yes
register: result
until: result is succeeded
when: nfs_file_gw
when: nfs_file_gw | bool

- name: debian based systems - rhcs installation
when:
@@ -88,11 +88,11 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
when: nfs_file_gw
when: nfs_file_gw | bool
- name: install red hat storage nfs obj gateway
apt:
name: nfs-ganesha-rgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
when: nfs_obj_gw
when: nfs_obj_gw | bool

@@ -3,7 +3,7 @@
- name: set_fact container_exec_cmd_nfs
set_fact:
container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: check if rados index object exists
shell: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
@@ -11,14 +11,14 @@
failed_when: false
register: rados_index_exists
check_mode: no
when: ceph_nfs_rados_backend
when: ceph_nfs_rados_backend | bool
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true

- name: create an empty rados index object
command: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
when:
- ceph_nfs_rados_backend
- ceph_nfs_rados_backend | bool
- rados_index_exists.rc != 0
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
@@ -48,7 +48,7 @@
owner: "root"
group: "root"
mode: "0755"
when: ceph_nfs_dynamic_exports
when: ceph_nfs_dynamic_exports | bool

- name: create exports dir index file
copy:
@@ -58,7 +58,7 @@
owner: "root"
group: "root"
mode: "0644"
when: ceph_nfs_dynamic_exports
when: ceph_nfs_dynamic_exports | bool

- name: generate systemd unit file
become: true
@@ -68,7 +68,7 @@
owner: "root"
group: "root"
mode: "0644"
when: containerized_deployment
when: containerized_deployment | bool
notify: restart ceph nfss

- name: systemd start nfs container
@@ -79,8 +79,8 @@
masked: no
daemon_reload: yes
when:
- containerized_deployment
- containerized_deployment | bool
- ceph_nfs_enable_service
- ceph_nfs_enable_service | bool

- name: start nfs gateway service
systemd:
@@ -89,5 +89,5 @@
enabled: yes
masked: no
when:
- not containerized_deployment
- not containerized_deployment | bool
- ceph_nfs_enable_service
- ceph_nfs_enable_service | bool

@@ -6,7 +6,7 @@
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0755"
when: cephx
when: cephx | bool
with_items:
- /var/lib/ceph/bootstrap-osd/
- /var/lib/ceph/osd/
@@ -22,5 +22,5 @@
- { name: "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
- cephx
- cephx | bool
- item.copy_key|bool
- item.copy_key | bool

@ -3,30 +3,30 @@
|
||||||
set_fact:
|
set_fact:
|
||||||
docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
|
docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
|
||||||
when:
|
when:
|
||||||
- containerized_deployment
|
- containerized_deployment | bool
|
||||||
- osd_objectstore == 'filestore'
|
- osd_objectstore == 'filestore'
|
||||||
- not dmcrypt
|
- not dmcrypt | bool
|
||||||
|
|
||||||
- name: set_fact docker_env_args '-e osd_bluestore=0 -e osd_filestore=1 -e osd_dmcrypt=1'
|
- name: set_fact docker_env_args '-e osd_bluestore=0 -e osd_filestore=1 -e osd_dmcrypt=1'
|
||||||
set_fact:
|
set_fact:
|
||||||
docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
|
docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
|
||||||
when:
|
when:
|
||||||
- containerized_deployment
|
- containerized_deployment | bool
|
||||||
- osd_objectstore == 'filestore'
|
- osd_objectstore == 'filestore'
|
||||||
- dmcrypt
|
- dmcrypt | bool
|
||||||
|
|
||||||
- name: set_fact docker_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=0'
|
- name: set_fact docker_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=0'
|
||||||
set_fact:
|
set_fact:
|
||||||
docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
|
docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
|
||||||
when:
|
when:
|
||||||
- containerized_deployment
|
- containerized_deployment | bool
|
||||||
- osd_objectstore == 'bluestore'
|
- osd_objectstore == 'bluestore'
|
||||||
- not dmcrypt
|
- not dmcrypt | bool
|
||||||
|
|
||||||
- name: set_fact docker_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=1'
|
- name: set_fact docker_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=1'
|
||||||
set_fact:
|
set_fact:
|
||||||
docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
|
docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
|
||||||
when:
|
when:
|
||||||
- containerized_deployment
|
- containerized_deployment | bool
|
||||||
- osd_objectstore == 'bluestore'
|
- osd_objectstore == 'bluestore'
|
||||||
- dmcrypt
|
- dmcrypt | bool
|
||||||
|
|
@ -9,7 +9,7 @@
   register: result
   until: result is succeeded
   when:
-    - not containerized_deployment
+    - not containerized_deployment | bool
     - ansible_os_family != 'ClearLinux'

 - name: install numactl when needed

@ -18,7 +18,7 @@
   register: result
   until: result is succeeded
   when:
-    - containerized_deployment
+    - containerized_deployment | bool
     - ceph_osd_numactl_opts != ""
   tags: with_pkg

@ -27,7 +27,7 @@
     name: lvm2
   register: result
   until: result is succeeded
-  when: not is_atomic
+  when: not is_atomic | bool
   tags: with_pkg

 - name: include_tasks common.yml

@ -47,13 +47,13 @@
   include_tasks: scenarios/lvm.yml
   when:
     - lvm_volumes|length > 0
-    - not rolling_update|default(False)
+    - not rolling_update|default(False) | bool

 - name: include_tasks scenarios/lvm-batch.yml
   include_tasks: scenarios/lvm-batch.yml
   when:
     - devices|length > 0
-    - not rolling_update|default(False)
+    - not rolling_update|default(False) | bool

 - name: include_tasks start_osds.yml
   include_tasks: start_osds.yml
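
Where a condition already carries a `default()`, the `bool` filter is simply appended to the chain: filters apply left to right and, in Jinja2, bind more tightly than `not`, so `not rolling_update|default(False) | bool` reads as `not ((rolling_update | default(False)) | bool)`. A hedged sketch of the same idea, reusing the variable name purely for illustration:

```
- name: skip this task during a rolling update
  debug:
    msg: "not a rolling update"
  # default() first (covers the undefined case), then bool, then not
  when: not rolling_update | default(false) | bool
```
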
@ -63,8 +63,8 @@
     openstack_keys_tmp: "{{ openstack_keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap|default(''), 'mds': item.mds_cap|default(''), 'mgr': item.mgr_cap|default('') } , 'mode': item.mode } ] }}"
   with_items: "{{ openstack_keys }}"
   when:
-    - not add_osd|default(False)
+    - not add_osd|default(False) | bool
-    - openstack_config
+    - openstack_config | bool
     - item.get('mon_cap', None)
     # it's enough to assume we are running an old-fashionned syntax simply by checking the presence of mon_cap since every key needs this cap

@ -72,13 +72,13 @@
   set_fact:
     openstack_keys: "{{ openstack_keys_tmp }}"
   when:
-    - not add_osd|default(False)
+    - not add_osd|default(False) | bool
     - openstack_keys_tmp is defined

 # Create the pools listed in openstack_pools
 - name: include openstack_config.yml
   include_tasks: openstack_config.yml
   when:
-    - not add_osd|default(False)
+    - not add_osd|default(False) | bool
-    - openstack_config
+    - openstack_config | bool
     - inventory_hostname == groups[osd_group_name] | last

@ -79,7 +79,7 @@
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"
   with_items: "{{ openstack_keys }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when: cephx
+  when: cephx | bool

 - name: fetch openstack cephx key(s)
   fetch:

@ -101,6 +101,6 @@
     - "{{ openstack_keys }}"
   delegate_to: "{{ item.0 }}"
   when:
-    - cephx
+    - cephx | bool
-    - openstack_config
+    - openstack_config | bool
     - item.0 != groups[mon_group_name]
@ -1,6 +1,6 @@
 ---
 - name: container specific tasks
-  when: containerized_deployment
+  when: containerized_deployment | bool
   block:
     - name: umount ceph disk (if on openstack)
       mount:

@ -8,7 +8,7 @@
     src: /dev/vdb
     fstype: ext3
     state: unmounted
-  when: ceph_docker_on_openstack
+  when: ceph_docker_on_openstack | bool

 - name: generate ceph osd docker run script
   become: true

@ -47,7 +47,7 @@
     group: "root"
     mode: "0644"
   notify: restart ceph osds
-  when: containerized_deployment
+  when: containerized_deployment | bool

 - name: systemd start osd
   systemd:

@ -23,7 +23,7 @@
     group: "root"
     mode: "0755"
   register: "tmpfiles_d"
-  when: disable_transparent_hugepage
+  when: disable_transparent_hugepage | bool

 - name: disable transparent hugepage
   template:

@ -34,7 +34,7 @@
     mode: "0644"
     force: "yes"
     validate: "systemd-tmpfiles --create %s"
-  when: disable_transparent_hugepage
+  when: disable_transparent_hugepage | bool

 - name: get default vm.min_free_kbytes
   command: sysctl -b vm.min_free_kbytes

@ -58,4 +58,4 @@
   with_items:
     - { name: "fs.aio-max-nr", value: "1048576", enable: (osd_objectstore == 'bluestore') }
     - "{{ os_tuning_params }}"
-  when: item.enable | default(true)
+  when: item.enable | default(true) | bool

@ -18,7 +18,7 @@
     -o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
   args:
     creates: /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
-  when: not containerized_deployment
+  when: not containerized_deployment | bool

 - name: set rbd-mirror key permissions
   file:

@ -26,4 +26,4 @@
     owner: "ceph"
     group: "ceph"
     mode: "{{ ceph_keyring_permissions }}"
-  when: not containerized_deployment
+  when: not containerized_deployment | bool
@ -1,24 +1,24 @@
 ---
 - name: include pre_requisite.yml
   include_tasks: pre_requisite.yml
-  when: not containerized_deployment
+  when: not containerized_deployment | bool

 - name: include common.yml
   include_tasks: common.yml
-  when: cephx
+  when: cephx | bool

 - name: tasks for non-containerized deployment
-  when: not containerized_deployment
+  when: not containerized_deployment | bool
   block:
     - name: include start_rbd_mirror.yml
       include_tasks: start_rbd_mirror.yml

     - name: include configure_mirroring.yml
       include_tasks: configure_mirroring.yml
-      when: ceph_rbd_mirror_configure
+      when: ceph_rbd_mirror_configure | bool

 - name: tasks for containerized deployment
-  when: containerized_deployment
+  when: containerized_deployment | bool
   block:
     - name: set_fact container_exec_cmd
       set_fact:

@ -29,5 +29,5 @@
     - { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
     - { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
   when:
-    - cephx
+    - cephx | bool
-    - item.copy_key|bool
+    - item.copy_key | bool
@ -4,23 +4,23 @@

 - name: include_tasks pre_requisite.yml
   include_tasks: pre_requisite.yml
-  when: not containerized_deployment
+  when: not containerized_deployment | bool

 - name: include_tasks openstack-keystone.yml
   include_tasks: openstack-keystone.yml
-  when: radosgw_keystone_ssl|bool
+  when: radosgw_keystone_ssl | bool

 - name: include_tasks start_radosgw.yml
   include_tasks: start_radosgw.yml
-  when: not containerized_deployment
+  when: not containerized_deployment | bool

 - name: include_tasks docker/main.yml
   include_tasks: docker/main.yml
-  when: containerized_deployment
+  when: containerized_deployment | bool

 - name: include_tasks multisite/main.yml
   include_tasks: multisite/main.yml
-  when: rgw_multisite
+  when: rgw_multisite | bool

 - name: rgw pool related tasks
   when: rgw_create_pools is defined

@ -6,14 +6,14 @@
 - name: include_tasks master.yml
   include_tasks: master.yml
   when:
-    - rgw_zonemaster
+    - rgw_zonemaster | bool
-    - not rgw_zonesecondary
+    - not rgw_zonesecondary | bool

 - name: include_tasks secondary.yml
   include_tasks: secondary.yml
   when:
-    - not rgw_zonemaster
+    - not rgw_zonemaster | bool
-    - rgw_zonesecondary
+    - rgw_zonesecondary | bool

 # Continue with common tasks
 - name: add zone to rgw stanza in ceph.conf
@ -5,7 +5,7 @@
     creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring
   changed_when: false
   with_items: "{{ rgw_instances }}"
-  when: cephx
+  when: cephx | bool

 - name: set rados gateway instance key permissions
   file:

@ -14,4 +14,4 @@
     group: "ceph"
     mode: "0600"
   with_items: "{{ rgw_instances }}"
-  when: cephx
+  when: cephx | bool
@ -7,8 +7,8 @@
   fail:
     msg: "fqdn configuration is not supported anymore. Use 'use_fqdn_yes_i_am_sure: true' if you really want to use it. See release notes for more details"
   when:
-    - mon_use_fqdn or mds_use_fqdn
+    - mon_use_fqdn | bool or mds_use_fqdn | bool
-    - not use_fqdn_yes_i_am_sure
+    - not use_fqdn_yes_i_am_sure | bool

 - name: debian based systems tasks
   when: ansible_os_family == 'Debian'
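
In compound conditions each operand gets its own `| bool`: the filter binds more tightly than `or`, so `mon_use_fqdn | bool or mds_use_fqdn | bool` converts each side before the `or` is evaluated. A generic sketch with placeholder flag names (not ceph-ansible variables):

```
- name: fail when either hypothetical flag is set
  fail:
    msg: "at least one of the two flags is enabled"
  # each operand is converted on its own; '|' binds more tightly than 'or'
  when: first_flag | default(false) | bool or second_flag | default(false) | bool
```
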
@ -31,7 +31,7 @@
   fail:
     msg: "ntp_daemon_type must be one of chronyd, ntpd, or timesyncd"
   when:
-    - ntp_service_enabled
+    - ntp_service_enabled | bool
     - ntp_daemon_type not in ['chronyd', 'ntpd', 'timesyncd']

 # Since NTPd can not be installed on Atomic...

@ -39,7 +39,7 @@
   fail:
     msg: installation can't happen on Atomic and ntpd needs to be installed
   when:
-    - is_atomic | default(False)
+    - is_atomic | default(False) | bool
     - ansible_os_family == 'RedHat'
     - ntp_daemon_type == 'ntpd'

@ -58,7 +58,7 @@
   include_tasks: check_devices.yml
   when:
     - osd_group_name in group_names
-    - not osd_auto_discovery | default(False)
+    - not osd_auto_discovery | default(False) | bool
     - devices|default([])|length > 0

 - name: include check_eth_mon.yml

@ -89,7 +89,7 @@
   include_tasks: check_rgw_multisite.yml
   when:
     - inventory_hostname in groups.get(rgw_group_name, [])
-    - rgw_multisite
+    - rgw_multisite | bool

 - name: include check_iscsi.yml
   include_tasks: check_iscsi.yml
@ -107,7 +107,7 @@
         name: ceph-container-common
     - import_role:
         name: ceph-node-exporter
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
     - import_role:
         name: ceph-config
       tags: ['ceph_update_config']

@ -156,7 +156,7 @@
         name: ceph-container-common
     - import_role:
         name: ceph-node-exporter
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
     - import_role:
         name: ceph-config
       tags: ['ceph_update_config']

@ -200,7 +200,7 @@
         name: ceph-container-common
     - import_role:
         name: ceph-node-exporter
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
     - import_role:
         name: ceph-config
       tags: ['ceph_update_config']

@ -244,7 +244,7 @@
         name: ceph-container-common
     - import_role:
         name: ceph-node-exporter
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
     - import_role:
         name: ceph-config
       tags: ['ceph_update_config']

@ -288,7 +288,7 @@
         name: ceph-container-common
     - import_role:
         name: ceph-node-exporter
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
     - import_role:
         name: ceph-config
       tags: ['ceph_update_config']

@ -332,7 +332,7 @@
         name: ceph-container-common
     - import_role:
         name: ceph-node-exporter
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
     - import_role:
         name: ceph-config
       tags: ['ceph_update_config']

@ -376,7 +376,7 @@
         name: ceph-container-common
     - import_role:
         name: ceph-node-exporter
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
     - import_role:
         name: ceph-config
       tags: ['ceph_update_config']

@ -422,7 +422,7 @@
       when: inventory_hostname == groups.get('clients', ['']) | first
     - import_role:
         name: ceph-node-exporter
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
     - import_role:
         name: ceph-config
       tags: ['ceph_update_config']

@ -468,7 +468,7 @@
         name: ceph-container-common
     - import_role:
         name: ceph-node-exporter
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
     - import_role:
         name: ceph-config
       tags: ['ceph_update_config']

@ -542,7 +542,7 @@
         name: ceph-prometheus
     - import_role:
         name: ceph-grafana
-      when: dashboard_enabled
+      when: dashboard_enabled | bool

 - hosts: '{{ (groups["grafana-server"] | default(groups["mgrs"]) | default(groups["mons"]))[0] | default(omit) }}'
   become: true

@ -553,4 +553,4 @@
       tags: ['ceph_update_config']
     - import_role:
         name: ceph-dashboard
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
@ -461,7 +461,7 @@
         name: ceph-container-engine
     - import_role:
         name: ceph-node-exporter
-      when: dashboard_enabled
+      when: dashboard_enabled | bool


 - hosts: grafana-server

@ -485,7 +485,7 @@
         name: ceph-prometheus
     - import_role:
         name: ceph-grafana
-      when: dashboard_enabled
+      when: dashboard_enabled | bool

 - hosts: '{{ (groups["grafana-server"] | default(groups["mgrs"]) | default(groups["mons"]))[0] | default(omit) }}'
   become: true

@ -493,7 +493,7 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
     - import_role:
         name: ceph-dashboard
-      when: dashboard_enabled
+      when: dashboard_enabled | bool
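
The dashboard-related roles in the playbooks above are all guarded by `when: dashboard_enabled | bool`. Since `import_role` is a static import, Ansible attaches that condition to every task the role brings in, so the whole role becomes a no-op when the flag is off. A minimal sketch with a placeholder role and variable name:

```
- import_role:
    name: some-optional-role           # placeholder, not a real ceph-ansible role
  # static import: the condition is copied onto each task of the imported role
  when: feature_enabled | default(false) | bool
```
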
@ -25,7 +25,7 @@
     state: present
   register: result
   until: result is succeeded
-  when: not is_atomic
+  when: not is_atomic | bool
 - name: create physical volume
   command: pvcreate /dev/sdb
   failed_when: false
@ -21,14 +21,14 @@
     state: present
   register: result
   until: result is succeeded
-  when: not is_atomic
+  when: not is_atomic | bool

 - name: generate and upload a random 10Mb file - containerized deployment
   command: >
     docker run --rm --name=rgw_multisite_test --entrypoint=/bin/bash {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -c 'dd if=/dev/urandom of=/tmp/testinfra.img bs=1M count=10; {{ s3cmd_cmd }} mb s3://testinfra; {{ s3cmd_cmd }} put /tmp/testinfra.img s3://testinfra'
   when:
-    - rgw_zonemaster
+    - rgw_zonemaster | bool
-    - containerized_deployment | default(False)
+    - containerized_deployment | default(False) | bool

 - name: generate and upload a random a 10Mb file - non containerized
   shell: >

@ -36,16 +36,16 @@
     {{ s3cmd_cmd }} mb s3://testinfra;
     {{ s3cmd_cmd }} put /tmp/testinfra.img s3://testinfra
   when:
-    - rgw_zonemaster | default(False)
+    - rgw_zonemaster | default(False) | bool
-    - not containerized_deployment | default(False)
+    - not containerized_deployment | default(False) | bool

 - name: get info from replicated file - containerized deployment
   command: >
     docker run --rm --name=rgw_multisite_test --entrypoint=/bin/bash {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -c '{{ s3cmd_cmd }} info s3://testinfra/testinfra.img'
   register: s3cmd_info_status
   when:
-    - not rgw_zonemaster | default(False)
+    - not rgw_zonemaster | default(False) | bool
-    - containerized_deployment | default(False)
+    - containerized_deployment | default(False) | bool
   retries: 10
   delay: 2
   until: s3cmd_info_status.get('rc', 1) == 0

@ -55,8 +55,8 @@
     {{ s3cmd_cmd }} info s3://testinfra/testinfra.img
   register: s3cmd_info_status
   when:
-    - not rgw_zonemaster | default(False)
+    - not rgw_zonemaster | default(False) | bool
-    - not containerized_deployment | default(False)
+    - not containerized_deployment | default(False) | bool
   retries: 10
   delay: 2
   until: s3cmd_info_status.get('rc', 1) == 0
@ -64,7 +64,7 @@
     dest: /etc/yum.repos.d
     owner: root
     group: root
-  when: not is_atomic
+  when: not is_atomic | bool

 - name: enable the rhel-7-extras-nightly repo
   command: "yum-config-manager --enable rhel-7-extras-nightly"

@ -105,7 +105,7 @@
     baseurl: "{{ repo_url }}/MON/x86_64/os/"
     gpgcheck: no
     enabled: yes
-  when: not is_atomic
+  when: not is_atomic | bool

 - hosts: osds
   gather_facts: false

@ -119,7 +119,7 @@
     baseurl: "{{ repo_url }}/OSD/x86_64/os/"
     gpgcheck: no
     enabled: yes
-  when: not is_atomic
+  when: not is_atomic | bool

 - name: set MTU on eth2
   command: "ifconfig eth2 mtu 1400 up"

@ -136,4 +136,4 @@
     baseurl: "{{ repo_url }}/Tools/x86_64/os/"
     gpgcheck: no
     enabled: yes
-  when: not is_atomic
+  when: not is_atomic | bool
@ -36,7 +36,7 @@
     state: present
   register: result
   until: result is succeeded
-  when: not is_atomic
+  when: not is_atomic | bool

 - name: centos based systems - configure repos
   block:

@ -66,7 +66,7 @@
     state: absent
   when:
     - ansible_distribution == 'CentOS'
-    - not is_atomic
+    - not is_atomic | bool

 - name: resize logical volume for root partition to fill remaining free space
   lvol:

@ -74,4 +74,4 @@
     vg: atomicos
     size: +100%FREE
     resizefs: yes
-  when: is_atomic
+  when: is_atomic | bool