mirror of https://github.com/ceph/ceph-ansible.git
ansible: use 'bool' filter on boolean conditionals
Running ceph-ansible currently produces a lot of ``[DEPRECATION WARNING]`` messages like this one:
```
[DEPRECATION WARNING]: evaluating containerized_deployment as a bare variable,
this behaviour will go away and you might need to add |bool to the expression
in the future. Also see CONDITIONAL_BARE_VARS configuration toggle.. This
feature will be removed in version 2.12. Deprecation warnings can be disabled
by setting deprecation_warnings=False in ansible.cfg.
```
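
For illustration, a minimal sketch of a task that triggers this warning — the task name is hypothetical, while the path and the conditional are borrowed from tasks touched by this commit:

```yaml
# Illustrative sketch only: the bare variable in `when:` is what the
# warning above complains about.
- name: remove ceph aliases file            # hypothetical task name
  file:
    path: /etc/profile.d/ceph-aliases.sh    # path borrowed from a touched task
    state: absent
  when: containerized_deployment            # bare boolean variable -> deprecation warning
```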
This change appends ``| bool`` to the affected conditionals.
In some places the existing ``variable|bool`` style was also changed to ``variable | bool`` *(with spaces around the pipe)*.
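
A minimal before/after sketch of the substitution, covering both the scalar ``when:`` form and the list form (the task itself is hypothetical; the variable names appear in this change):

```yaml
# Before (bare variables, triggers the deprecation warning):
#   when: containerized_deployment
#   when:
#     - cephx
#     - inventory_hostname == groups[mon_group_name][0]
#
# After (explicit bool filter, no warning):
- name: example task using the explicit bool filter
  debug:
    msg: "conditional evaluated with | bool"
  when:
    - cephx | bool
    - inventory_hostname == groups[mon_group_name][0]
```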
Closes: #4022
Signed-off-by: L3D <l3d@c3woc.de>
(cherry picked from commit ab54fe20ec)
Branch: pull/4070/head
Parent: 7a384e7ec2
Commit: 1daca1ba83
|
@ -386,7 +386,7 @@
|
|||
- wait for server to boot
|
||||
- remove data
|
||||
when:
|
||||
- reboot_osd_node
|
||||
- reboot_osd_node | bool
|
||||
- remove_osd_mountpoints.failed is defined
|
||||
|
||||
- name: wipe table on dm-crypt devices
|
||||
|
@ -658,7 +658,7 @@
|
|||
state: absent
|
||||
when:
|
||||
- ansible_pkg_mgr == 'yum'
|
||||
- purge_all_packages == true
|
||||
- purge_all_packages | bool
|
||||
|
||||
- name: purge remaining ceph packages with dnf
|
||||
dnf:
|
||||
|
@ -666,7 +666,7 @@
|
|||
state: absent
|
||||
when:
|
||||
- ansible_pkg_mgr == 'dnf'
|
||||
- purge_all_packages == true
|
||||
- purge_all_packages | bool
|
||||
|
||||
- name: purge remaining ceph packages with apt
|
||||
apt:
|
||||
|
@ -674,7 +674,7 @@
|
|||
state: absent
|
||||
when:
|
||||
- ansible_pkg_mgr == 'apt'
|
||||
- purge_all_packages == true
|
||||
- purge_all_packages | bool
|
||||
|
||||
- name: purge extra packages with yum
|
||||
yum:
|
||||
|
|
|
@ -84,7 +84,7 @@
|
|||
file:
|
||||
path: /etc/profile.d/ceph-aliases.sh
|
||||
state: absent
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: set mon_host_count
|
||||
set_fact:
|
||||
|
@ -114,7 +114,7 @@
|
|||
delegate_to: "{{ item }}"
|
||||
with_items: "{{ groups[mon_group_name] }}"
|
||||
when:
|
||||
- cephx
|
||||
- cephx | bool
|
||||
- inventory_hostname == groups[mon_group_name][0]
|
||||
|
||||
- name: create potentially missing keys (rbd and rbd-mirror)
|
||||
|
@ -133,7 +133,7 @@
|
|||
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
|
||||
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
|
||||
when:
|
||||
- cephx
|
||||
- cephx | bool
|
||||
- inventory_hostname == groups[mon_group_name][0]
|
||||
|
||||
# NOTE: we mask the service so the RPM can't restart it
|
||||
|
@ -145,7 +145,7 @@
|
|||
enabled: no
|
||||
masked: yes
|
||||
ignore_errors: True
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
# NOTE: we mask the service so the RPM can't restart it
|
||||
# after the package gets upgraded
|
||||
|
@ -156,7 +156,7 @@
|
|||
enabled: no
|
||||
masked: yes
|
||||
ignore_errors: True
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
# only mask the service for mgr because it must be upgraded
|
||||
# after ALL monitors, even when collocated
|
||||
|
@ -175,7 +175,7 @@
|
|||
delegate_to: "{{ mon_host }}"
|
||||
when:
|
||||
- inventory_hostname == groups[mon_group_name][0]
|
||||
- not containerized_deployment
|
||||
- not containerized_deployment | bool
|
||||
|
||||
- name: set containerized osd flags
|
||||
command: >
|
||||
|
@ -186,16 +186,16 @@
|
|||
delegate_to: "{{ mon_host }}"
|
||||
when:
|
||||
- inventory_hostname == groups[mon_group_name][0]
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
|
||||
- import_role:
|
||||
name: ceph-handler
|
||||
- import_role:
|
||||
name: ceph-common
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-container-common
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-config
|
||||
- import_role:
|
||||
|
@ -206,7 +206,7 @@
|
|||
name: ceph-mon@{{ monitor_name }}
|
||||
state: started
|
||||
enabled: yes
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: start ceph mgr
|
||||
systemd:
|
||||
|
@ -214,7 +214,7 @@
|
|||
state: started
|
||||
enabled: yes
|
||||
ignore_errors: True # if no mgr collocated with mons
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: restart containerized ceph mon
|
||||
systemd:
|
||||
|
@ -222,7 +222,7 @@
|
|||
state: restarted
|
||||
enabled: yes
|
||||
daemon_reload: yes
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: non container | waiting for the monitor to join the quorum...
|
||||
command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" -s --format json
|
||||
|
@ -233,7 +233,7 @@
|
|||
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
|
||||
retries: "{{ health_mon_check_retries }}"
|
||||
delay: "{{ health_mon_check_delay }}"
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: container | waiting for the containerized monitor to join the quorum...
|
||||
command: >
|
||||
|
@ -245,7 +245,7 @@
|
|||
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
|
||||
retries: "{{ health_mon_check_retries }}"
|
||||
delay: "{{ health_mon_check_delay }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: upgrade ceph mgr nodes when implicitly collocated on monitors
|
||||
vars:
|
||||
|
@ -274,10 +274,10 @@
|
|||
name: ceph-handler
|
||||
- import_role:
|
||||
name: ceph-common
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-container-common
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-config
|
||||
- import_role:
|
||||
|
@ -310,10 +310,10 @@
|
|||
name: ceph-handler
|
||||
- import_role:
|
||||
name: ceph-common
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-container-common
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-config
|
||||
- import_role:
|
||||
|
@ -334,19 +334,18 @@
|
|||
shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
|
||||
register: osd_ids
|
||||
changed_when: false
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: get osd unit names - container
|
||||
shell: systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([a-z0-9]+).service"
|
||||
register: osd_names
|
||||
changed_when: false
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: set num_osds for container
|
||||
set_fact:
|
||||
num_osds: "{{ osd_names.stdout_lines|default([])|length }}"
|
||||
when:
|
||||
- containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: stop ceph osd
|
||||
systemd:
|
||||
|
@ -355,13 +354,12 @@
|
|||
enabled: no
|
||||
masked: yes
|
||||
with_items: "{{ osd_ids.stdout_lines }}"
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: set num_osds for non container
|
||||
set_fact:
|
||||
num_osds: "{{ osd_ids.stdout_lines|default([])|length }}"
|
||||
when:
|
||||
- not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- import_role:
|
||||
name: ceph-defaults
|
||||
|
@ -371,10 +369,10 @@
|
|||
name: ceph-handler
|
||||
- import_role:
|
||||
name: ceph-common
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-container-common
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-config
|
||||
- import_role:
|
||||
|
@ -384,7 +382,7 @@
|
|||
shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
|
||||
register: osd_ids
|
||||
changed_when: false
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: start ceph osd
|
||||
systemd:
|
||||
|
@ -393,7 +391,7 @@
|
|||
enabled: yes
|
||||
masked: no
|
||||
with_items: "{{ osd_ids.stdout_lines }}"
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: restart containerized ceph osd
|
||||
systemd:
|
||||
|
@ -403,7 +401,7 @@
|
|||
masked: no
|
||||
daemon_reload: yes
|
||||
with_items: "{{ osd_names.stdout_lines }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: scan ceph-disk osds with ceph-volume if deploying nautilus
|
||||
command: "ceph-volume --cluster={{ cluster }} simple scan"
|
||||
|
@ -411,7 +409,7 @@
|
|||
CEPH_VOLUME_DEBUG: 1
|
||||
when:
|
||||
- ceph_release in ["nautilus", "octopus"]
|
||||
- not containerized_deployment
|
||||
- not containerized_deployment | bool
|
||||
|
||||
- name: activate scanned ceph-disk osds and migrate to ceph-volume if deploying nautilus
|
||||
command: "ceph-volume --cluster={{ cluster }} simple activate --all"
|
||||
|
@ -419,12 +417,12 @@
|
|||
CEPH_VOLUME_DEBUG: 1
|
||||
when:
|
||||
- ceph_release in ["nautilus", "octopus"]
|
||||
- not containerized_deployment
|
||||
- not containerized_deployment | bool
|
||||
|
||||
- name: set_fact container_exec_cmd_osd
|
||||
set_fact:
|
||||
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: get osd versions
|
||||
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
|
||||
|
@ -478,7 +476,7 @@
|
|||
- name: set_fact container_exec_cmd_osd
|
||||
set_fact:
|
||||
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: unset osd flags
|
||||
command: "{{ container_exec_cmd_update_osd|default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
|
||||
|
@ -519,7 +517,7 @@
|
|||
state: stopped
|
||||
enabled: no
|
||||
masked: yes
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- import_role:
|
||||
name: ceph-defaults
|
||||
|
@ -529,10 +527,10 @@
|
|||
name: ceph-handler
|
||||
- import_role:
|
||||
name: ceph-common
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-container-common
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-config
|
||||
- import_role:
|
||||
|
@ -544,7 +542,7 @@
|
|||
state: started
|
||||
enabled: yes
|
||||
masked: no
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: restart ceph mds
|
||||
systemd:
|
||||
|
@ -553,7 +551,7 @@
|
|||
enabled: yes
|
||||
masked: no
|
||||
daemon_reload: yes
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
|
||||
- name: upgrade ceph rgws cluster
|
||||
|
@ -584,16 +582,16 @@
|
|||
enabled: no
|
||||
masked: yes
|
||||
with_items: "{{ rgw_instances }}"
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- import_role:
|
||||
name: ceph-handler
|
||||
- import_role:
|
||||
name: ceph-common
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-container-common
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-config
|
||||
- import_role:
|
||||
|
@ -607,7 +605,7 @@
|
|||
masked: no
|
||||
daemon_reload: yes
|
||||
with_items: "{{ rgw_instances }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
|
||||
- name: upgrade ceph rbd mirror node
|
||||
|
@ -632,10 +630,10 @@
|
|||
name: ceph-handler
|
||||
- import_role:
|
||||
name: ceph-common
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-container-common
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-config
|
||||
- import_role:
|
||||
|
@ -647,7 +645,7 @@
|
|||
state: started
|
||||
enabled: yes
|
||||
masked: no
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: restart containerized ceph rbd mirror
|
||||
systemd:
|
||||
|
@ -656,7 +654,7 @@
|
|||
enabled: yes
|
||||
masked: no
|
||||
daemon_reload: yes
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
|
||||
- name: upgrade ceph nfs node
|
||||
|
@ -676,7 +674,7 @@
|
|||
enabled: no
|
||||
masked: yes
|
||||
failed_when: false
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- import_role:
|
||||
name: ceph-defaults
|
||||
|
@ -686,10 +684,10 @@
|
|||
name: ceph-handler
|
||||
- import_role:
|
||||
name: ceph-common
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-container-common
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-config
|
||||
- import_role:
|
||||
|
@ -702,8 +700,8 @@
|
|||
enabled: yes
|
||||
masked: no
|
||||
when:
|
||||
- not containerized_deployment
|
||||
- ceph_nfs_enable_service
|
||||
- not containerized_deployment | bool
|
||||
- ceph_nfs_enable_service | bool
|
||||
|
||||
- name: systemd restart nfs container
|
||||
systemd:
|
||||
|
@ -713,8 +711,8 @@
|
|||
masked: no
|
||||
daemon_reload: yes
|
||||
when:
|
||||
- ceph_nfs_enable_service
|
||||
- containerized_deployment
|
||||
- ceph_nfs_enable_service | bool
|
||||
- containerized_deployment | bool
|
||||
|
||||
|
||||
- name: upgrade ceph iscsi gateway node
|
||||
|
@ -740,7 +738,7 @@
|
|||
- rbd-target-api
|
||||
- rbd-target-gw
|
||||
- tcmu-runner
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- import_role:
|
||||
name: ceph-defaults
|
||||
|
@ -750,10 +748,10 @@
|
|||
name: ceph-handler
|
||||
- import_role:
|
||||
name: ceph-common
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-container-common
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-config
|
||||
- import_role:
|
||||
|
@ -769,7 +767,7 @@
|
|||
- tcmu-runner
|
||||
- rbd-target-api
|
||||
- rbd-target-gw
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
|
||||
- name: upgrade ceph client node
|
||||
|
@ -787,10 +785,10 @@
|
|||
name: ceph-handler
|
||||
- import_role:
|
||||
name: ceph-common
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-container-common
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
- import_role:
|
||||
name: ceph-config
|
||||
- import_role:
|
||||
|
@ -809,25 +807,25 @@
|
|||
command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph osd require-osd-release nautilus"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
run_once: True
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: non container | disallow pre-nautilus OSDs and enable all new nautilus-only functionality
|
||||
command: "ceph --cluster {{ cluster }} osd require-osd-release nautilus"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
run_once: True
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: container | enable msgr2 protocol
|
||||
command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph mon enable-msgr2"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
run_once: True
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: non container | enable msgr2 protocol
|
||||
command: "ceph --cluster {{ cluster }} mon enable-msgr2"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
run_once: True
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- import_role:
|
||||
name: ceph-handler
|
||||
|
@ -848,7 +846,7 @@
|
|||
- name: set_fact container_exec_cmd_status
|
||||
set_fact:
|
||||
container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: show ceph status
|
||||
command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
|
||||
|
|
|
@ -77,7 +77,7 @@
|
|||
- name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
|
||||
set_fact:
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: exit playbook, if can not connect to the cluster
|
||||
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
|
||||
|
|
|
@ -65,7 +65,7 @@
|
|||
- name: set_fact container_exec_cmd build docker exec command (containerized)
|
||||
set_fact:
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: exit playbook, if can not connect to the cluster
|
||||
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
|
||||
|
|
|
@ -73,7 +73,7 @@
|
|||
with_items:
|
||||
osds_dir_stat.results
|
||||
when:
|
||||
- osds_dir_stat is defined
|
||||
- osds_dir_stat is defined | bool
|
||||
- item.stat.exists == false
|
||||
|
||||
- name: install sgdisk(gdisk)
|
||||
|
|
|
@ -61,7 +61,7 @@
|
|||
- name: set_fact container_exec_cmd build docker exec command (containerized)
|
||||
set_fact:
|
||||
container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: exit playbook, if can not connect to the cluster
|
||||
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
|
||||
|
@ -89,14 +89,14 @@
|
|||
with_items: "{{ osd_hosts }}"
|
||||
delegate_to: "{{ item }}"
|
||||
failed_when: false
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: fail when admin key is not present
|
||||
fail:
|
||||
msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
|
||||
with_items: "{{ ceph_admin_key.results }}"
|
||||
when:
|
||||
- not containerized_deployment
|
||||
- not containerized_deployment | bool
|
||||
- item.stat.exists == false
|
||||
|
||||
# NOTE(leseb): using '>' is the only way I could have the command working
|
||||
|
@ -110,7 +110,7 @@
|
|||
- "{{ osd_to_replace.split(',') }}"
|
||||
register: osd_to_replace_disks
|
||||
delegate_to: "{{ item.0 }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: zapping osd(s) - container
|
||||
shell: >
|
||||
|
@ -122,7 +122,7 @@
|
|||
- "{{ osd_hosts }}"
|
||||
- "{{ osd_to_replace_disks.results }}"
|
||||
delegate_to: "{{ item.0 }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: zapping osd(s) - non container
|
||||
command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}
|
||||
|
@ -131,7 +131,7 @@
|
|||
- "{{ osd_hosts }}"
|
||||
- "{{ osd_to_replace_disks.results }}"
|
||||
delegate_to: "{{ item.0 }}"
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: destroying osd(s)
|
||||
command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap
|
||||
|
@ -140,7 +140,7 @@
|
|||
- "{{ osd_hosts }}"
|
||||
- "{{ osd_to_replace.split(',') }}"
|
||||
delegate_to: "{{ item.0 }}"
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: replace osd(s) - prepare - non container
|
||||
command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)
|
||||
|
|
|
@ -56,7 +56,7 @@ EXAMPLES = '''
|
|||
location: "{{ hostvars[item]['osd_crush_location'] }}"
|
||||
containerized: "{{ container_exec_cmd }}"
|
||||
with_items: "{{ groups[osd_group_name] }}"
|
||||
when: crush_rule_config
|
||||
when: crush_rule_config | bool
|
||||
'''
|
||||
|
||||
RETURN = '''# '''
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
name: "{{ item }}"
|
||||
groups: _filtered_clients
|
||||
with_items: "{{ groups[client_group_name] }}"
|
||||
when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment)
|
||||
when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment | bool)
|
||||
|
||||
- name: set_fact delegated_node
|
||||
set_fact:
|
||||
|
@ -50,7 +50,7 @@
|
|||
- "{{ ceph_nfs_ceph_user | default([]) }}"
|
||||
delegate_to: "{{ delegated_node }}"
|
||||
when:
|
||||
- cephx
|
||||
- cephx | bool
|
||||
- keys | length > 0
|
||||
- inventory_hostname == groups.get('_filtered_clients') | first
|
||||
|
||||
|
@ -61,13 +61,13 @@
|
|||
register: slurp_client_keys
|
||||
delegate_to: "{{ delegated_node }}"
|
||||
when:
|
||||
- cephx
|
||||
- cephx | bool
|
||||
- keys | length > 0
|
||||
- inventory_hostname == groups.get('_filtered_clients') | first
|
||||
|
||||
- name: pool related tasks
|
||||
when:
|
||||
- condition_copy_admin_key
|
||||
- condition_copy_admin_key | bool
|
||||
- inventory_hostname == groups.get('_filtered_clients', []) | first
|
||||
block:
|
||||
- name: list existing pool(s)
|
||||
|
|
|
@ -4,4 +4,4 @@
|
|||
|
||||
- name: include create_users_keys.yml
|
||||
include_tasks: create_users_keys.yml
|
||||
when: user_config
|
||||
when: user_config | bool
|
||||
|
|
|
@ -7,5 +7,5 @@
|
|||
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
|
||||
mode: "{{ ceph_keyring_permissions }}"
|
||||
when:
|
||||
- cephx
|
||||
- copy_admin_key
|
||||
- cephx | bool
|
||||
- copy_admin_key | bool
|
||||
|
|
|
@ -9,4 +9,4 @@
|
|||
with_items:
|
||||
- "{{ rbd_client_admin_socket_path }}"
|
||||
- "{{ rbd_client_log_path }}"
|
||||
when: rbd_client_directories
|
||||
when: rbd_client_directories | bool
|
||||
|
|
|
@ -3,25 +3,25 @@
|
|||
file:
|
||||
path: /tmp
|
||||
state: directory
|
||||
when: use_installer
|
||||
when: use_installer | bool
|
||||
|
||||
- name: use mktemp to create name for rundep
|
||||
command: "mktemp /tmp/rundep.XXXXXXXX"
|
||||
register: rundep_location
|
||||
when: use_installer
|
||||
when: use_installer | bool
|
||||
|
||||
- name: copy rundep
|
||||
copy:
|
||||
src: "{{ ansible_dir }}/rundep"
|
||||
dest: "{{ item }}"
|
||||
with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}"
|
||||
when: use_installer
|
||||
when: use_installer | bool
|
||||
|
||||
- name: install ceph dependencies
|
||||
script: "{{ ansible_dir }}/rundep_installer.sh {{ item }}"
|
||||
become: true
|
||||
with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}"
|
||||
when: use_installer
|
||||
when: use_installer | bool
|
||||
|
||||
- name: ensure rsync is installed
|
||||
package:
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
---
|
||||
- name: include create_ceph_initial_dirs.yml
|
||||
include_tasks: create_ceph_initial_dirs.yml
|
||||
when: containerized_deployment|bool
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: config file operations related to OSDs
|
||||
when:
|
||||
- inventory_hostname in groups.get(osd_group_name, [])
|
||||
# the rolling_update.yml playbook sets num_osds to the number of currently
|
||||
# running osds
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
block:
|
||||
- name: count number of osds for lvm scenario
|
||||
set_fact:
|
||||
|
@ -62,7 +62,7 @@
|
|||
|
||||
# ceph-common
|
||||
- name: config file operation for non-containerized scenarios
|
||||
when: not containerized_deployment|bool
|
||||
when: not containerized_deployment | bool
|
||||
block:
|
||||
- name: create ceph conf directory
|
||||
file:
|
||||
|
@ -98,7 +98,7 @@
|
|||
state: directory
|
||||
mode: "0755"
|
||||
delegate_to: localhost
|
||||
when: ceph_conf_local
|
||||
when: ceph_conf_local | bool
|
||||
|
||||
- name: "generate {{ cluster }}.conf configuration file locally"
|
||||
config_template:
|
||||
|
@ -112,10 +112,10 @@
|
|||
config_type: ini
|
||||
when:
|
||||
- inventory_hostname in groups[mon_group_name]
|
||||
- ceph_conf_local
|
||||
- ceph_conf_local | bool
|
||||
|
||||
- name: config file operations for containerized scenarios
|
||||
when: containerized_deployment|bool
|
||||
when: containerized_deployment | bool
|
||||
block:
|
||||
- name: create a local fetch directory if it does not exist
|
||||
file:
|
||||
|
@ -126,7 +126,7 @@
|
|||
become: false
|
||||
run_once: true
|
||||
when:
|
||||
- (cephx or generate_fsid)
|
||||
- (cephx or generate_fsid) | bool
|
||||
- ((inventory_hostname in groups.get(mon_group_name, [])) or
|
||||
(groups.get(nfs_group_name, []) | length > 0) and inventory_hostname == groups.get(nfs_group_name, [])[0])
|
||||
|
||||
|
|
|
@ -183,7 +183,7 @@
|
|||
until: docker_image.rc == 0
|
||||
retries: "{{ docker_pull_retry }}"
|
||||
delay: 10
|
||||
when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
|
||||
when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image | bool)
|
||||
|
||||
- name: "inspecting {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
|
||||
command: "{{ container_binary }} inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
|
||||
|
@ -278,15 +278,15 @@
|
|||
copy:
|
||||
src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
|
||||
dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
|
||||
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
|
||||
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
|
||||
|
||||
- name: load ceph dev image
|
||||
command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
|
||||
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
|
||||
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
|
||||
|
||||
- name: remove tmp ceph dev image file
|
||||
file:
|
||||
name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
|
||||
state: absent
|
||||
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
|
||||
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
enabled: yes
|
||||
when:
|
||||
- ansible_distribution == 'CentOS'
|
||||
- ceph_docker_enable_centos_extra_repo
|
||||
- ceph_docker_enable_centos_extra_repo | bool
|
||||
tags:
|
||||
with_pkg
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
- name: set_fact container_exec_cmd
|
||||
set_fact:
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: disable SSL for dashboard
|
||||
command: "{{ container_exec_cmd }} ceph config set mgr mgr/dashboard/ssl false"
|
||||
|
@ -22,7 +22,7 @@
|
|||
group: root
|
||||
mode: 0440
|
||||
when:
|
||||
- dashboard_crt
|
||||
- dashboard_crt | bool
|
||||
- dashboard_protocol == "https"
|
||||
|
||||
- name: copy dashboard SSL certificate key
|
||||
|
@ -33,7 +33,7 @@
|
|||
group: root
|
||||
mode: 0440
|
||||
when:
|
||||
- dashboard_key
|
||||
- dashboard_key | bool
|
||||
- dashboard_protocol == "https"
|
||||
|
||||
- name: generate a Self Signed OpenSSL certificate for dashboard
|
||||
|
@ -42,7 +42,7 @@
|
|||
openssl req -new -nodes -x509 -subj '/O=IT/CN=ceph-dashboard' -days 3650 -keyout /etc/ceph/ceph-dashboard.key -out /etc/ceph/ceph-dashboard.crt -extensions v3_ca
|
||||
when:
|
||||
- dashboard_protocol == "https"
|
||||
- not dashboard_key or not dashboard_crt
|
||||
- not dashboard_key | bool or not dashboard_crt | bool
|
||||
|
||||
- name: import dashboard certificate file
|
||||
command: "{{ container_exec_cmd }} ceph config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt"
|
||||
|
@ -131,25 +131,25 @@
|
|||
command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-host {{ dashboard_rgw_api_host }}"
|
||||
changed_when: false
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when: dashboard_rgw_api_host
|
||||
when: dashboard_rgw_api_host | bool
|
||||
|
||||
- name: set the rgw port
|
||||
command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-port {{ dashboard_rgw_api_port }}"
|
||||
changed_when: false
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when: dashboard_rgw_api_port
|
||||
when: dashboard_rgw_api_port | bool
|
||||
|
||||
- name: set the rgw scheme
|
||||
command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-scheme {{ dashboard_rgw_api_scheme }}"
|
||||
changed_when: false
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when: dashboard_rgw_api_scheme
|
||||
when: dashboard_rgw_api_scheme | bool
|
||||
|
||||
- name: set the rgw admin resource
|
||||
command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}"
|
||||
changed_when: false
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when: dashboard_rgw_api_admin_resource
|
||||
when: dashboard_rgw_api_admin_resource | bool
|
||||
|
||||
- name: disable ssl verification for rgw
|
||||
command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-ssl-verify False"
|
||||
|
|
|
@ -29,18 +29,18 @@
|
|||
- name: set_fact monitor_name ansible_hostname
|
||||
set_fact:
|
||||
monitor_name: "{{ ansible_hostname }}"
|
||||
when: not mon_use_fqdn
|
||||
when: not mon_use_fqdn | bool
|
||||
|
||||
- name: set_fact monitor_name ansible_fqdn
|
||||
set_fact:
|
||||
monitor_name: "{{ ansible_fqdn }}"
|
||||
when: mon_use_fqdn
|
||||
when: mon_use_fqdn | bool
|
||||
|
||||
- name: set_fact container_exec_cmd
|
||||
set_fact:
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
|
||||
when:
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
- groups.get(mon_group_name, []) | length > 0
|
||||
|
||||
# this task shouldn't run in a rolling_update situation
|
||||
|
@ -55,7 +55,7 @@
|
|||
run_once: true
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when:
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
- groups.get(mon_group_name, []) | length > 0
|
||||
|
||||
# set this as a default when performing a rolling_update
|
||||
|
@ -73,24 +73,24 @@
|
|||
delegate_to: localhost
|
||||
changed_when: false
|
||||
become: false
|
||||
when: cephx or generate_fsid
|
||||
when: cephx | bool or generate_fsid | bool
|
||||
|
||||
- name: get current fsid
|
||||
command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }} config get fsid"
|
||||
register: rolling_update_fsid
|
||||
delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
|
||||
when: rolling_update
|
||||
when: rolling_update | bool
|
||||
|
||||
- name: set_fact fsid
|
||||
set_fact:
|
||||
fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
|
||||
when: rolling_update
|
||||
when: rolling_update | bool
|
||||
|
||||
- name: set_fact ceph_current_status (convert to json)
|
||||
set_fact:
|
||||
ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
|
||||
when:
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
- ceph_current_status.rc == 0
|
||||
|
||||
- name: set_fact fsid from ceph_current_status
|
||||
|
@ -100,9 +100,9 @@
|
|||
|
||||
- name: fsid realted tasks
|
||||
when:
|
||||
- generate_fsid
|
||||
- generate_fsid | bool
|
||||
- ceph_current_status.fsid is undefined
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
block:
|
||||
- name: generate cluster fsid
|
||||
shell: python -c 'import uuid; print(str(uuid.uuid4()))'
|
||||
|
@ -117,12 +117,12 @@
|
|||
- name: set_fact mds_name ansible_hostname
|
||||
set_fact:
|
||||
mds_name: "{{ ansible_hostname }}"
|
||||
when: not mds_use_fqdn
|
||||
when: not mds_use_fqdn | bool
|
||||
|
||||
- name: set_fact mds_name ansible_fqdn
|
||||
set_fact:
|
||||
mds_name: "{{ ansible_fqdn }}"
|
||||
when: mds_use_fqdn
|
||||
when: mds_use_fqdn | bool
|
||||
|
||||
- name: set_fact rbd_client_directory_owner ceph
|
||||
set_fact:
|
||||
|
@ -151,7 +151,7 @@
|
|||
when:
|
||||
- devices is defined
|
||||
- inventory_hostname in groups.get(osd_group_name, [])
|
||||
- not osd_auto_discovery|default(False)
|
||||
- not osd_auto_discovery | default(False) | bool
|
||||
|
||||
- name: set_fact build devices from resolved symlinks
|
||||
set_fact:
|
||||
|
@ -160,7 +160,7 @@
|
|||
when:
|
||||
- devices is defined
|
||||
- inventory_hostname in groups.get(osd_group_name, [])
|
||||
- not osd_auto_discovery|default(False)
|
||||
- not osd_auto_discovery | default(False) | bool
|
||||
|
||||
- name: set_fact build final devices list
|
||||
set_fact:
|
||||
|
@ -168,14 +168,14 @@
|
|||
when:
|
||||
- devices is defined
|
||||
- inventory_hostname in groups.get(osd_group_name, [])
|
||||
- not osd_auto_discovery|default(False)
|
||||
- not osd_auto_discovery | default(False) | bool
|
||||
|
||||
- name: set_fact devices generate device list when osd_auto_discovery
|
||||
set_fact:
|
||||
devices: "{{ devices | default([]) + [ item.key | regex_replace('^', '/dev/') ] }}"
|
||||
with_dict: "{{ ansible_devices }}"
|
||||
when:
|
||||
- osd_auto_discovery|default(False)
|
||||
- osd_auto_discovery | default(False) | bool
|
||||
- ansible_devices is defined
|
||||
- item.value.removable == "0"
|
||||
- item.value.sectors != "0"
|
||||
|
@ -187,28 +187,28 @@
|
|||
set_fact:
|
||||
ceph_uid: 64045
|
||||
when:
|
||||
- not containerized_deployment
|
||||
- not containerized_deployment | bool
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
- name: set_fact ceph_uid for red hat or suse based system - non container
|
||||
set_fact:
|
||||
ceph_uid: 167
|
||||
when:
|
||||
- not containerized_deployment
|
||||
- not containerized_deployment | bool
|
||||
- ansible_os_family in ['RedHat', 'Suse']
|
||||
|
||||
- name: set_fact ceph_uid for debian based system - container
|
||||
set_fact:
|
||||
ceph_uid: 64045
|
||||
when:
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
- ceph_docker_image_tag | string is search("ubuntu")
|
||||
|
||||
- name: set_fact ceph_uid for red hat based system - container
|
||||
set_fact:
|
||||
ceph_uid: 167
|
||||
when:
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
- (ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
|
||||
or (ansible_distribution == 'RedHat'))
|
||||
|
||||
|
@ -216,7 +216,7 @@
|
|||
set_fact:
|
||||
ceph_uid: 167
|
||||
when:
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
- ceph_docker_image is search("rhceph")
|
||||
|
||||
- name: set_fact rgw_hostname
|
||||
|
|
|
@ -42,7 +42,7 @@
|
|||
dest: "/etc/grafana/ceph-dashboard.crt"
|
||||
mode: 0640
|
||||
when:
|
||||
- grafana_crt
|
||||
- grafana_crt | bool
|
||||
- dashboard_protocol == "https"
|
||||
|
||||
- name: copy grafana SSL certificate key
|
||||
|
@ -51,7 +51,7 @@
|
|||
dest: "/etc/grafana/ceph-dashboard.key"
|
||||
mode: 0440
|
||||
when:
|
||||
- grafana_key
|
||||
- grafana_key | bool
|
||||
- dashboard_protocol == "https"
|
||||
|
||||
- name: generate a Self Signed OpenSSL certificate for dashboard
|
||||
|
@ -60,7 +60,7 @@
|
|||
openssl req -new -nodes -x509 -subj '/O=IT/CN=ceph-grafana' -days 3650 -keyout /etc/grafana/ceph-dashboard.key -out /etc/grafana/ceph-dashboard.crt -extensions v3_ca
|
||||
when:
|
||||
- dashboard_protocol == "https"
|
||||
- not grafana_key or not grafana_crt
|
||||
- not grafana_key | bool or not grafana_crt | bool
|
||||
|
||||
- name: set owner/group on /etc/grafana
|
||||
file:
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
- name: handlers
|
||||
when: not rolling_update
|
||||
when: not rolling_update | bool
|
||||
block:
|
||||
- name: update apt cache
|
||||
apt:
|
||||
|
@ -27,7 +27,7 @@
|
|||
listen: "restart ceph mons"
|
||||
when:
|
||||
- mon_group_name in group_names
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
|
||||
- name: restart ceph mon daemon(s) - non container
|
||||
command: /usr/bin/env bash /tmp/restart_mon_daemon.sh
|
||||
|
@ -35,10 +35,10 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- mon_group_name in group_names
|
||||
- not containerized_deployment
|
||||
- hostvars[item]['_mon_handler_called'] | default(False)
|
||||
- not containerized_deployment | bool
|
||||
- hostvars[item]['_mon_handler_called'] | default(False) | bool
|
||||
- mon_socket_stat.rc == 0
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
with_items: "{{ groups[mon_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
run_once: True
|
||||
|
@ -49,11 +49,11 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- mon_group_name in group_names
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
- ceph_mon_container_stat.get('rc') == 0
|
||||
- hostvars[item]['_mon_handler_called'] | default(False)
|
||||
- hostvars[item]['_mon_handler_called'] | default(False) | bool
|
||||
- ceph_mon_container_stat.get('stdout_lines', [])|length != 0
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
with_items: "{{ groups[mon_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
run_once: True
|
||||
|
@ -84,21 +84,21 @@
|
|||
listen: "restart ceph osds"
|
||||
when:
|
||||
- osd_group_name in group_names
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
|
||||
- name: restart ceph osds daemon(s) - non container
|
||||
command: /usr/bin/env bash /tmp/restart_osd_daemon.sh
|
||||
listen: "restart ceph osds"
|
||||
when:
|
||||
- osd_group_name in group_names
|
||||
- not containerized_deployment
|
||||
- not rolling_update
|
||||
- not containerized_deployment | bool
|
||||
- not rolling_update | bool
|
||||
# We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
|
||||
# except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
|
||||
- osd_socket_stat.rc == 0
|
||||
- ceph_current_status.fsid is defined
|
||||
- handler_health_osd_check
|
||||
- hostvars[item]['_osd_handler_called'] | default(False)
|
||||
- handler_health_osd_check | bool
|
||||
- hostvars[item]['_osd_handler_called'] | default(False) | bool
|
||||
with_items: "{{ groups[osd_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
run_once: True
|
||||
|
@ -110,12 +110,12 @@
|
|||
# We do not want to run these checks on initial deployment (`socket_osd_container_stat.results[n].rc == 0`)
|
||||
# except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
|
||||
- osd_group_name in group_names
|
||||
- containerized_deployment
|
||||
- not rolling_update
|
||||
- containerized_deployment | bool
|
||||
- not rolling_update | bool
|
||||
- inventory_hostname == groups.get(osd_group_name) | last
|
||||
- ceph_osd_container_stat.get('rc') == 0
|
||||
- ceph_osd_container_stat.get('stdout_lines', [])|length != 0
|
||||
- handler_health_osd_check
|
||||
- handler_health_osd_check | bool
|
||||
- hostvars[item]['_osd_handler_called'] | default(False)
|
||||
with_items: "{{ groups[osd_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -147,8 +147,8 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- mds_group_name in group_names
|
||||
- not containerized_deployment
|
||||
- hostvars[item]['_mds_handler_called'] | default(False)
|
||||
- not containerized_deployment | bool
|
||||
- hostvars[item]['_mds_handler_called'] | default(False) | bool
|
||||
- mds_socket_stat.rc == 0
|
||||
with_items: "{{ groups[mds_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -160,9 +160,9 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- mds_group_name in group_names
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
- ceph_mds_container_stat.get('rc') == 0
|
||||
- hostvars[item]['_mds_handler_called'] | default(False)
|
||||
- hostvars[item]['_mds_handler_called'] | default(False) | bool
|
||||
- ceph_mds_container_stat.get('stdout_lines', [])|length != 0
|
||||
with_items: "{{ groups[mds_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -194,8 +194,8 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- rgw_group_name in group_names
|
||||
- not containerized_deployment
|
||||
- hostvars[item]['_rgw_handler_called'] | default(False)
|
||||
- not containerized_deployment | bool
|
||||
- hostvars[item]['_rgw_handler_called'] | default(False) | bool
|
||||
- rgw_socket_stat.rc == 0
|
||||
with_items: "{{ groups[rgw_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -207,9 +207,9 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- rgw_group_name in group_names
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
- ceph_rgw_container_stat.get('rc') == 0
|
||||
- hostvars[item]['_rgw_handler_called'] | default(False)
|
||||
- hostvars[item]['_rgw_handler_called'] | default(False) | bool
|
||||
- ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
|
||||
with_items: "{{ groups[rgw_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -241,8 +241,8 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- nfs_group_name in group_names
|
||||
- not containerized_deployment
|
||||
- hostvars[item]['_nfs_handler_called'] | default(False)
|
||||
- not containerized_deployment | bool
|
||||
- hostvars[item]['_nfs_handler_called'] | default(False) | bool
|
||||
- nfs_socket_stat.rc == 0
|
||||
with_items: "{{ groups[nfs_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -254,9 +254,9 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- nfs_group_name in group_names
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
- ceph_nfs_container_stat.get('rc') == 0
|
||||
- hostvars[item]['_nfs_handler_called'] | default(False)
|
||||
- hostvars[item]['_nfs_handler_called'] | default(False) | bool
|
||||
- ceph_nfs_container_stat.get('stdout_lines', [])|length != 0
|
||||
with_items: "{{ groups[nfs_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -288,8 +288,8 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- rbdmirror_group_name in group_names
|
||||
- not containerized_deployment
|
||||
- hostvars[item]['_rbdmirror_handler_called'] | default(False)
|
||||
- not containerized_deployment | bool
|
||||
- hostvars[item]['_rbdmirror_handler_called'] | default(False) | bool
|
||||
- rbd_mirror_socket_stat.rc == 0
|
||||
with_items: "{{ groups[rbdmirror_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -301,9 +301,9 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- rbdmirror_group_name in group_names
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
- ceph_rbd_mirror_container_stat.get('rc') == 0
|
||||
- hostvars[item]['_rbdmirror_handler_called'] | default(False)
|
||||
- hostvars[item]['_rbdmirror_handler_called'] | default(False) | bool
|
||||
- ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
|
||||
with_items: "{{ groups[rbdmirror_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -335,10 +335,10 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- mgr_group_name in group_names
|
||||
- not containerized_deployment
|
||||
- hostvars[item]['_mgr_handler_called'] | default(False)
|
||||
- not containerized_deployment | bool
|
||||
- hostvars[item]['_mgr_handler_called'] | default(False) | bool
|
||||
- mgr_socket_stat.rc == 0
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
with_items: "{{ groups[mgr_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
run_once: True
|
||||
|
@ -349,11 +349,11 @@
|
|||
when:
|
||||
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
|
||||
- mgr_group_name in group_names
|
||||
- containerized_deployment
|
||||
- containerized_deployment | bool
|
||||
- ceph_mgr_container_stat.get('rc') == 0
|
||||
- hostvars[item]['_mgr_handler_called'] | default(False)
|
||||
- hostvars[item]['_mgr_handler_called'] | default(False) | bool
|
||||
- ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
with_items: "{{ groups[mgr_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
run_once: True
|
||||
|
@ -384,7 +384,7 @@
|
|||
when:
|
||||
- iscsi_gw_group_name in group_names
|
||||
- ceph_tcmu_runner_stat.get('rc') == 0
|
||||
- hostvars[item]['_tcmu_runner_handler_called'] | default(False)
|
||||
- hostvars[item]['_tcmu_runner_handler_called'] | default(False) | bool
|
||||
- ceph_tcmu_runner_stat.get('stdout_lines', [])|length != 0
|
||||
with_items: "{{ groups[iscsi_gw_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -416,7 +416,7 @@
|
|||
when:
|
||||
- iscsi_gw_group_name in group_names
|
||||
- ceph_rbd_target_gw_stat.get('rc') == 0
|
||||
- hostvars[item]['_rbd_target_gw_handler_called'] | default(False)
|
||||
- hostvars[item]['_rbd_target_gw_handler_called'] | default(False) | bool
|
||||
- ceph_rbd_target_gw_stat.get('stdout_lines', [])|length != 0
|
||||
with_items: "{{ groups[iscsi_gw_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
@ -448,7 +448,7 @@
|
|||
when:
|
||||
- iscsi_gw_group_name in group_names
|
||||
- ceph_rbd_target_api_stat.get('rc') == 0
|
||||
- hostvars[item]['_rbd_target_api_handler_called'] | default(False)
|
||||
- hostvars[item]['_rbd_target_api_handler_called'] | default(False) | bool
|
||||
- ceph_rbd_target_api_stat.get('stdout_lines', [])|length != 0
|
||||
with_items: "{{ groups[iscsi_gw_group_name] }}"
|
||||
delegate_to: "{{ item }}"
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
---
|
||||
- name: include check_running_containers.yml
|
||||
include_tasks: check_running_containers.yml
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: include check_socket_non_container.yml
|
||||
include_tasks: check_socket_non_container.yml
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
|
|
@ -8,10 +8,10 @@
|
|||
check_mode: no
|
||||
changed_when: false
|
||||
tags: firewall
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- when: (firewalld_pkg_query.get('rc', 1) == 0
|
||||
or is_atomic)
|
||||
or is_atomic | bool)
|
||||
block:
|
||||
- name: start firewalld
|
||||
service:
|
||||
|
|
|
@ -2,11 +2,11 @@
|
|||
- name: include_tasks configure_firewall.yml
|
||||
include_tasks: configure_firewall.yml
|
||||
when:
|
||||
- configure_firewall
|
||||
- configure_firewall | bool
|
||||
- ansible_os_family in ['RedHat', 'Suse']
|
||||
tags: configure_firewall
|
||||
|
||||
- name: include_tasks setup_ntp.yml
|
||||
include_tasks: setup_ntp.yml
|
||||
when: ntp_service_enabled
|
||||
when: ntp_service_enabled | bool
|
||||
tags: configure_ntp
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# Installation of NTP daemons needs to be a separate task since installations
|
||||
# can't happen on Atomic
|
||||
- name: install the ntp daemon
|
||||
when: not is_atomic
|
||||
when: not is_atomic | bool
|
||||
block:
|
||||
- name: install ntpd
|
||||
package:
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
owner: "root"
|
||||
group: "root"
|
||||
mode: "{{ ceph_keyring_permissions }}"
|
||||
when: cephx
|
||||
when: cephx | bool
|
||||
|
||||
- name: deploy gateway settings, used by the ceph_iscsi_config modules
|
||||
template:
|
||||
|
@ -22,7 +22,7 @@
|
|||
set_fact:
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: check if a rbd pool exists
|
||||
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls --format json"
|
||||
|
|
|
@ -4,19 +4,19 @@
|
|||
|
||||
- name: include non-container/prerequisites.yml
|
||||
include_tasks: non-container/prerequisites.yml
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
# deploy_ssl_keys used the ansible controller to create self-signed crt/key/pub files
|
||||
# and transfers them to /etc/ceph directory on each controller. SSL certs are used by
|
||||
# the API for https support.
|
||||
- name: include deploy_ssl_keys.yml
|
||||
include_tasks: deploy_ssl_keys.yml
|
||||
when: generate_crt|bool
|
||||
when: generate_crt | bool
|
||||
|
||||
- name: include non-container/configure_iscsi.yml
|
||||
include_tasks: non-container/configure_iscsi.yml
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: include containerized.yml
|
||||
include_tasks: container/containerized.yml
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
when:
|
||||
- ceph_origin == 'repository'
|
||||
- ceph_repository == 'dev'
|
||||
- ceph_iscsi_config_dev
|
||||
- ceph_iscsi_config_dev | bool
|
||||
block:
|
||||
- name: set_fact ceph_iscsi_repos
|
||||
set_fact:
|
||||
|
|
|
@ -3,20 +3,20 @@
|
|||
include_tasks: create_mds_filesystems.yml
|
||||
when:
|
||||
- inventory_hostname == groups[mds_group_name] | first
|
||||
- not rolling_update
|
||||
- not rolling_update | bool
|
||||
|
||||
- name: set_fact container_exec_cmd
|
||||
set_fact:
|
||||
container_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: include common.yml
|
||||
include_tasks: common.yml
|
||||
|
||||
- name: non_containerized.yml
|
||||
include_tasks: non_containerized.yml
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: containerized.yml
|
||||
include_tasks: containerized.yml
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
|
|
@ -64,7 +64,7 @@
|
|||
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
|
||||
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
|
||||
mode: "{{ ceph_keyring_permissions }}"
|
||||
when: cephx
|
||||
when: cephx | bool
|
||||
|
||||
- name: copy ceph keyring(s) if needed
|
||||
copy:
|
||||
|
@ -74,7 +74,7 @@
|
|||
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
|
||||
mode: "{{ ceph_keyring_permissions }}"
|
||||
when:
|
||||
- cephx
|
||||
- cephx | bool
|
||||
- groups.get(mgr_group_name, []) | length > 0
|
||||
- copy_admin_key | bool
|
||||
|
||||
|
@ -84,4 +84,4 @@
|
|||
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
|
||||
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
|
||||
mode: "{{ ceph_keyring_permissions }}"
|
||||
when: cephx
|
||||
when: cephx | bool
|
||||
|
|
|
@ -2,14 +2,14 @@
|
|||
- name: set_fact container_exec_cmd
|
||||
set_fact:
|
||||
container_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
|
||||
- name: include common.yml
|
||||
include_tasks: common.yml
|
||||
|
||||
- name: include pre_requisite.yml
|
||||
include_tasks: pre_requisite.yml
|
||||
when: not containerized_deployment
|
||||
when: not containerized_deployment | bool
|
||||
|
||||
- name: include start_mgr.yml
|
||||
include_tasks: start_mgr.yml
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
owner: "root"
|
||||
group: "root"
|
||||
mode: "0644"
|
||||
when: containerized_deployment
|
||||
when: containerized_deployment | bool
|
||||
notify: restart ceph mgrs
|
||||
|
||||
- name: systemd start mgr
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
changed_when: false
|
||||
|
||||
- name: tasks for MONs when cephx is enabled
|
||||
when: cephx
|
||||
when: cephx | bool
|
||||
block:
|
||||
- name: fetch ceph initial keys
|
||||
ceph_key:
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
register: config_crush_hierarchy
|
||||
when:
|
||||
- inventory_hostname == groups.get(mon_group_name) | last
|
||||
- create_crush_tree
|
||||
- create_crush_tree | bool
|
||||
- hostvars[item]['osd_crush_location'] is defined
|
||||
|
||||
- name: create configured crush rules
|
||||
|
|
|
@@ -48,7 +48,7 @@
cp /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
/etc/ceph/{{ cluster }}.mon.keyring
changed_when: false
when: containerized_deployment
when: containerized_deployment | bool

- name: create (and fix ownership of) monitor directory
file:
@@ -75,7 +75,7 @@
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: create_custom_admin_secret
when:
- cephx
- cephx | bool
- admin_secret != 'admin_secret'

- name: set_fact ceph-authtool container command
@@ -88,7 +88,7 @@
/var/lib/ceph/tmp/{{ cluster }}.mon..keyring --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
when:
- not create_custom_admin_secret.get('skipped')
- cephx
- cephx | bool
- admin_secret != 'admin_secret'

- name: set_fact ceph-mon container command
@@ -107,7 +107,7 @@
--keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when: cephx
when: cephx | bool

- name: ceph monitor mkfs without keyring
command: >
@@ -120,4 +120,4 @@
--fsid {{ fsid }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when: not cephx
when: not cephx | bool
@@ -2,29 +2,28 @@
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: include deploy_monitors.yml
include_tasks: deploy_monitors.yml
when:
# we test for both container and non-container
- (mon_socket_stat is defined and mon_socket_stat.get('rc') != 0) or (ceph_mon_container_stat is defined and ceph_mon_container_stat.get('stdout_lines', [])|length == 0)
- not switch_to_containers | default(False)
- not switch_to_containers | default(False) | bool

- name: include start_monitor.yml
include_tasks: start_monitor.yml

- name: include_tasks ceph_keys.yml
include_tasks: ceph_keys.yml
when: not switch_to_containers | default(False)
when: not switch_to_containers | default(False) | bool

- name: include secure_cluster.yml
include_tasks: secure_cluster.yml
when:
- secure_cluster
- secure_cluster | bool
- inventory_hostname == groups[mon_group_name] | first

- name: crush_rules.yml
include_tasks: crush_rules.yml
when: crush_rule_config
when: crush_rule_config | bool
@@ -4,7 +4,7 @@
state: directory
path: "/etc/systemd/system/ceph-mon@.service.d/"
when:
- not containerized_deployment
- not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
@@ -15,7 +15,7 @@
config_overrides: "{{ ceph_mon_systemd_overrides | default({}) }}"
config_type: "ini"
when:
- not containerized_deployment
- not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
@@ -28,7 +28,7 @@
group: "root"
mode: "0644"
notify: restart ceph mons
when: containerized_deployment
when: containerized_deployment | bool

- name: start the monitor service
systemd:

@@ -10,6 +10,6 @@
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
- cephx
- item.copy_key|bool
- cephx | bool
- item.copy_key | bool
- groups.get(mon_group_name, []) | length > 0
@@ -2,7 +2,7 @@
- name: set_fact container_exec_cmd_nfs
set_fact:
container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: check if "{{ ceph_nfs_rgw_user }}" exists
command: "{{ container_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
@@ -11,7 +11,7 @@
changed_when: false
failed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when: nfs_obj_gw
when: nfs_obj_gw | bool

- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
command: "{{ container_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
@@ -20,7 +20,7 @@
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- nfs_obj_gw
- nfs_obj_gw | bool
- rgwuser_exists.get('rc', 1) != 0

- name: set_fact ceph_nfs_rgw_access_key
@@ -28,7 +28,7 @@
ceph_nfs_rgw_access_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['access_key'] if rgwuser_exists.get('rc', 1) != 0 else (rgwuser_exists.stdout | from_json)['keys'][0]['access_key'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- nfs_obj_gw
- nfs_obj_gw | bool
- ceph_nfs_rgw_access_key is not defined

- name: set_fact ceph_nfs_rgw_secret_key
@@ -36,5 +36,5 @@
ceph_nfs_rgw_secret_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['secret_key'] if rgwuser_exists.get('rc', 1) != 0 else (rgwuser_exists.stdout | from_json)['keys'][0]['secret_key'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- nfs_obj_gw
- nfs_obj_gw | bool
- ceph_nfs_rgw_secret_key is not defined
@@ -2,18 +2,18 @@
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: include common.yml
include_tasks: common.yml

- name: include pre_requisite_non_container.yml
include_tasks: pre_requisite_non_container.yml
when: not containerized_deployment
when: not containerized_deployment | bool

- name: include pre_requisite_container.yml
include_tasks: pre_requisite_container.yml
when: containerized_deployment
when: containerized_deployment | bool

- name: include create_rgw_nfs_user.yml
import_tasks: create_rgw_nfs_user.yml
@@ -23,7 +23,7 @@
- name: include ganesha_selinux_fix.yml
import_tasks: ganesha_selinux_fix.yml
when:
- not containerized_deployment
- not containerized_deployment | bool
- ansible_os_family == 'RedHat'

- name: copy rgw keyring when deploying internal ganesha with external ceph cluster

@@ -5,7 +5,7 @@
set_fact:
admin_keyring:
- "/etc/ceph/{{ cluster }}.client.admin.keyring"
when: copy_admin_key
when: copy_admin_key | bool

- name: set_fact ceph_config_keys
set_fact:
@@ -15,7 +15,7 @@
- name: merge ceph_config_keys and admin_keyring
set_fact:
ceph_config_keys: "{{ ceph_config_keys + admin_keyring }}"
when: copy_admin_key
when: copy_admin_key | bool

- name: stat for config and keys
stat:
@@ -55,4 +55,4 @@

- name: reload dbus configuration
command: "killall -SIGHUP dbus-daemon"
when: ceph_nfs_dynamic_exports
when: ceph_nfs_dynamic_exports | bool
@@ -39,11 +39,11 @@
- { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
- { name: "/var/log/ceph", create: true }
- { name: "/var/run/ceph", create: true }
when: item.create|bool
when: item.create | bool

- name: cephx related tasks
when:
- cephx
- cephx | bool
- groups.get(mon_group_name, []) | length > 0
block:
- name: copy bootstrap cephx keys
@@ -55,10 +55,10 @@
mode: "0600"
with_items:
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
when: item.copy_key|bool
when: item.copy_key | bool

- name: nfs object gateway related tasks
when: nfs_obj_gw
when: nfs_obj_gw | bool
block:
- name: create rados gateway keyring
command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring

@@ -4,7 +4,7 @@
block:
- name: stable repos specific tasks
when:
- nfs_ganesha_stable
- nfs_ganesha_stable | bool
- ceph_repository == 'community'
block:
- name: add nfs-ganesha stable repository
@@ -25,7 +25,7 @@

- name: debian based systems - dev repos specific tasks
when:
- nfs_ganesha_dev
- nfs_ganesha_dev | bool
- ceph_repository == 'dev'
block:
- name: fetch nfs-ganesha development repository
@@ -62,14 +62,14 @@
allow_unauthenticated: yes
register: result
until: result is succeeded
when: nfs_obj_gw
when: nfs_obj_gw | bool
- name: install nfs rgw/cephfs gateway - debian
apt:
name: nfs-ganesha-ceph
allow_unauthenticated: yes
register: result
until: result is succeeded
when: nfs_file_gw
when: nfs_file_gw | bool

- name: debian based systems - rhcs installation
when:
@@ -88,11 +88,11 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
when: nfs_file_gw
when: nfs_file_gw | bool
- name: install red hat storage nfs obj gateway
apt:
name: nfs-ganesha-rgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
when: nfs_obj_gw
when: nfs_obj_gw | bool
@@ -3,7 +3,7 @@
- name: set_fact container_exec_cmd_nfs
set_fact:
container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
when: containerized_deployment | bool

- name: check if rados index object exists
shell: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
@@ -11,14 +11,14 @@
failed_when: false
register: rados_index_exists
check_mode: no
when: ceph_nfs_rados_backend
when: ceph_nfs_rados_backend | bool
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true

- name: create an empty rados index object
command: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
when:
- ceph_nfs_rados_backend
- ceph_nfs_rados_backend | bool
- rados_index_exists.rc != 0
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
@@ -48,7 +48,7 @@
owner: "root"
group: "root"
mode: "0755"
when: ceph_nfs_dynamic_exports
when: ceph_nfs_dynamic_exports | bool

- name: create exports dir index file
copy:
@@ -58,7 +58,7 @@
owner: "root"
group: "root"
mode: "0644"
when: ceph_nfs_dynamic_exports
when: ceph_nfs_dynamic_exports | bool

- name: generate systemd unit file
become: true
@@ -68,7 +68,7 @@
owner: "root"
group: "root"
mode: "0644"
when: containerized_deployment
when: containerized_deployment | bool
notify: restart ceph nfss

- name: systemd start nfs container
@@ -79,8 +79,8 @@
masked: no
daemon_reload: yes
when:
- containerized_deployment
- ceph_nfs_enable_service
- containerized_deployment | bool
- ceph_nfs_enable_service | bool

- name: start nfs gateway service
systemd:
@@ -89,5 +89,5 @@
enabled: yes
masked: no
when:
- not containerized_deployment
- ceph_nfs_enable_service
- not containerized_deployment | bool
- ceph_nfs_enable_service | bool
@@ -6,7 +6,7 @@
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0755"
when: cephx
when: cephx | bool
with_items:
- /var/lib/ceph/bootstrap-osd/
- /var/lib/ceph/osd/
@@ -22,5 +22,5 @@
- { name: "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
- cephx
- item.copy_key|bool
- cephx | bool
- item.copy_key | bool

@@ -3,30 +3,30 @@
set_fact:
docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
when:
- containerized_deployment
- containerized_deployment | bool
- osd_objectstore == 'filestore'
- not dmcrypt
- not dmcrypt | bool

- name: set_fact docker_env_args '-e osd_bluestore=0 -e osd_filestore=1 -e osd_dmcrypt=1'
set_fact:
docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
when:
- containerized_deployment
- containerized_deployment | bool
- osd_objectstore == 'filestore'
- dmcrypt
- dmcrypt | bool

- name: set_fact docker_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=0'
set_fact:
docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
when:
- containerized_deployment
- containerized_deployment | bool
- osd_objectstore == 'bluestore'
- not dmcrypt
- not dmcrypt | bool

- name: set_fact docker_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=1'
set_fact:
docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
when:
- containerized_deployment
- containerized_deployment | bool
- osd_objectstore == 'bluestore'
- dmcrypt
- dmcrypt | bool
@@ -9,7 +9,7 @@
register: result
until: result is succeeded
when:
- not containerized_deployment
- not containerized_deployment | bool
- ansible_os_family != 'ClearLinux'

- name: install numactl when needed
@@ -18,7 +18,7 @@
register: result
until: result is succeeded
when:
- containerized_deployment
- containerized_deployment | bool
- ceph_osd_numactl_opts != ""
tags: with_pkg
@@ -27,7 +27,7 @@
name: lvm2
register: result
until: result is succeeded
when: not is_atomic
when: not is_atomic | bool
tags: with_pkg

- name: include_tasks common.yml
@@ -47,13 +47,13 @@
include_tasks: scenarios/lvm.yml
when:
- lvm_volumes|length > 0
- not rolling_update|default(False)
- not rolling_update|default(False) | bool

- name: include_tasks scenarios/lvm-batch.yml
include_tasks: scenarios/lvm-batch.yml
when:
- devices|length > 0
- not rolling_update|default(False)
- not rolling_update|default(False) | bool

- name: include_tasks start_osds.yml
include_tasks: start_osds.yml
@@ -63,8 +63,8 @@
openstack_keys_tmp: "{{ openstack_keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap|default(''), 'mds': item.mds_cap|default(''), 'mgr': item.mgr_cap|default('') } , 'mode': item.mode } ] }}"
with_items: "{{ openstack_keys }}"
when:
- not add_osd|default(False)
- openstack_config
- not add_osd|default(False) | bool
- openstack_config | bool
- item.get('mon_cap', None)
# it's enough to assume we are running an old-fashionned syntax simply by checking the presence of mon_cap since every key needs this cap
@@ -72,13 +72,13 @@
set_fact:
openstack_keys: "{{ openstack_keys_tmp }}"
when:
- not add_osd|default(False)
- not add_osd|default(False) | bool
- openstack_keys_tmp is defined

# Create the pools listed in openstack_pools
- name: include openstack_config.yml
include_tasks: openstack_config.yml
when:
- not add_osd|default(False)
- openstack_config
- not add_osd|default(False) | bool
- openstack_config | bool
- inventory_hostname == groups[osd_group_name] | last
@@ -79,7 +79,7 @@
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ openstack_keys }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: cephx
when: cephx | bool

- name: fetch openstack cephx key(s)
fetch:
@@ -101,6 +101,6 @@
- "{{ openstack_keys }}"
delegate_to: "{{ item.0 }}"
when:
- cephx
- openstack_config
- cephx | bool
- openstack_config | bool
- item.0 != groups[mon_group_name]

@@ -1,6 +1,6 @@
---
- name: container specific tasks
when: containerized_deployment
when: containerized_deployment | bool
block:
- name: umount ceph disk (if on openstack)
mount:
@@ -8,7 +8,7 @@
src: /dev/vdb
fstype: ext3
state: unmounted
when: ceph_docker_on_openstack
when: ceph_docker_on_openstack | bool

- name: generate ceph osd docker run script
become: true
@@ -47,7 +47,7 @@
group: "root"
mode: "0644"
notify: restart ceph osds
when: containerized_deployment
when: containerized_deployment | bool

- name: systemd start osd
systemd:
@@ -23,7 +23,7 @@
group: "root"
mode: "0755"
register: "tmpfiles_d"
when: disable_transparent_hugepage
when: disable_transparent_hugepage | bool

- name: disable transparent hugepage
template:
@@ -34,7 +34,7 @@
mode: "0644"
force: "yes"
validate: "systemd-tmpfiles --create %s"
when: disable_transparent_hugepage
when: disable_transparent_hugepage | bool

- name: get default vm.min_free_kbytes
command: sysctl -b vm.min_free_kbytes
@@ -58,4 +58,4 @@
with_items:
- { name: "fs.aio-max-nr", value: "1048576", enable: (osd_objectstore == 'bluestore') }
- "{{ os_tuning_params }}"
when: item.enable | default(true)
when: item.enable | default(true) | bool

@@ -18,7 +18,7 @@
-o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
args:
creates: /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
when: not containerized_deployment
when: not containerized_deployment | bool

- name: set rbd-mirror key permissions
file:
@@ -26,4 +26,4 @@
owner: "ceph"
group: "ceph"
mode: "{{ ceph_keyring_permissions }}"
when: not containerized_deployment
when: not containerized_deployment | bool
@@ -1,24 +1,24 @@
---
- name: include pre_requisite.yml
include_tasks: pre_requisite.yml
when: not containerized_deployment
when: not containerized_deployment | bool

- name: include common.yml
include_tasks: common.yml
when: cephx
when: cephx | bool

- name: tasks for non-containerized deployment
when: not containerized_deployment
when: not containerized_deployment | bool
block:
- name: include start_rbd_mirror.yml
include_tasks: start_rbd_mirror.yml

- name: include configure_mirroring.yml
include_tasks: configure_mirroring.yml
when: ceph_rbd_mirror_configure
when: ceph_rbd_mirror_configure | bool

- name: tasks for containerized deployment
when: containerized_deployment
when: containerized_deployment | bool
block:
- name: set_fact container_exec_cmd
set_fact:

@@ -29,5 +29,5 @@
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
- cephx
- item.copy_key|bool
- cephx | bool
- item.copy_key | bool
@@ -4,23 +4,23 @@

- name: include_tasks pre_requisite.yml
include_tasks: pre_requisite.yml
when: not containerized_deployment
when: not containerized_deployment | bool

- name: include_tasks openstack-keystone.yml
include_tasks: openstack-keystone.yml
when: radosgw_keystone_ssl|bool
when: radosgw_keystone_ssl | bool

- name: include_tasks start_radosgw.yml
include_tasks: start_radosgw.yml
when: not containerized_deployment
when: not containerized_deployment | bool

- name: include_tasks docker/main.yml
include_tasks: docker/main.yml
when: containerized_deployment
when: containerized_deployment | bool

- name: include_tasks multisite/main.yml
include_tasks: multisite/main.yml
when: rgw_multisite
when: rgw_multisite | bool

- name: rgw pool related tasks
when: rgw_create_pools is defined

@@ -6,14 +6,14 @@
- name: include_tasks master.yml
include_tasks: master.yml
when:
- rgw_zonemaster
- not rgw_zonesecondary
- rgw_zonemaster | bool
- not rgw_zonesecondary | bool

- name: include_tasks secondary.yml
include_tasks: secondary.yml
when:
- not rgw_zonemaster
- rgw_zonesecondary
- not rgw_zonemaster | bool
- rgw_zonesecondary | bool

# Continue with common tasks
- name: add zone to rgw stanza in ceph.conf

@@ -5,7 +5,7 @@
creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring
changed_when: false
with_items: "{{ rgw_instances }}"
when: cephx
when: cephx | bool

- name: set rados gateway instance key permissions
file:
@@ -14,4 +14,4 @@
group: "ceph"
mode: "0600"
with_items: "{{ rgw_instances }}"
when: cephx
when: cephx | bool
@@ -7,8 +7,8 @@
fail:
msg: "fqdn configuration is not supported anymore. Use 'use_fqdn_yes_i_am_sure: true' if you really want to use it. See release notes for more details"
when:
- mon_use_fqdn or mds_use_fqdn
- not use_fqdn_yes_i_am_sure
- mon_use_fqdn | bool or mds_use_fqdn | bool
- not use_fqdn_yes_i_am_sure | bool

- name: debian based systems tasks
when: ansible_os_family == 'Debian'
@@ -31,7 +31,7 @@
fail:
msg: "ntp_daemon_type must be one of chronyd, ntpd, or timesyncd"
when:
- ntp_service_enabled
- ntp_service_enabled | bool
- ntp_daemon_type not in ['chronyd', 'ntpd', 'timesyncd']

# Since NTPd can not be installed on Atomic...
@@ -39,7 +39,7 @@
fail:
msg: installation can't happen on Atomic and ntpd needs to be installed
when:
- is_atomic | default(False)
- is_atomic | default(False) | bool
- ansible_os_family == 'RedHat'
- ntp_daemon_type == 'ntpd'
@@ -58,7 +58,7 @@
include_tasks: check_devices.yml
when:
- osd_group_name in group_names
- not osd_auto_discovery | default(False)
- not osd_auto_discovery | default(False) | bool
- devices|default([])|length > 0

- name: include check_eth_mon.yml
@@ -89,7 +89,7 @@
include_tasks: check_rgw_multisite.yml
when:
- inventory_hostname in groups.get(rgw_group_name, [])
- rgw_multisite
- rgw_multisite | bool

- name: include check_iscsi.yml
include_tasks: check_iscsi.yml
@@ -108,7 +108,7 @@
name: ceph-container-common
- import_role:
name: ceph-node-exporter
when: dashboard_enabled
when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
@@ -157,7 +157,7 @@
name: ceph-container-common
- import_role:
name: ceph-node-exporter
when: dashboard_enabled
when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
@@ -201,7 +201,7 @@
name: ceph-container-common
- import_role:
name: ceph-node-exporter
when: dashboard_enabled
when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
@@ -245,7 +245,7 @@
name: ceph-container-common
- import_role:
name: ceph-node-exporter
when: dashboard_enabled
when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
@@ -289,7 +289,7 @@
name: ceph-container-common
- import_role:
name: ceph-node-exporter
when: dashboard_enabled
when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
@@ -333,7 +333,7 @@
name: ceph-container-common
- import_role:
name: ceph-node-exporter
when: dashboard_enabled
when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
@@ -377,7 +377,7 @@
name: ceph-container-common
- import_role:
name: ceph-node-exporter
when: dashboard_enabled
when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
@@ -423,7 +423,7 @@
when: inventory_hostname == groups.get('clients', ['']) | first
- import_role:
name: ceph-node-exporter
when: dashboard_enabled
when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
@@ -469,7 +469,7 @@
name: ceph-container-common
- import_role:
name: ceph-node-exporter
when: dashboard_enabled
when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
@@ -543,7 +543,7 @@
name: ceph-prometheus
- import_role:
name: ceph-grafana
when: dashboard_enabled
when: dashboard_enabled | bool

- hosts: '{{ (groups["mgrs"] | default(groups["mons"]))[0] }}'
become: true
@@ -554,4 +554,4 @@
tags: ['ceph_update_config']
- import_role:
name: ceph-dashboard
when: dashboard_enabled
when: dashboard_enabled | bool
@@ -535,7 +535,7 @@
name: ceph-container-engine
- import_role:
name: ceph-node-exporter
when: dashboard_enabled
when: dashboard_enabled | bool


- hosts: grafana-server
@@ -559,7 +559,7 @@
name: ceph-prometheus
- import_role:
name: ceph-grafana
when: dashboard_enabled
when: dashboard_enabled | bool

- hosts: '{{ (groups["mgrs"] | default(groups["mons"]))[0] }}'
become: true
@@ -567,7 +567,7 @@
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
when: dashboard_enabled
when: dashboard_enabled | bool
- import_role:
name: ceph-dashboard
when: dashboard_enabled
when: dashboard_enabled | bool
@@ -25,7 +25,7 @@
state: present
register: result
until: result is succeeded
when: not is_atomic
when: not is_atomic | bool
- name: create physical volume
command: pvcreate /dev/sdb
failed_when: false
@@ -21,14 +21,14 @@
state: present
register: result
until: result is succeeded
when: not is_atomic
when: not is_atomic | bool

- name: generate and upload a random 10Mb file - containerized deployment
command: >
docker run --rm --name=rgw_multisite_test --entrypoint=/bin/bash {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -c 'dd if=/dev/urandom of=/tmp/testinfra.img bs=1M count=10; {{ s3cmd_cmd }} mb s3://testinfra; {{ s3cmd_cmd }} put /tmp/testinfra.img s3://testinfra'
when:
- rgw_zonemaster
- containerized_deployment | default(False)
- rgw_zonemaster | bool
- containerized_deployment | default(False) | bool

- name: generate and upload a random a 10Mb file - non containerized
shell: >
@@ -36,16 +36,16 @@
{{ s3cmd_cmd }} mb s3://testinfra;
{{ s3cmd_cmd }} put /tmp/testinfra.img s3://testinfra
when:
- rgw_zonemaster | default(False)
- not containerized_deployment | default(False)
- rgw_zonemaster | default(False) | bool
- not containerized_deployment | default(False) | bool

- name: get info from replicated file - containerized deployment
command: >
docker run --rm --name=rgw_multisite_test --entrypoint=/bin/bash {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -c '{{ s3cmd_cmd }} info s3://testinfra/testinfra.img'
register: s3cmd_info_status
when:
- not rgw_zonemaster | default(False)
- containerized_deployment | default(False)
- not rgw_zonemaster | default(False) | bool
- containerized_deployment | default(False) | bool
retries: 10
delay: 2
until: s3cmd_info_status.get('rc', 1) == 0
@@ -55,8 +55,8 @@
{{ s3cmd_cmd }} info s3://testinfra/testinfra.img
register: s3cmd_info_status
when:
- not rgw_zonemaster | default(False)
- not containerized_deployment | default(False)
- not rgw_zonemaster | default(False) | bool
- not containerized_deployment | default(False) | bool
retries: 10
delay: 2
until: s3cmd_info_status.get('rc', 1) == 0
@@ -64,7 +64,7 @@
dest: /etc/yum.repos.d
owner: root
group: root
when: not is_atomic
when: not is_atomic | bool

- name: enable the rhel-7-extras-nightly repo
command: "yum-config-manager --enable rhel-7-extras-nightly"
@@ -105,7 +105,7 @@
baseurl: "{{ repo_url }}/MON/x86_64/os/"
gpgcheck: no
enabled: yes
when: not is_atomic
when: not is_atomic | bool

- hosts: osds
gather_facts: false
@@ -119,7 +119,7 @@
baseurl: "{{ repo_url }}/OSD/x86_64/os/"
gpgcheck: no
enabled: yes
when: not is_atomic
when: not is_atomic | bool

- name: set MTU on eth2
command: "ifconfig eth2 mtu 1400 up"
@@ -136,4 +136,4 @@
baseurl: "{{ repo_url }}/Tools/x86_64/os/"
gpgcheck: no
enabled: yes
when: not is_atomic
when: not is_atomic | bool
@@ -21,7 +21,7 @@
state: present
register: result
until: result is succeeded
when: not is_atomic
when: not is_atomic | bool

- name: centos based systems - configure repos
block:
@@ -51,7 +51,7 @@
state: absent
when:
- ansible_distribution == 'CentOS'
- not is_atomic
- not is_atomic | bool

- name: resize logical volume for root partition to fill remaining free space
lvol:
@@ -59,4 +59,4 @@
vg: atomicos
size: +100%FREE
resizefs: yes
when: is_atomic
when: is_atomic | bool
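In isolation, the pattern applied throughout this changeset looks like the following minimal sketch. The task and variable names (`my_flag`, `my_optional_flag`) are illustrative placeholders, not taken from this diff; the point is that the `bool` filter casts the value explicitly, so a string value such as `"false"` is no longer treated as truthy and the bare-variable deprecation warning disappears.
```yaml
# Deprecated bare-variable form:
#   when: my_flag
# Explicit boolean cast, as applied throughout this changeset:
- name: example task guarded by a boolean variable
  debug:
    msg: "runs only when my_flag evaluates to true"
  when: my_flag | bool

# Variables that may be undefined get a default before the cast;
# filters bind tighter than `not`, so this reads as `not (... | bool)`.
- name: example task skipped unless my_optional_flag is explicitly set
  debug:
    msg: "optional behaviour"
  when: not my_optional_flag | default(False) | bool
```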