improve coding style

Keywords requiring only one item shouldn't express it by creating a
list with a single item.
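
For illustration, the shape of the change, using one of the `when`
conditions from the diff below (the same collapse applies to
single-item `hosts`, `with_items`, `notify`, and `tags` values):

    # before: a one-item list under a keyword that also accepts a scalar
    - name: gather facts
      setup:
      when:
        - not delegate_facts_host | bool

    # after: the single value is passed directly; behavior is unchanged
    - name: gather facts
      setup:
      when: not delegate_facts_host | bool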

Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit 739a662c80)

Conflicts:
	roles/ceph-mon/tasks/ceph_keys.yml
	roles/ceph-validate/tasks/check_devices.yml
pull/3936/head
Rishabh Dave 2019-04-01 21:16:15 +05:30 committed by mergify[bot]
parent 4752327340
commit 06b3ab2a6b
110 changed files with 495 additions and 964 deletions


@@ -28,8 +28,7 @@
pre_tasks:
- name: gather facts
setup:
when:
- not delegate_facts_host | bool
when: not delegate_facts_host | bool
- name: gather and delegate facts
setup:
@@ -39,8 +38,7 @@
- "{{ groups['mons'] }}"
- "{{ groups['osds'] }}"
run_once: True
when:
- delegate_facts_host | bool
when: delegate_facts_host | bool
tasks:
- import_role:
@@ -63,8 +61,7 @@
- name: gather facts
setup:
when:
- not delegate_facts_host | bool
when: not delegate_facts_host | bool
- name: gather and delegate facts
setup:
@@ -74,8 +71,7 @@
- "{{ groups['mons'] }}"
- "{{ groups['osds'] }}"
run_once: True
when:
- delegate_facts_host | bool
when: delegate_facts_host | bool
# this task is needed so we can skip the openstack_config.yml include in roles/ceph-osd
- name: set_fact add_osd


@@ -1,7 +1,6 @@
- name: creates logical volumes for the bucket index or fs journals on a single device.
become: true
hosts:
- osds
hosts: osds
vars:
logfile: |
@@ -56,16 +55,14 @@
vg: "{{ nvme_vg_name }}"
size: "{{ journal_size }}"
pvs: "{{ nvme_device }}"
with_items:
- "{{ nvme_device_lvs }}"
with_items: "{{ nvme_device_lvs }}"
- name: create lvs for fs journals for hdd devices
lvol:
lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ nvme_vg_name }}"
size: "{{ journal_size }}"
with_items:
- "{{ hdd_devices }}"
with_items: "{{ hdd_devices }}"
- name: create the lv for data portion of the bucket index on the nvme device
lvol:
@@ -73,8 +70,7 @@
vg: "{{ nvme_vg_name }}"
size: "{{ item.size }}"
pvs: "{{ nvme_device }}"
with_items:
- "{{ nvme_device_lvs }}"
with_items: "{{ nvme_device_lvs }}"
# Make sure all hdd devices have a unique volume group
- name: create vgs for all hdd devices
@@ -84,8 +80,7 @@
pesize: 4
state: present
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
with_items:
- "{{ hdd_devices }}"
with_items: "{{ hdd_devices }}"
- name: create lvs for the data portion on hdd devices
lvol:
@@ -93,8 +88,7 @@
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
size: "{{ hdd_lv_size }}"
pvs: "{{ item }}"
with_items:
- "{{ hdd_devices }}"
with_items: "{{ hdd_devices }}"
- name: "write output for osds.yml to {{ logfile_path }}"
become: false


@@ -1,7 +1,6 @@
- name: tear down existing osd filesystems then logical volumes, volume groups, and physical volumes
become: true
hosts:
- osds
hosts: osds
vars_prompt:
- name: ireallymeanit
@@ -52,8 +51,7 @@
vg: "{{ nvme_vg_name }}"
state: absent
force: yes
with_items:
- "{{ nvme_device_lvs }}"
with_items: "{{ nvme_device_lvs }}"
- name: tear down any existing hdd data lvs
lvol:
@@ -61,8 +59,7 @@
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
state: absent
force: yes
with_items:
- "{{ hdd_devices }}"
with_items: "{{ hdd_devices }}"
- name: tear down any existing lv of journal for bucket index
lvol:
@@ -70,8 +67,7 @@
vg: "{{ nvme_vg_name }}"
state: absent
force: yes
with_items:
- "{{ nvme_device_lvs }}"
with_items: "{{ nvme_device_lvs }}"
- name: tear down any existing lvs of hdd journals
lvol:
@@ -79,8 +75,7 @@
vg: "{{ nvme_vg_name }}"
state: absent
force: yes
with_items:
- "{{ hdd_devices }}"
with_items: "{{ hdd_devices }}"
## Volume Groups
- name: remove vg on nvme device
@@ -94,8 +89,7 @@
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
state: absent
force: yes
with_items:
- "{{ hdd_devices }}"
with_items: "{{ hdd_devices }}"
## Physical Vols
- name: tear down pv for nvme device
@@ -103,5 +97,4 @@
- name: tear down pv for each hdd device
command: "pvremove --force --yes {{ item }}"
with_items:
- "{{ hdd_devices }}"
with_items: "{{ hdd_devices }}"


@@ -53,8 +53,7 @@
vars:
mds_group_name: mdss
hosts:
- "{{ mds_group_name|default('mdss') }}"
hosts: "{{ mds_group_name|default('mdss') }}"
gather_facts: false # Already gathered previously
@@ -75,8 +74,7 @@
vars:
mgr_group_name: mgrs
hosts:
- "{{ mgr_group_name|default('mgrs') }}"
hosts: "{{ mgr_group_name|default('mgrs') }}"
gather_facts: false # Already gathered previously
@@ -98,8 +96,7 @@
vars:
rgw_group_name: rgws
hosts:
- "{{ rgw_group_name|default('rgws') }}"
hosts: "{{ rgw_group_name|default('rgws') }}"
gather_facts: false # Already gathered previously
@@ -120,8 +117,7 @@
vars:
rbdmirror_group_name: rbdmirrors
hosts:
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
gather_facts: false # Already gathered previously
@@ -141,8 +137,7 @@
vars:
nfs_group_name: nfss
hosts:
- "{{ nfs_group_name|default('nfss') }}"
hosts: "{{ nfs_group_name|default('nfss') }}"
gather_facts: false # Already gathered previously
@@ -164,8 +159,7 @@
osd_group_name: osds
reboot_osd_node: False
hosts:
- "{{ osd_group_name|default('osds') }}"
hosts: "{{ osd_group_name|default('osds') }}"
gather_facts: false # Already gathered previously
@@ -368,8 +362,7 @@
- name: resolve parent device
command: lsblk --nodeps -no pkname "{{ item }}"
register: tmp_resolved_parent_device
with_items:
- "{{ combined_devices_list }}"
with_items: "{{ combined_devices_list }}"
- name: set_fact resolved_parent_device
set_fact:
@@ -395,16 +388,14 @@
parted -s /dev/"{{ item }}" mklabel gpt
partprobe /dev/"{{ item }}"
udevadm settle --timeout=600
with_items:
- "{{ resolved_parent_device }}"
with_items: "{{ resolved_parent_device }}"
- name: purge ceph mon cluster
vars:
mon_group_name: mons
hosts:
- "{{ mon_group_name|default('mons') }}"
hosts: "{{ mon_group_name|default('mons') }}"
gather_facts: false # already gathered previously
@@ -556,8 +547,7 @@
module: command
echo requesting data removal
become: false
notify:
- remove data
notify: remove data
- name: purge dnf cache
command: dnf clean all
@@ -595,15 +585,13 @@
file:
path: "{{ item.path }}"
state: absent
with_items:
- "{{ systemd_files.files }}"
with_items: "{{ systemd_files.files }}"
when: ansible_service_mgr == 'systemd'
- name: purge fetch directory
hosts:
- localhost
hosts: localhost
gather_facts: false


@@ -4,8 +4,7 @@
- name: confirm whether user really meant to purge the cluster
hosts:
- localhost
hosts: localhost
gather_facts: false
@@ -36,8 +35,7 @@
- name: purge ceph mds cluster
hosts:
- "{{ mds_group_name|default('mdss') }}"
hosts: "{{ mds_group_name|default('mdss') }}"
become: true
@@ -69,8 +67,7 @@
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
tags:
remove_img
tags: remove_img
ignore_errors: true
- name: purge ceph iscsigws cluster
@@ -120,14 +117,12 @@
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
tags:
remove_img
tags: remove_img
ignore_errors: true
- name: purge ceph mgr cluster
hosts:
- "{{ mgr_group_name|default('mgrs') }}"
hosts: "{{ mgr_group_name|default('mgrs') }}"
become: true
tasks:
@@ -157,14 +152,12 @@
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
tags:
remove_img
tags: remove_img
ignore_errors: true
- name: purge ceph rgw cluster
hosts:
- "{{ rgw_group_name|default('rgws') }}"
hosts: "{{ rgw_group_name|default('rgws') }}"
become: true
@@ -215,8 +208,7 @@
- name: purge ceph rbd-mirror cluster
hosts:
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
become: true
@@ -248,14 +240,12 @@
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
tags:
remove_img
tags: remove_img
- name: purge ceph nfs cluster
hosts:
- "{{ nfs_group_name|default('nfss') }}"
hosts: "{{ nfs_group_name|default('nfss') }}"
become: true
@@ -296,14 +286,12 @@
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
tags:
remove_img
tags: remove_img
- name: purge ceph osd cluster
hosts:
- "{{ osd_group_name | default('osds') }}"
hosts: "{{ osd_group_name | default('osds') }}"
gather_facts: true
become: true
@@ -380,8 +368,7 @@
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
tags:
remove_img
tags: remove_img
ignore_errors: true
- name: include vars from group_vars/osds.yml
@@ -403,13 +390,11 @@
file:
path: "{{ item.path }}"
state: absent
with_items:
- "{{ osd_disk_prepare_logs.files }}"
with_items: "{{ osd_disk_prepare_logs.files }}"
- name: purge ceph mon cluster
hosts:
- "{{ mon_group_name|default('mons') }}"
hosts: "{{ mon_group_name|default('mons') }}"
become: true
@@ -451,8 +436,7 @@
name: "{{ ceph_docker_image }}"
tag: "{{ ceph_docker_image_tag }}"
force: yes
tags:
remove_img
tags: remove_img
ignore_errors: true
- name: check container hosts
@@ -509,8 +493,7 @@
become: true
tags:
with_pkg
tags: with_pkg
tasks:
- name: check if it is Atomic host
@@ -629,8 +612,7 @@
- name: purge fetch directory
hosts:
- localhost
hosts: localhost
gather_facts: false


@@ -59,8 +59,7 @@
- name: gather facts
setup:
when:
- not delegate_facts_host | bool
when: not delegate_facts_host | bool
- name: gather and delegate facts
setup:
@@ -68,8 +67,7 @@
delegate_facts: True
with_items: "{{ groups['all'] }}"
run_once: true
when:
- delegate_facts_host | bool
when: delegate_facts_host | bool
- set_fact: rolling_update=true
@@ -78,8 +76,7 @@
health_mon_check_retries: 5
health_mon_check_delay: 15
upgrade_ceph_packages: True
hosts:
- "{{ mon_group_name|default('mons') }}"
hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: True
tasks:
@@ -87,8 +84,7 @@
file:
path: /etc/profile.d/ceph-aliases.sh
state: absent
when:
- containerized_deployment
when: containerized_deployment
- name: set mon_host_count
set_fact:
@@ -97,8 +93,7 @@
- name: fail when less than three monitors
fail:
msg: "Upgrade of cluster with less than three monitors is not supported."
when:
- mon_host_count | int < 3
when: mon_host_count | int < 3
- name: select a running monitor
set_fact:
@@ -150,8 +145,7 @@
enabled: no
masked: yes
ignore_errors: True
when:
- not containerized_deployment
when: not containerized_deployment
# NOTE: we mask the service so the RPM can't restart it
# after the package gets upgraded
@@ -162,8 +156,7 @@
enabled: no
masked: yes
ignore_errors: True
when:
- not containerized_deployment
when: not containerized_deployment
# only mask the service for mgr because it must be upgraded
# after ALL monitors, even when collocated
@@ -171,9 +164,8 @@
systemd:
name: ceph-mgr@{{ ansible_hostname }}
masked: yes
when:
- inventory_hostname in groups[mgr_group_name] | default([])
or groups[mgr_group_name] | default([]) | length == 0
when: inventory_hostname in groups[mgr_group_name] | default([])
or groups[mgr_group_name] | default([]) | length == 0
- name: set osd flags
command: ceph --cluster {{ cluster }} osd set {{ item }}
@@ -214,8 +206,7 @@
name: ceph-mon@{{ monitor_name }}
state: started
enabled: yes
when:
- not containerized_deployment
when: not containerized_deployment
- name: start ceph mgr
systemd:
@@ -223,8 +214,7 @@
state: started
enabled: yes
ignore_errors: True # if no mgr collocated with mons
when:
- not containerized_deployment
when: not containerized_deployment
- name: restart containerized ceph mon
systemd:
@@ -232,8 +222,7 @@
state: restarted
enabled: yes
daemon_reload: yes
when:
- containerized_deployment
when: containerized_deployment
- name: non container | waiting for the monitor to join the quorum...
command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" -s --format json
@@ -244,8 +233,7 @@
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
when:
- not containerized_deployment
when: not containerized_deployment
- name: container | waiting for the containerized monitor to join the quorum...
command: >
@@ -257,22 +245,19 @@
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
when:
- containerized_deployment
when: containerized_deployment
- name: upgrade ceph mgr nodes when implicitly collocated on monitors
vars:
health_mon_check_retries: 5
health_mon_check_delay: 15
upgrade_ceph_packages: True
hosts:
- "{{ mon_group_name|default('mons') }}"
hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: True
tasks:
- name: upgrade mgrs when no mgr group explicitly defined in inventory
when:
- groups.get(mgr_group_name, []) | length == 0
when: groups.get(mgr_group_name, []) | length == 0
block:
- name: stop ceph mgr
systemd:
@@ -302,8 +287,7 @@
vars:
upgrade_ceph_packages: True
ceph_release: "{{ ceph_stable_release }}"
hosts:
- "{{ mgr_group_name|default('mgrs') }}"
hosts: "{{ mgr_group_name|default('mgrs') }}"
serial: 1
become: True
tasks:
@@ -342,8 +326,7 @@
health_osd_check_delay: 30
upgrade_ceph_packages: True
hosts:
- "{{ osd_group_name|default('osds') }}"
hosts: "{{ osd_group_name|default('osds') }}"
serial: 1
become: True
tasks:
@@ -372,8 +355,7 @@
enabled: no
masked: yes
with_items: "{{ osd_ids.stdout_lines }}"
when:
- not containerized_deployment
when: not containerized_deployment
- name: set num_osds for non container
set_fact:
@@ -411,8 +393,7 @@
enabled: yes
masked: no
with_items: "{{ osd_ids.stdout_lines }}"
when:
- not containerized_deployment
when: not containerized_deployment
- name: restart containerized ceph osd
systemd:
@@ -422,8 +403,7 @@
masked: no
daemon_reload: yes
with_items: "{{ osd_names.stdout_lines }}"
when:
- containerized_deployment
when: containerized_deployment
- name: scan ceph-disk osds with ceph-volume if deploying nautilus
command: "ceph-volume --cluster={{ cluster }} simple scan"
@@ -444,8 +424,7 @@
- name: set_fact docker_exec_cmd_osd
set_fact:
docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment
when: containerized_deployment
- name: get osd versions
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
@@ -481,14 +460,12 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
retries: "{{ health_osd_check_retries }}"
delay: "{{ health_osd_check_delay }}"
when:
- (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0
when: (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0
- name: unset osd flags
hosts:
- "{{ mon_group_name|default('mons') }}"
hosts: "{{ mon_group_name|default('mons') }}"
become: True
@@ -501,8 +478,7 @@
- name: set_fact docker_exec_cmd_osd
set_fact:
docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment
when: containerized_deployment
- name: unset osd flags
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
@@ -533,8 +509,7 @@
- name: upgrade ceph mdss cluster
vars:
upgrade_ceph_packages: True
hosts:
- "{{ mds_group_name|default('mdss') }}"
hosts: "{{ mds_group_name|default('mdss') }}"
serial: 1
become: True
tasks:
@@ -544,8 +519,7 @@
state: stopped
enabled: no
masked: yes
when:
- not containerized_deployment
when: not containerized_deployment
- import_role:
name: ceph-defaults
@@ -570,8 +544,7 @@
state: started
enabled: yes
masked: no
when:
- not containerized_deployment
when: not containerized_deployment
- name: restart ceph mds
systemd:
@@ -580,15 +553,13 @@
enabled: yes
masked: no
daemon_reload: yes
when:
- containerized_deployment
when: containerized_deployment
- name: upgrade ceph rgws cluster
vars:
upgrade_ceph_packages: True
hosts:
- "{{ rgw_group_name|default('rgws') }}"
hosts: "{{ rgw_group_name|default('rgws') }}"
serial: 1
become: True
tasks:
@@ -613,8 +584,7 @@
enabled: no
masked: yes
with_items: "{{ rgw_instances }}"
when:
- not containerized_deployment
when: not containerized_deployment
- import_role:
name: ceph-handler
@@ -637,15 +607,13 @@
masked: no
daemon_reload: yes
with_items: "{{ rgw_instances }}"
when:
- containerized_deployment
when: containerized_deployment
- name: upgrade ceph rbd mirror node
vars:
upgrade_ceph_packages: True
hosts:
- "{{ rbd_mirror_group_name|default('rbdmirrors') }}"
hosts: "{{ rbd_mirror_group_name|default('rbdmirrors') }}"
serial: 1
become: True
tasks:
@@ -679,8 +647,7 @@
state: started
enabled: yes
masked: no
when:
- not containerized_deployment
when: not containerized_deployment
- name: restart containerized ceph rbd mirror
systemd:
@@ -689,15 +656,13 @@
enabled: yes
masked: no
daemon_reload: yes
when:
- containerized_deployment
when: containerized_deployment
- name: upgrade ceph nfs node
vars:
upgrade_ceph_packages: True
hosts:
- "{{ nfs_group_name|default('nfss') }}"
hosts: "{{ nfs_group_name|default('nfss') }}"
serial: 1
become: True
tasks:
@@ -711,8 +676,7 @@
enabled: no
masked: yes
failed_when: false
when:
- not containerized_deployment
when: not containerized_deployment
- import_role:
name: ceph-defaults
@@ -811,8 +775,7 @@
- name: upgrade ceph client node
vars:
upgrade_ceph_packages: True
hosts:
- "{{ client_group_name|default('clients') }}"
hosts: "{{ client_group_name|default('clients') }}"
serial: "{{ client_update_batch | default(20) }}"
become: True
tasks:
@@ -834,8 +797,7 @@
name: ceph-client
- name: complete upgrade
hosts:
- all
hosts: all
become: True
tasks:
- import_role:
@@ -877,8 +839,7 @@
- name: show ceph status
hosts:
- "{{ mon_group_name|default('mons') }}"
hosts: "{{ mon_group_name|default('mons') }}"
become: True
tasks:
- import_role:
@@ -887,8 +848,7 @@
- name: set_fact docker_exec_cmd_status
set_fact:
docker_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment
when: containerized_deployment
- name: show ceph status
command: "{{ docker_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"


@@ -14,8 +14,7 @@
- name: gather facts and check the init system
hosts:
- "{{ mon_group_name|default('mons') }}"
hosts: "{{ mon_group_name|default('mons') }}"
become: true
@@ -23,8 +22,7 @@
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to remove monitor from the ceph cluster
hosts:
- localhost
hosts: localhost
become: true
vars_prompt:
- name: ireallymeanit
@@ -40,8 +38,7 @@
fail:
msg: "You are about to shrink the only monitor present in the cluster.
If you really want to do that, please use the purge-cluster playbook."
when:
- groups[mon_group_name] | length | int == 1
when: groups[mon_group_name] | length | int == 1
- name: exit playbook, if no monitor was given
fail:
@@ -49,14 +46,12 @@
Exiting shrink-cluster playbook, no monitor was removed.
On the command line when invoking the playbook, you can use
-e mon_to_kill=ceph-mon01 argument. You can only remove a single monitor each time the playbook runs."
when:
- mon_to_kill is not defined
when: mon_to_kill is not defined
- name: exit playbook, if the monitor is not part of the inventory
fail:
msg: "It seems that the host given is not part of your inventory, please make sure it is."
when:
- mon_to_kill not in groups[mon_group_name]
when: mon_to_kill not in groups[mon_group_name]
- name: exit playbook, if user did not mean to shrink cluster
fail:
@@ -64,8 +59,7 @@
To shrink the cluster, either say 'yes' on the prompt or
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when:
- ireallymeanit != 'yes'
when: ireallymeanit != 'yes'
- import_role:
name: ceph-defaults
@@ -78,8 +72,7 @@
set_fact:
mon_host: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"
when:
- item != mon_to_kill
when: item != mon_to_kill
- name: "set_fact docker_exec_cmd build {{ container_binary }} exec command (containerized)"
set_fact:
@@ -136,15 +129,13 @@
msg: "The monitor has been successfully removed from the cluster.
Please remove the monitor entry from the rest of your ceph configuration files, cluster wide."
run_once: true
when:
- mon_to_kill_hostname not in result.stdout
when: mon_to_kill_hostname not in result.stdout
- name: fail if monitor is still part of the cluster
fail:
msg: "Monitor appears to still be part of the cluster, please check what happened."
run_once: true
when:
- mon_to_kill_hostname in result.stdout
when: mon_to_kill_hostname in result.stdout
- name: show ceph health
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s"


@@ -23,8 +23,7 @@
- name: confirm whether user really meant to remove osd(s) from the cluster
hosts:
- localhost
hosts: localhost
become: true


@@ -7,8 +7,7 @@
- name: gather facts and check the init system
hosts:
- "{{ osd_group_name|default('osds') }}"
hosts: "{{ osd_group_name|default('osds') }}"
become: true
@@ -17,8 +16,7 @@
- name: query each host for storage device inventory
hosts:
- "{{ osd_group_name|default('osds') }}"
hosts: "{{ osd_group_name|default('osds') }}"
become: true


@@ -3,8 +3,7 @@
- name: confirm whether user really meant to switch from non-containerized to containerized ceph daemons
hosts:
- localhost
hosts: localhost
gather_facts: false
@@ -53,8 +52,7 @@
containerized_deployment: true
switch_to_containers: True
mon_group_name: mons
hosts:
- "{{ mon_group_name|default('mons') }}"
hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: true
pre_tasks:
@@ -147,8 +145,7 @@
- name: switching from non-containerized to containerized ceph mgr
hosts:
- "{{ mgr_group_name|default('mgrs') }}"
hosts: "{{ mgr_group_name|default('mgrs') }}"
vars:
containerized_deployment: true
@@ -217,8 +214,7 @@
containerized_deployment: true
osd_group_name: osds
hosts:
- "{{ osd_group_name|default('osds') }}"
hosts: "{{ osd_group_name|default('osds') }}"
serial: 1
become: true
@@ -284,8 +280,7 @@
shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb
changed_when: false
failed_when: false
when:
- ldb_files.rc == 0
when: ldb_files.rc == 0
- name: check if containerized osds are already running
command: >
@@ -305,10 +300,8 @@
umount /var/lib/ceph/osd/{{ item }}
changed_when: false
failed_when: false
with_items:
- "{{ osd_dirs.stdout_lines }}"
when:
- osd_running.rc != 0
with_items: "{{ osd_dirs.stdout_lines }}"
when: osd_running.rc != 0
tasks:
- import_role:
@@ -344,14 +337,12 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
retries: "{{ health_osd_check_retries }}"
delay: "{{ health_osd_check_delay }}"
when:
- (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0
when: (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0
- name: switching from non-containerized to containerized ceph mds
hosts:
- "{{ mds_group_name|default('mdss') }}"
hosts: "{{ mds_group_name|default('mdss') }}"
vars:
containerized_deployment: true
@@ -411,8 +402,7 @@
- name: switching from non-containerized to containerized ceph rgw
hosts:
- "{{ rgw_group_name|default('rgws') }}"
hosts: "{{ rgw_group_name|default('rgws') }}"
vars:
containerized_deployment: true
@@ -469,8 +459,7 @@
- name: switching from non-containerized to containerized ceph rbd-mirror
hosts:
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
vars:
containerized_deployment: true
@@ -529,8 +518,7 @@
- name: switching from non-containerized to containerized ceph nfs
hosts:
- "{{ nfs_group_name|default('nfss') }}"
hosts: "{{ nfs_group_name|default('nfss') }}"
vars:
containerized_deployment: true


@@ -481,8 +481,7 @@
service: >
name={{ item }}
state=stopped
with_items:
- radosgw
with_items: radosgw
when: migration_completed.stat.exists == False
- name: Wait for radosgw to be down
@@ -524,16 +523,14 @@
shell: >
{{ item }}
chdir=/var/lib/ceph/
with_items:
- cp etc/ceph/* /etc/ceph/
with_items: cp etc/ceph/* /etc/ceph/
when: migration_completed.stat.exists == False
- name: Start rados gateway
service: >
name={{ item }}
state=started
with_items:
- radosgw
with_items: radosgw
when: migration_completed.stat.exists == False
- name: Wait for radosgw to be up again


@@ -27,8 +27,7 @@
journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106
data_typecode: 4fbd7e29-9d25-41b8-afd0-062c0ceff05d
devices: []
hosts:
- "{{ osd_group_name }}"
hosts: "{{ osd_group_name }}"
tasks:
@@ -68,14 +67,12 @@
- set_fact:
owner: 167
group: 167
when:
- ansible_os_family == "RedHat"
when: ansible_os_family == "RedHat"
- set_fact:
owner: 64045
group: 64045
when:
- ansible_os_family == "Debian"
when: ansible_os_family == "Debian"
- name: change partitions ownership
file:
@@ -96,6 +93,5 @@
with_subelements:
- "{{ devices }}"
- partitions
when:
item.0.device_name | match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$')
when: item.0.device_name | match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$')
...


@@ -33,8 +33,7 @@
osd_group_name: osds
journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106
osds_journal_devices: []
hosts:
- "{{ osd_group_name }}"
hosts: "{{ osd_group_name }}"
serial: 1
tasks:
@@ -51,8 +50,7 @@
msg: exit playbook osd(s) is not on this host
with_items:
osds_dir_stat.results
when:
- osds_dir_stat is defined and item.stat.exists == false
when: osds_dir_stat is defined and item.stat.exists == false
- name: install sgdisk(gdisk)
package:
@@ -75,40 +73,33 @@
--typecode={{ item.item[1].index }}:{{ journal_typecode }}
--partition-guid={{ item.item[1].index }}:{{ item.stdout }}
--mbrtogpt -- {{ item.item[0].device_name }}
with_items:
- "{{ osds.results }}"
with_items: "{{ osds.results }}"
- name: stop osd(s) service
service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: stopped
with_items:
- "{{ osds.results }}"
with_items: "{{ osds.results }}"
- name: flush osd(s) journal
command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }}
with_items:
- "{{ osds.results }}"
with_items: "{{ osds.results }}"
when: osds_journal_devices is defined
- name: update osd(s) journal soft link
command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal
with_items:
- "{{ osds.results }}"
with_items: "{{ osds.results }}"
- name: update osd(s) journal uuid
command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid
with_items:
- "{{ osds.results }}"
with_items: "{{ osds.results }}"
- name: initialize osd(s) new journal
command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
with_items:
- "{{ osds.results }}"
with_items: "{{ osds.results }}"
- name: start osd(s) service
service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: started
with_items:
- "{{ osds.results }}"
with_items: "{{ osds.results }}"


@@ -96,24 +96,20 @@
--typecode={{ item.item[1].index }}:{{ journal_typecode }}
--partition-guid={{ item.item[1].index }}:{{ item.stdout }}
--mbrtogpt -- {{ item.item[0].device_name }}
with_items:
- "{{ osds_uuid.results }}"
with_items: "{{ osds_uuid.results }}"
- name: stop osd(s) service
service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: stopped
with_items:
- "{{ osds_uuid.results }}"
with_items: "{{ osds_uuid.results }}"
- name: reinitialize osd(s) journal in new ssd
command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
with_items:
- "{{ osds_uuid.results }}"
with_items: "{{ osds_uuid.results }}"
- name: start osd(s) service
service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: started
with_items:
- "{{ osds_uuid.results }}"
with_items: "{{ osds_uuid.results }}"


@@ -26,8 +26,7 @@
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to replace osd(s)
hosts:
- localhost
hosts: localhost
become: true
vars_prompt:
- name: ireallymeanit
@@ -90,8 +89,7 @@
with_items: "{{ osd_hosts }}"
delegate_to: "{{ item }}"
failed_when: false
when:
- not containerized_deployment
when: not containerized_deployment
- name: fail when admin key is not present
fail:
@@ -112,8 +110,7 @@
- "{{ osd_to_replace.split(',') }}"
register: osd_to_replace_disks
delegate_to: "{{ item.0 }}"
when:
- containerized_deployment
when: containerized_deployment
- name: zapping osd(s) - container
shell: >
@@ -125,8 +122,7 @@
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
delegate_to: "{{ item.0 }}"
when:
- containerized_deployment
when: containerized_deployment
- name: zapping osd(s) - non container
command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}
@@ -135,8 +131,7 @@
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
delegate_to: "{{ item.0 }}"
when:
- not containerized_deployment
when: not containerized_deployment
- name: destroying osd(s)
command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap
@@ -145,8 +140,7 @@
- "{{ osd_hosts }}"
- "{{ osd_to_replace.split(',') }}"
delegate_to: "{{ item.0 }}"
when:
- not containerized_deployment
when: not containerized_deployment
- name: replace osd(s) - prepare - non container
command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)


@@ -56,8 +56,7 @@ EXAMPLES = '''
location: "{{ hostvars[item]['osd_crush_location'] }}"
containerized: "{{ docker_exec_cmd }}"
with_items: "{{ groups[osd_group_name] }}"
when:
- crush_rule_config
when: crush_rule_config
'''
RETURN = '''# '''


@@ -9,22 +9,19 @@
raw: apt-get -y install python-simplejson
ignore_errors: yes
register: result
when:
- systempython.stat is undefined or not systempython.stat.exists
when: systempython.stat is undefined or not systempython.stat.exists
until: result is succeeded
- name: install python for fedora
raw: dnf -y install python3; ln -sf /usr/bin/python3 /usr/bin/python creates=/usr/bin/python
ignore_errors: yes
register: result
when:
- systempython.stat is undefined or not systempython.stat.exists
when: systempython.stat is undefined or not systempython.stat.exists
until: (result is succeeded) and ('Failed' not in result.stdout)
- name: install python for opensuse
raw: zypper -n install python-base creates=/usr/bin/python2.7
ignore_errors: yes
register: result
when:
- systempython.stat is undefined or not systempython.stat.exists
when: systempython.stat is undefined or not systempython.stat.exists
until: result is succeeded


@@ -7,8 +7,7 @@
state: present
register: result
until: result is succeeded
tags:
- package-install
tags: package-install
- name: create minion.d directory
file:


@@ -2,15 +2,13 @@
- name: set_fact keys_tmp - preserve backward compatibility after the introduction of the ceph_keys module
set_fact:
keys_tmp: "{{ keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap|default(''), 'mds': item.mds_cap|default(''), 'mgr': item.mgr_cap|default('') } , 'mode': item.mode } ] }}"
when:
- item.get('mon_cap', None) # it's enough to assume we are running an old-fashionned syntax simply by checking the presence of mon_cap since every key needs this cap
when: item.get('mon_cap', None) # it's enough to assume we are running an old-fashionned syntax simply by checking the presence of mon_cap since every key needs this cap
with_items: "{{ keys }}"
- name: set_fact keys - override keys_tmp with keys
set_fact:
keys: "{{ keys_tmp }}"
when:
- keys_tmp is defined
when: keys_tmp is defined
# dummy container setup is only supported on x86_64
# when running with containerized_deployment: true this task
@@ -22,8 +20,7 @@
name: "{{ item }}"
groups: _filtered_clients
with_items: "{{ groups[client_group_name] }}"
when:
- (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment)
when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment)
- name: set_fact delegated_node
set_fact:
@@ -58,8 +55,7 @@
- name: slurp client cephx key(s)
slurp:
src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
with_items:
- "{{ keys }}"
with_items: "{{ keys }}"
register: slurp_client_keys
delegate_to: "{{ delegated_node }}"
when:
@@ -129,8 +125,7 @@
with_items: "{{ pools | unique }}"
changed_when: false
delegate_to: "{{ delegated_node }}"
when:
- item.application is defined
when: item.application is defined
- name: get client cephx keys
copy:
@@ -139,7 +134,5 @@
mode: "{{ item.item.get('mode', '0600') }}"
owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}"
with_items:
- "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}"
when:
- not item.get('skipped', False)
with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}"
when: not item.get('skipped', False)


@@ -4,5 +4,4 @@
- name: include create_users_keys.yml
include_tasks: create_users_keys.yml
when:
- user_config
when: user_config


@@ -6,8 +6,7 @@
create: yes
line: "CLUSTER={{ cluster }}"
regexp: "^CLUSTER="
when:
- ansible_os_family in ["RedHat", "Suse"]
when: ansible_os_family in ["RedHat", "Suse"]
# NOTE(leseb): we are performing the following check
# to ensure any Jewel installation will not fail.
@@ -20,8 +19,7 @@
# - All previous versions from Canonical
# - Infernalis from ceph.com
- name: debian based systems - configure cluster name
when:
- ansible_os_family == "Debian"
when: ansible_os_family == "Debian"
block:
- name: check /etc/default/ceph exist
stat:
@@ -30,8 +28,7 @@
check_mode: no
- name: configure cluster name
when:
- etc_default_ceph.stat.exists
when: etc_default_ceph.stat.exists
block:
- name: when /etc/default/ceph is not dir
lineinfile:
@@ -40,8 +37,7 @@
create: yes
regexp: "^CLUSTER="
line: "CLUSTER={{ cluster }}"
when:
- not etc_default_ceph.stat.isdir
when: not etc_default_ceph.stat.isdir
- name: when /etc/default/ceph is dir
lineinfile:
@@ -50,5 +46,4 @@
create: yes
regexp: "^CLUSTER="
line: "CLUSTER={{ cluster }}"
when:
- etc_default_ceph.stat.isdir
when: etc_default_ceph.stat.isdir


@@ -9,8 +9,7 @@
when:
- ansible_os_family == 'Debian'
- etc_default_ceph.stat.exists
notify:
- restart ceph osds
notify: restart ceph osds
- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat
lineinfile:
@@ -19,7 +18,5 @@
create: yes
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
when:
- ansible_os_family == 'RedHat'
notify:
- restart ceph osds
when: ansible_os_family == 'RedHat'
notify: restart ceph osds


@@ -9,5 +9,4 @@
with_items:
- "{{ rbd_client_admin_socket_path }}"
- "{{ rbd_client_log_path }}"
when:
- rbd_client_directories
when: rbd_client_directories


@@ -1,25 +1,20 @@
---
- name: include debian_community_repository.yml
include_tasks: debian_community_repository.yml
when:
- ceph_repository == 'community'
when: ceph_repository == 'community'
- name: include debian_rhcs_repository.yml
include_tasks: debian_rhcs_repository.yml
when:
- ceph_repository == 'rhcs'
when: ceph_repository == 'rhcs'
- name: include debian_dev_repository.yml
include_tasks: debian_dev_repository.yml
when:
- ceph_repository == 'dev'
when: ceph_repository == 'dev'
- name: include debian_custom_repository.yml
include_tasks: debian_custom_repository.yml
when:
- ceph_repository == 'custom'
when: ceph_repository == 'custom'
- name: include debian_uca_repository.yml
include_tasks: debian_uca_repository.yml
when:
- ceph_repository == 'uca'
when: ceph_repository == 'uca'


@@ -3,29 +3,25 @@
file:
path: /tmp
state: directory
when:
- use_installer
when: use_installer
- name: use mktemp to create name for rundep
command: "mktemp /tmp/rundep.XXXXXXXX"
register: rundep_location
when:
- use_installer
when: use_installer
- name: copy rundep
copy:
src: "{{ ansible_dir }}/rundep"
dest: "{{ item }}"
with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}"
when:
- use_installer
when: use_installer
- name: install ceph dependencies
script: "{{ ansible_dir }}/rundep_installer.sh {{ item }}"
become: true
with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}"
when:
- use_installer
when: use_installer
- name: ensure rsync is installed
package:


@@ -1,23 +1,19 @@
---
- name: include redhat_community_repository.yml
include_tasks: redhat_community_repository.yml
when:
- ceph_repository == 'community'
when: ceph_repository == 'community'
- name: include redhat_rhcs_repository.yml
include_tasks: redhat_rhcs_repository.yml
when:
- ceph_repository == 'rhcs'
when: ceph_repository == 'rhcs'
- name: include redhat_dev_repository.yml
include_tasks: redhat_dev_repository.yml
when:
- ceph_repository == 'dev'
when: ceph_repository == 'dev'
- name: include redhat_custom_repository.yml
include_tasks: redhat_custom_repository.yml
when:
- ceph_repository == 'custom'
when: ceph_repository == 'custom'
# Remove yum caches so yum doesn't get confused if we are reinstalling a different ceph version
- name: purge yum cache
@@ -25,5 +21,4 @@
args:
warn: no
changed_when: false
when:
ansible_pkg_mgr == 'yum'
when: ansible_pkg_mgr == 'yum'


@@ -1,5 +1,4 @@
---
- name: include suse_obs_repository.yml
include_tasks: suse_obs_repository.yml
when:
- ceph_repository == 'obs'
when: ceph_repository == 'obs'


@@ -9,10 +9,8 @@
- name: include prerequisite_rhcs_iso_install_debian.yml
include_tasks: prerequisite_rhcs_iso_install_debian.yml
when:
- ceph_repository_type == 'iso'
when: ceph_repository_type == 'iso'
- name: include prerequisite_rhcs_cdn_install_debian.yml
include_tasks: prerequisite_rhcs_cdn_install_debian.yml
when:
- ceph_repository_type == 'cdn'
when: ceph_repository_type == 'cdn'


@@ -1,8 +1,7 @@
---
- name: include configure_debian_repository_installation.yml
include_tasks: configure_debian_repository_installation.yml
when:
- ceph_origin == 'repository'
when: ceph_origin == 'repository'
- name: update apt cache if cache_valid_time has expired
apt:


@@ -1,15 +1,12 @@
---
- name: include configure_redhat_repository_installation.yml
include_tasks: configure_redhat_repository_installation.yml
when:
- ceph_origin == 'repository'
when: ceph_origin == 'repository'
- name: include configure_redhat_local_installation.yml
include_tasks: configure_redhat_local_installation.yml
when:
- ceph_origin == 'local'
when: ceph_origin == 'local'
- name: include install_redhat_packages.yml
include_tasks: install_redhat_packages.yml
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
when: (ceph_origin == 'repository' or ceph_origin == 'distro')


@@ -5,13 +5,11 @@
- name: Check for supported installation method on suse
fail:
msg: "Unsupported installation method origin:{{ ceph_origin }} repo:{{ ceph_repository }}'"
when:
- ceph_origin != 'distro' or (ceph_origin == 'repository' and ceph_repository != 'obs')
when: ceph_origin != 'distro' or (ceph_origin == 'repository' and ceph_repository != 'obs')
- name: include configure_suse_repository_installation.yml
include_tasks: configure_suse_repository_installation.yml
when:
- ceph_origin == 'repository'
when: ceph_origin == 'repository'
- name: install dependencies
zypper:


@@ -5,8 +5,7 @@
state: present
register: result
until: result is succeeded
when:
- ansible_distribution == 'RedHat'
when: ansible_distribution == 'RedHat'
- name: install centos dependencies
yum:
@@ -14,8 +13,7 @@
state: present
register: result
until: result is succeeded
when:
- ansible_distribution == 'CentOS'
when: ansible_distribution == 'CentOS'
- name: install redhat ceph packages
package:


@@ -2,17 +2,14 @@
- name: enable red hat storage monitor repository
rhsm_repository:
name: "rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms"
when:
- (mon_group_name in group_names or mgr_group_name in group_names)
when: (mon_group_name in group_names or mgr_group_name in group_names)
- name: enable red hat storage osd repository
rhsm_repository:
name: "rhel-7-server-rhceph-{{ ceph_rhcs_version }}-osd-rpms"
when:
- osd_group_name in group_names
when: osd_group_name in group_names
- name: enable red hat storage tools repository
rhsm_repository:
name: "rhel-7-server-rhceph-{{ ceph_rhcs_version }}-tools-rpms"
when:
- (rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names or iscsi_gw_group_name in group_names or client_group_name in group_names)
when: (rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names or iscsi_gw_group_name in group_names or client_group_name in group_names)


@@ -12,8 +12,7 @@
path: "{{ ceph_rhcs_iso_path | dirname }}"
state: directory
recurse: yes
when:
- ceph_rhcs_iso_path | dirname != '/'
when: ceph_rhcs_iso_path | dirname != '/'
- name: fetch the red hat storage iso from the ansible server for redhat systems
copy:


@@ -12,8 +12,7 @@
path: "{{ ceph_rhcs_iso_path | dirname }}"
state: directory
recurse: yes
when:
- ceph_rhcs_iso_path | dirname != '/'
when: ceph_rhcs_iso_path | dirname != '/'
- name: fetch the red hat storage iso from the ansible server for debian systems
copy:


@@ -4,8 +4,7 @@
name: yum-plugin-priorities
register: result
until: result is succeeded
tags:
- with_pkg
tags: with_pkg
- name: configure red hat ceph community repository stable key
rpm_key:


@@ -1,10 +1,8 @@
---
- name: include prerequisite_rhcs_iso_install.yml
include_tasks: prerequisite_rhcs_iso_install.yml
when:
- ceph_repository_type == 'iso'
when: ceph_repository_type == 'iso'
- name: include prerequisite_rhcs_cdn_install.yml
include_tasks: prerequisite_rhcs_cdn_install.yml
when:
- ceph_repository_type == 'cdn'
when: ceph_repository_type == 'cdn'


@@ -2,27 +2,22 @@
- name: include_tasks installs/install_on_redhat.yml
include_tasks: installs/install_on_redhat.yml
when: ansible_os_family == 'RedHat'
tags:
- package-install
tags: package-install
- name: include_tasks installs/install_on_suse.yml
include_tasks: installs/install_on_suse.yml
when: ansible_os_family == 'Suse'
tags:
- package-install
tags: package-install
- name: include installs/install_on_debian.yml
include_tasks: installs/install_on_debian.yml
tags:
- package-install
when:
- ansible_os_family == 'Debian'
tags: package-install
when: ansible_os_family == 'Debian'
- name: include_tasks installs/install_on_clear.yml
include_tasks: installs/install_on_clear.yml
when: ansible_os_family == 'ClearLinux'
tags:
- package-install
tags: package-install
- name: get ceph version
command: ceph --version
@@ -37,12 +32,10 @@
# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
- name: include release-rhcs.yml
include_tasks: release-rhcs.yml
when:
- ceph_repository in ['rhcs', 'dev']
when: ceph_repository in ['rhcs', 'dev']
or
ceph_origin == 'distro'
tags:
- always
tags: always
- name: set_fact ceph_release - override ceph_release with ceph_stable_release
set_fact:
@@ -50,8 +43,7 @@
when:
- ceph_origin == 'repository'
- ceph_repository not in ['dev', 'rhcs']
tags:
- always
tags: always
- name: include create_rbd_client_dir.yml
include_tasks: create_rbd_client_dir.yml


@@ -2,29 +2,24 @@
- name: set_fact ceph_release jewel
set_fact:
ceph_release: jewel
when:
- ceph_version.split('.')[0] is version_compare('10', '==')
when: ceph_version.split('.')[0] is version_compare('10', '==')
- name: set_fact ceph_release kraken
set_fact:
ceph_release: kraken
when:
- ceph_version.split('.')[0] is version_compare('11', '==')
when: ceph_version.split('.')[0] is version_compare('11', '==')
- name: set_fact ceph_release luminous
set_fact:
ceph_release: luminous
when:
- ceph_version.split('.')[0] is version_compare('12', '==')
when: ceph_version.split('.')[0] is version_compare('12', '==')
- name: set_fact ceph_release mimic
set_fact:
ceph_release: mimic
when:
- ceph_version.split('.')[0] is version_compare('13', '==')
when: ceph_version.split('.')[0] is version_compare('13', '==')
- name: set_fact ceph_release nautilus
set_fact:
ceph_release: nautilus
when:
- ceph_version.split('.')[0] is version_compare('14', '==')
when: ceph_version.split('.')[0] is version_compare('14', '==')


@@ -1,8 +1,7 @@
---
- name: include create_ceph_initial_dirs.yml
include_tasks: create_ceph_initial_dirs.yml
when:
- containerized_deployment|bool
when: containerized_deployment|bool
- name: config file operations related to OSDs
when:
@@ -14,8 +13,7 @@
- name: count number of osds for lvm scenario
set_fact:
num_osds: "{{ lvm_volumes | length | int }}"
when:
- lvm_volumes | default([]) | length > 0
when: lvm_volumes | default([]) | length > 0
- name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
ceph_volume:
@@ -33,8 +31,7 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
PYTHONIOENCODING: utf-8
when:
- devices | default([]) | length > 0
when: devices | default([]) | length > 0
- name: set_fact num_osds from the output of 'ceph-volume lvm batch --report'
set_fact:
@@ -65,8 +62,7 @@
# ceph-common
- name: config file operation for non-containerized scenarios
when:
- not containerized_deployment|bool
when: not containerized_deployment|bool
block:
- name: create ceph conf directory
file:
@@ -102,8 +98,7 @@
state: directory
mode: "0755"
delegate_to: localhost
when:
- ceph_conf_local
when: ceph_conf_local
- name: "generate {{ cluster }}.conf configuration file locally"
config_template:
@@ -120,8 +115,7 @@
- ceph_conf_local
- name: config file operations for containerized scenarios
when:
- containerized_deployment|bool
when: containerized_deployment|bool
block:
- name: create a local fetch directory if it does not exist
file:


@@ -183,8 +183,7 @@
until: docker_image.rc == 0
retries: "{{ docker_pull_retry }}"
delay: 10
when:
- (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
- name: "inspecting {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
command: "{{ container_binary }} inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
@@ -195,15 +194,13 @@
- name: set_fact image_repodigest_after_pulling
set_fact:
image_repodigest_after_pulling: "{{ (image_inspect_after_pull.stdout | from_json)[0].RepoDigests[0].split('@')[1] }}"
when:
- image_inspect_after_pull.rc == 0
when: image_inspect_after_pull.rc == 0
- name: set_fact ceph_mon_image_updated
set_fact:
ceph_mon_image_updated: "{{ ceph_mon_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
notify:
- restart ceph mons
notify: restart ceph mons
when:
- mon_group_name in group_names
- ceph_mon_container_inspect_before_pull.get('rc') == 0
@@ -213,8 +210,7 @@
set_fact:
ceph_osd_image_updated: "{{ ceph_osd_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
notify:
- restart ceph osds
notify: restart ceph osds
when:
- osd_group_name in group_names
- ceph_osd_container_inspect_before_pull.get('rc') == 0
@@ -224,8 +220,7 @@
set_fact:
ceph_mds_image_updated: "{{ ceph_mds_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
notify:
- restart ceph mdss
notify: restart ceph mdss
when:
- mds_group_name in group_names
- ceph_mds_container_inspect_before_pull.get('rc') == 0
@@ -235,8 +230,7 @@
set_fact:
ceph_rgw_image_updated: "{{ ceph_rgw_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
notify:
- restart ceph rgws
notify: restart ceph rgws
when:
- rgw_group_name in group_names
- ceph_rgw_container_inspect_before_pull.get('rc') == 0
@@ -246,8 +240,7 @@
set_fact:
ceph_mgr_image_updated: "{{ ceph_mgr_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
notify:
- restart ceph mgrs
notify: restart ceph mgrs
when:
- mgr_group_name in group_names
- ceph_mgr_container_inspect_before_pull.get('rc') == 0
@@ -257,8 +250,7 @@
set_fact:
ceph_rbd_mirror_image_updated: "{{ ceph_rbd_mirror_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
notify:
- restart ceph rbdmirrors
notify: restart ceph rbdmirrors
when:
- rbdmirror_group_name in group_names
- ceph_rbd_mirror_container_inspect_before_pull.get('rc') == 0
@@ -268,8 +260,7 @@
set_fact:
ceph_nfs_image_updated: "{{ ceph_nfs_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
changed_when: true
notify:
- restart ceph nfss
notify: restart ceph nfss
when:
- nfs_group_name in group_names
- ceph_nfs_container_inspect_before_pull.get('rc') == 0
@@ -280,25 +271,22 @@
{{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
"{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
delegate_to: localhost
when:
- (ceph_docker_dev_image is defined and ceph_docker_dev_image)
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
run_once: true
- name: copy ceph dev image file
copy:
src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
when:
- (ceph_docker_dev_image is defined and ceph_docker_dev_image)
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
- name: load ceph dev image
command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
when:
- (ceph_docker_dev_image is defined and ceph_docker_dev_image)
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
- name: remove tmp ceph dev image file
file:
name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
state: absent
when:
- (ceph_docker_dev_image is defined and ceph_docker_dev_image)
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)


@@ -1,27 +1,23 @@
---
- name: include pre_requisites/prerequisites.yml
include_tasks: pre_requisites/prerequisites.yml
when:
- not is_atomic
when: not is_atomic
- name: get docker version
command: docker --version
changed_when: false
check_mode: no
register: ceph_docker_version
when:
- container_binary == 'docker'
when: container_binary == 'docker'
- name: set_fact ceph_docker_version ceph_docker_version.stdout.split
set_fact:
ceph_docker_version: "{{ ceph_docker_version.stdout.split(' ')[2] }}"
when:
- container_binary == 'docker'
when: container_binary == 'docker'
- name: include fetch_image.yml
include_tasks: fetch_image.yml
tags:
- fetch_container_image
tags: fetch_container_image
- name: get ceph version
command: >


@@ -13,8 +13,7 @@
when:
- ansible_os_family == 'Debian'
- container_package_name == 'docker-ce'
tags:
with_pkg
tags: with_pkg
# ensure extras enabled for docker
- name: enable extras on centos
@@ -32,8 +31,7 @@
package:
name: ['{{ container_package_name }}', '{{ container_binding_name }}']
update_cache: true
tags:
with_pkg
tags: with_pkg
- name: start container service
service:


@@ -2,29 +2,25 @@
- name: set_fact ceph_release jewel
set_fact:
ceph_release: jewel
when:
- ceph_version.split('.')[0] is version_compare('10', '==')
when: ceph_version.split('.')[0] is version_compare('10', '==')
- name: set_fact ceph_release kraken
set_fact:
ceph_release: kraken
when:
- ceph_version.split('.')[0] is version_compare('11', '==')
when: ceph_version.split('.')[0] is version_compare('11', '==')
- name: set_fact ceph_release luminous
set_fact:
ceph_release: luminous
when:
- ceph_version.split('.')[0] is version_compare('12', '==')
when: ceph_version.split('.')[0] is version_compare('12', '==')
- name: set_fact ceph_release mimic
set_fact:
ceph_release: mimic
when:
- ceph_version.split('.')[0] is version_compare('13', '==')
when: ceph_version.split('.')[0] is version_compare('13', '==')
- name: set_fact ceph_release nautilus
set_fact:
ceph_release: nautilus
when:
- ceph_version.split('.')[0] is version_compare('14', '==')
when: ceph_version.split('.')[0] is version_compare('14', '==')


@@ -30,14 +30,12 @@
- name: set_fact monitor_name ansible_hostname
set_fact:
monitor_name: "{{ ansible_hostname }}"
when:
- not mon_use_fqdn
when: not mon_use_fqdn
- name: set_fact monitor_name ansible_fqdn
set_fact:
monitor_name: "{{ ansible_fqdn }}"
when:
- mon_use_fqdn
when: mon_use_fqdn
- name: set_fact docker_exec_cmd
set_fact:
@@ -67,8 +65,7 @@
set_fact:
ceph_current_status:
rc: 1
when:
- rolling_update or groups.get(mon_group_name, []) | length == 0
when: rolling_update or groups.get(mon_group_name, []) | length == 0
- name: create a local fetch directory if it does not exist
file:
@@ -77,21 +74,18 @@
delegate_to: localhost
changed_when: false
become: false
when:
- (cephx or generate_fsid)
when: cephx or generate_fsid
- name: get current fsid
command: "{{ timeout_command }} {{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }} config get fsid"
register: rolling_update_fsid
delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
when:
- rolling_update
when: rolling_update
- name: set_fact fsid
set_fact:
fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
when:
- rolling_update
when: rolling_update
- name: set_fact ceph_current_status (convert to json)
set_fact:
@@ -103,8 +97,7 @@
- name: set_fact fsid from ceph_current_status
set_fact:
fsid: "{{ ceph_current_status.fsid }}"
when:
- ceph_current_status.fsid is defined
when: ceph_current_status.fsid is defined
- name: fsid realted tasks
when:
@@ -126,34 +119,29 @@
- name: set_fact mds_name ansible_hostname
set_fact:
mds_name: "{{ ansible_hostname }}"
when:
- not mds_use_fqdn
when: not mds_use_fqdn
- name: set_fact mds_name ansible_fqdn
set_fact:
mds_name: "{{ ansible_fqdn }}"
when:
- mds_use_fqdn
when: mds_use_fqdn
- name: set_fact rbd_client_directory_owner ceph
set_fact:
rbd_client_directory_owner: ceph
when:
- rbd_client_directory_owner is not defined
when: rbd_client_directory_owner is not defined
or not rbd_client_directory_owner
- name: set_fact rbd_client_directory_group rbd_client_directory_group
set_fact:
rbd_client_directory_group: ceph
when:
- rbd_client_directory_group is not defined
when: rbd_client_directory_group is not defined
or not rbd_client_directory_group
- name: set_fact rbd_client_directory_mode 0770
set_fact:
rbd_client_directory_mode: "0770"
when:
- rbd_client_directory_mode is not defined
when: rbd_client_directory_mode is not defined
or not rbd_client_directory_mode
- name: resolve device link(s)
@@ -281,15 +269,13 @@
- name: import_tasks set_radosgw_address.yml
import_tasks: set_radosgw_address.yml
when:
- inventory_hostname in groups.get(rgw_group_name, [])
when: inventory_hostname in groups.get(rgw_group_name, [])
- name: set_fact rgw_instances
set_fact:
rgw_instances: "{{ rgw_instances|default([]) | union([{'instance_name': 'rgw' + item|string, 'radosgw_address': _radosgw_address, 'radosgw_frontend_port': radosgw_frontend_port|int + item|int}]) }}"
with_sequence: start=0 end={{ radosgw_num_instances|int - 1 }}
when:
- inventory_hostname in groups.get(rgw_group_name, [])
when: inventory_hostname in groups.get(rgw_group_name, [])
- name: set ntp service name depending on OS family
block:


@@ -2,8 +2,7 @@
- name: set_fact _monitor_address to monitor_address_block ipv4
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv4_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | first }] }}"
with_items:
- "{{ groups.get(mon_group_name, []) }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address_block'] is defined
@@ -13,8 +12,7 @@
- name: set_fact _monitor_address to monitor_address_block ipv6
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv6_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | last | ipwrap }] }}"
with_items:
- "{{ groups.get(mon_group_name, []) }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address_block'] is defined
@@ -24,8 +22,7 @@
- name: set_fact _monitor_address to monitor_address
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ipwrap}] }}"
with_items:
- "{{ groups.get(mon_group_name, []) }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address'] is defined
@ -34,8 +31,7 @@
- name: set_fact _monitor_address to monitor_interface - ipv4
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
with_items:
- "{{ groups.get(mon_group_name, []) }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- ip_version == 'ipv4'
@ -46,8 +42,7 @@
- name: set_fact _monitor_address to monitor_interface - ipv6
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
with_items:
- "{{ groups.get(mon_group_name, []) }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- ip_version == 'ipv6'
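The variants in this file differ only in how a monitor's address is picked before being appended to _monitor_addresses. For the address-block cases, the ipaddr filter keeps just the addresses that fall inside monitor_address_block; a sketch with made-up facts:

# hypothetical facts for one mon host
#   ansible_all_ipv4_addresses: ['10.0.0.5', '192.168.42.11']
#   monitor_address_block: '192.168.42.0/24'
{{ ansible_all_ipv4_addresses | ipaddr(monitor_address_block) | first }}
# -> '192.168.42.11', recorded as {'name': <mon hostname>, 'addr': '192.168.42.11'}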


@ -35,11 +35,9 @@
- name: set_fact _radosgw_address to radosgw_interface - ipv4
set_fact:
_radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
when:
- ip_version == 'ipv4'
when: ip_version == 'ipv4'
- name: set_fact _radosgw_address to radosgw_interface - ipv6
set_fact:
_radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] }}"
when:
- ip_version == 'ipv6'
when: ip_version == 'ipv6'


@ -5,8 +5,7 @@
- name: update apt cache
apt:
update-cache: yes
when:
- ansible_os_family == 'Debian'
when: ansible_os_family == 'Debian'
register: result
until: result is succeeded
@ -140,8 +139,7 @@
group: root
mode: 0750
listen: "restart ceph mdss"
when:
- mds_group_name in group_names
when: mds_group_name in group_names
- name: restart ceph mds daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_mds_daemon.sh
@ -188,8 +186,7 @@
group: root
mode: 0750
listen: "restart ceph rgws"
when:
- rgw_group_name in group_names
when: rgw_group_name in group_names
- name: restart ceph rgw daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_rgw_daemon.sh
@ -236,8 +233,7 @@
group: root
mode: 0750
listen: "restart ceph nfss"
when:
- nfs_group_name in group_names
when: nfs_group_name in group_names
- name: restart ceph nfs daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_nfs_daemon.sh
@ -284,8 +280,7 @@
group: root
mode: 0750
listen: "restart ceph rbdmirrors"
when:
- rbdmirror_group_name in group_names
when: rbdmirror_group_name in group_names
- name: restart ceph rbd mirror daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_rbd_mirror_daemon.sh
@ -332,8 +327,7 @@
group: root
mode: 0750
listen: "restart ceph mgrs"
when:
- mgr_group_name in group_names
when: mgr_group_name in group_names
- name: restart ceph mgr daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_mgr_daemon.sh
@ -382,8 +376,7 @@
group: root
mode: 0750
listen: "restart ceph tcmu-runner"
when:
- iscsi_gw_group_name in group_names
when: iscsi_gw_group_name in group_names
- name: restart tcmu-runner
command: /usr/bin/env bash /tmp/restart_tcmu_runner.sh
@ -415,8 +408,7 @@
group: root
mode: 0750
listen: "restart ceph rbd-target-gw"
when:
- iscsi_gw_group_name in group_names
when: iscsi_gw_group_name in group_names
- name: restart rbd-target-gw
command: /usr/bin/env bash /tmp/restart_rbd_target_gw.sh
@ -448,8 +440,7 @@
group: root
mode: 0750
listen: "restart ceph rbd-target-api"
when:
- iscsi_gw_group_name in group_names
when: iscsi_gw_group_name in group_names
- name: restart rbd-target-api
command: /usr/bin/env bash /tmp/restart_rbd_target_api.sh


@ -1,10 +1,8 @@
---
- name: include check_running_containers.yml
include_tasks: check_running_containers.yml
when:
- containerized_deployment
when: containerized_deployment
- name: include check_socket_non_container.yml
include_tasks: check_socket_non_container.yml
when:
- not containerized_deployment
when: not containerized_deployment


@ -5,8 +5,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(mon_group_name, [])
when: inventory_hostname in groups.get(mon_group_name, [])
- name: check for an osd container
command: "{{ container_binary }} ps -q --filter='name=ceph-osd'"
@ -14,8 +13,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(osd_group_name, [])
when: inventory_hostname in groups.get(osd_group_name, [])
- name: check for a mds container
command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
@ -23,8 +21,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(mds_group_name, [])
when: inventory_hostname in groups.get(mds_group_name, [])
- name: check for a rgw container
command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
@ -32,8 +29,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(rgw_group_name, [])
when: inventory_hostname in groups.get(rgw_group_name, [])
- name: check for a mgr container
command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
@ -41,8 +37,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(mgr_group_name, [])
when: inventory_hostname in groups.get(mgr_group_name, [])
- name: check for a rbd mirror container
command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
@ -50,8 +45,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(rbdmirror_group_name, [])
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: check for a nfs container
command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
@ -59,8 +53,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(nfs_group_name, [])
when: inventory_hostname in groups.get(nfs_group_name, [])
- name: check for a tcmu-runner container
command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'"
@ -68,8 +61,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-api container
command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'"
@ -77,8 +69,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-gw container
command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'"
@ -86,5 +77,4 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])


@ -5,8 +5,7 @@
failed_when: false
check_mode: no
register: mon_socket_stat
when:
- inventory_hostname in groups.get(mon_group_name, [])
when: inventory_hostname in groups.get(mon_group_name, [])
- name: check if the ceph mon socket is in-use
command: fuser --silent {{ mon_socket_stat.stdout }}
@ -34,8 +33,7 @@
failed_when: false
check_mode: no
register: osd_socket_stat
when:
- inventory_hostname in groups.get(osd_group_name, [])
when: inventory_hostname in groups.get(osd_group_name, [])
- name: check if the ceph osd socket is in-use
command: fuser --silent {{ osd_socket_stat.stdout }}
@ -63,8 +61,7 @@
failed_when: false
check_mode: no
register: mds_socket_stat
when:
- inventory_hostname in groups.get(mds_group_name, [])
when: inventory_hostname in groups.get(mds_group_name, [])
- name: check if the ceph mds socket is in-use
command: fuser --silent {{ mds_socket_stat.stdout }}
@ -92,8 +89,7 @@
failed_when: false
check_mode: no
register: rgw_socket_stat
when:
- inventory_hostname in groups.get(rgw_group_name, [])
when: inventory_hostname in groups.get(rgw_group_name, [])
- name: check if the ceph rgw socket is in-use
command: fuser --silent {{ rgw_socket_stat.stdout }}
@ -121,8 +117,7 @@
failed_when: false
check_mode: no
register: mgr_socket_stat
when:
- inventory_hostname in groups.get(mgr_group_name, [])
when: inventory_hostname in groups.get(mgr_group_name, [])
- name: check if the ceph mgr socket is in-use
command: fuser --silent {{ mgr_socket_stat.stdout }}
@ -150,8 +145,7 @@
failed_when: false
check_mode: no
register: rbd_mirror_socket_stat
when:
- inventory_hostname in groups.get(rbdmirror_group_name, [])
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: check if the ceph rbd mirror socket is in-use
command: fuser --silent {{ rbd_mirror_socket_stat.stdout }}
@ -178,8 +172,7 @@
failed_when: false
check_mode: no
register: nfs_socket_stat
when:
- inventory_hostname in groups.get(nfs_group_name, [])
when: inventory_hostname in groups.get(nfs_group_name, [])
- name: check if the ceph nfs ganesha socket is in-use
command: fuser --silent {{ nfs_socket_stat.stdout }}
@ -206,8 +199,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-api
command: "pgrep rbd-target-api"
@ -215,8 +207,7 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-gw
command: "pgrep name=rbd-target-gw"
@ -224,5 +215,4 @@
changed_when: false
failed_when: false
check_mode: no
when:
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])


@ -7,13 +7,10 @@
ignore_errors: true
check_mode: no
changed_when: false
tags:
- firewall
when:
- not containerized_deployment
tags: firewall
when: not containerized_deployment
- when:
- (firewalld_pkg_query.get('rc', 1) == 0
- when: (firewalld_pkg_query.get('rc', 1) == 0
or is_atomic)
block:
- name: start firewalld
@ -36,8 +33,7 @@
when:
- mon_group_name is defined
- mon_group_name in group_names
tags:
- firewall
tags: firewall
- name: open manager ports
firewalld:
@ -50,8 +46,7 @@
when:
- mgr_group_name is defined
- mgr_group_name in group_names
tags:
- firewall
tags: firewall
- name: open osd ports
firewalld:
@ -67,8 +62,7 @@
when:
- osd_group_name is defined
- osd_group_name in group_names
tags:
- firewall
tags: firewall
- name: open rgw ports
firewalld:
@ -81,8 +75,7 @@
when:
- rgw_group_name is defined
- rgw_group_name in group_names
tags:
- firewall
tags: firewall
- name: open mds ports
firewalld:
@ -95,8 +88,7 @@
when:
- mds_group_name is defined
- mds_group_name in group_names
tags:
- firewall
tags: firewall
- name: open nfs ports
firewalld:
@ -109,8 +101,7 @@
when:
- nfs_group_name is defined
- nfs_group_name in group_names
tags:
- firewall
tags: firewall
- name: open nfs ports (portmapper)
firewalld:
@ -123,8 +114,7 @@
when:
- nfs_group_name is defined
- nfs_group_name in group_names
tags:
- firewall
tags: firewall
- name: open rbdmirror ports
firewalld:
@ -137,8 +127,7 @@
when:
- rbdmirror_group_name is defined
- rbdmirror_group_name in group_names
tags:
- firewall
tags: firewall
- name: open iscsi target ports
firewalld:
@ -151,8 +140,7 @@
when:
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
tags:
- firewall
tags: firewall
- name: open iscsi api ports
firewalld:
@ -165,7 +153,6 @@
when:
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
tags:
- firewall
tags: firewall
- meta: flush_handlers


@ -10,8 +10,7 @@
state: present
register: result
until: result is succeeded
when:
- ntp_daemon_type == "ntpd"
when: ntp_daemon_type == "ntpd"
- name: install chrony
package:
@ -19,8 +18,7 @@
state: present
register: result
until: result is succeeded
when:
- ntp_daemon_type == "chronyd"
when: ntp_daemon_type == "chronyd"
- name: enable the ntp daemon and disable the rest
block:
@ -29,13 +27,11 @@
notify:
- disable ntpd
- disable chronyd
when:
- ntp_daemon_type == "timesyncd"
when: ntp_daemon_type == "timesyncd"
- name: disable time sync using timesyncd if we are not using it
command: timedatectl set-ntp no
when:
- ntp_daemon_type != "timesyncd"
when: ntp_daemon_type != "timesyncd"
- name: enable ntpd
service:
@ -45,8 +41,7 @@
notify:
- disable chronyd
- disable timesyncd
when:
- ntp_daemon_type == "ntpd"
when: ntp_daemon_type == "ntpd"
- name: enable chronyd
service:
@ -56,5 +51,4 @@
notify:
- disable ntpd
- disable timesyncd
when:
- ntp_daemon_type == "chronyd"
when: ntp_daemon_type == "chronyd"


@ -2,8 +2,7 @@
- name: make sure gateway_ip_list is configured
fail:
msg: "you must set a list of IPs (comma separated) for gateway_ip_list"
when:
- gateway_ip_list == "0.0.0.0"
when: gateway_ip_list == "0.0.0.0"
- name: copy admin key
copy:
@ -12,8 +11,7 @@
owner: "root"
group: "root"
mode: "{{ ceph_keyring_permissions }}"
when:
- cephx
when: cephx
- name: deploy gateway settings, used by the ceph_iscsi_config modules
template:
@ -44,5 +42,4 @@
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default(osd_pool_default_size) }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when:
- rbd_pool_size | default(osd_pool_default_size) != ceph_osd_pool_default_size
when: rbd_pool_size | default(osd_pool_default_size) != ceph_osd_pool_default_size


@ -11,8 +11,7 @@
- tcmu-runner
- rbd-target-gw
- rbd-target-api
notify:
- restart ceph {{ item }}
notify: restart ceph {{ item }}
- name: systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers
systemd:


@ -26,8 +26,7 @@
become: False
run_once: True
with_items: "{{ crt_files_exist.results }}"
when:
- not item.stat.exists
when: not item.stat.exists
- name: create pem
shell: >
@ -38,8 +37,7 @@
run_once: True
register: pem
with_items: "{{ crt_files_exist.results }}"
when:
- not item.stat.exists
when: not item.stat.exists
- name: create public key from pem
shell: >
@ -48,10 +46,8 @@
delegate_to: localhost
become: False
run_once: True
when:
- pem.changed
tags:
- skip_ansible_lint
when: pem.changed
tags: skip_ansible_lint
- name: copy crt file(s) to gateway nodes
copy:


@ -4,23 +4,19 @@
- name: include non-container/prerequisites.yml
include_tasks: non-container/prerequisites.yml
when:
- not containerized_deployment
when: not containerized_deployment
# deploy_ssl_keys used the ansible controller to create self-signed crt/key/pub files
# and transfers them to /etc/ceph directory on each controller. SSL certs are used by
# the API for https support.
- name: include deploy_ssl_keys.yml
include_tasks: deploy_ssl_keys.yml
when:
- generate_crt|bool
when: generate_crt|bool
- name: include non-container/configure_iscsi.yml
include_tasks: non-container/configure_iscsi.yml
when:
- not containerized_deployment
when: not containerized_deployment
- name: include containerized.yml
include_tasks: container/containerized.yml
when:
- containerized_deployment
when: containerized_deployment
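On the deploy_ssl_keys comment above: the crt/key/pub generation it describes amounts to a self-signed OpenSSL round-trip run on the ansible controller and copied out to the gateway nodes. A rough sketch of the idea; the file paths and CN are illustrative, not taken from the role:

- name: generate a self-signed crt/key on the controller (sketch)
  shell: >
    openssl req -newkey rsa:2048 -nodes -x509 -days 365
    -keyout /tmp/iscsi-gateway.key -out /tmp/iscsi-gateway.crt
    -subj '/CN=iscsi-gw'
  delegate_to: localhost
  become: false
  run_once: true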


@ -1,7 +1,6 @@
---
- name: red hat based systems tasks
when:
- ansible_os_family == 'RedHat'
when: ansible_os_family == 'RedHat'
block:
- name: when ceph_iscsi_config_dev is true
when:


@ -7,8 +7,7 @@
set_fact:
admin_keyring:
- "/etc/ceph/{{ cluster }}.client.admin.keyring"
when:
- copy_admin_key
when: copy_admin_key
- name: set_fact ceph_config_keys
set_fact:
@ -18,8 +17,7 @@
- name: merge ceph_config_keys and admin_keyring
set_fact:
ceph_config_keys: "{{ ceph_config_keys + admin_keyring }}"
when:
- copy_admin_key
when: copy_admin_key
- name: stat for ceph config and keys
stat:
@ -53,8 +51,7 @@
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph mdss
notify: restart ceph mdss
- name: systemd start mds container
systemd:


@ -40,15 +40,13 @@
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:
- item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- name: customize pool min_size
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:
- (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- name: check if ceph filesystem already exists
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
@ -61,8 +59,7 @@
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- check_existing_cephfs.rc != 0
when: check_existing_cephfs.rc != 0
- name: assign application to cephfs pools
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} cephfs"
@ -71,12 +68,10 @@
- "{{ cephfs_metadata }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- check_existing_cephfs.rc != 0
when: check_existing_cephfs.rc != 0
- name: set max_mds
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- mds_max_mds > 1
when: mds_max_mds > 1


@ -8,8 +8,7 @@
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
when:
- containerized_deployment
when: containerized_deployment
- name: include common.yml
include_tasks: common.yml


@ -24,8 +24,7 @@
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
when:
- groups.get(mgr_group_name, []) | length == 0 # the key is present already since one of the mons created it in "create ceph mgr keyring(s)"
when: groups.get(mgr_group_name, []) | length == 0 # the key is present already since one of the mons created it in "create ceph mgr keyring(s)"
- name: copy ceph keyring(s) if needed
copy:
@ -48,5 +47,4 @@
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
when:
- cephx
when: cephx


@ -2,8 +2,7 @@
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment
when: containerized_deployment
- name: include common.yml
include_tasks: common.yml


@ -27,12 +27,10 @@
command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
with_items: "{{ _ceph_mgr_modules.get('enabled_modules', []) }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- item not in ceph_mgr_modules
when: item not in ceph_mgr_modules
- name: add modules to ceph-mgr
command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
with_items: "{{ ceph_mgr_modules }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
when: (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
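The two tasks above reconcile the live mgr module list with ceph_mgr_modules: whatever is enabled but unwanted gets disabled, and whatever is wanted and currently disabled (or everything, while nothing is disabled yet) gets enabled. A worked example with made-up values:

# _ceph_mgr_modules.enabled_modules: ['balancer', 'restful']
# _disabled_ceph_mgr_modules:        ['status']
# ceph_mgr_modules:                  ['status', 'restful']
#
# disable task: balancer  (enabled, but not in ceph_mgr_modules)
# enable task:  status    (listed as disabled)
# restful:      untouched (already enabled)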


@ -5,8 +5,7 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
when:
- ansible_os_family in ['RedHat', 'Suse']
when: ansible_os_family in ['RedHat', 'Suse']
- name: install ceph-mgr packages for debian
apt:
@ -15,5 +14,4 @@
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else '' }}"
register: result
until: result is succeeded
when:
- ansible_os_family == 'Debian'
when: ansible_os_family == 'Debian'


@ -25,10 +25,8 @@
owner: "root"
group: "root"
mode: "0644"
when:
- containerized_deployment
notify:
- restart ceph mgrs
when: containerized_deployment
notify: restart ceph mgrs
- name: systemd start mgr
systemd:


@ -17,8 +17,7 @@
changed_when: false
- name: tasks for MONs when cephx is enabled
when:
- cephx
when: cephx
block:
- name: fetch ceph initial keys
ceph_key:
@ -48,8 +47,7 @@
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items:
- "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
with_items: "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
run_once: True
delegate_to: "{{ groups[mon_group_name][0] }}"
@ -58,22 +56,19 @@
src: "{{ ceph_conf_key_directory }}/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
dest: "{{ fetch_directory }}/{{ fsid }}/{{ ceph_conf_key_directory }}/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
flat: yes
with_items:
- "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
with_items: "{{ groups.get(mon_group_name) if groups.get(mgr_group_name, []) | length == 0 else groups.get(mgr_group_name, []) }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: copy keys to the ansible server
fetch:
src: "{{ item }}"
dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
flat: yes
with_items:
- /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring
- /etc/ceph/{{ cluster }}.client.admin.keyring
when:
- cephx
- inventory_hostname == groups[mon_group_name] | last
- name: copy keys to the ansible server
fetch:
src: "{{ item }}"
dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
flat: yes
with_items:
- /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring
- /etc/ceph/{{ cluster }}.client.admin.keyring
when: inventory_hostname == groups[mon_group_name] | last


@ -15,8 +15,7 @@
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush rule create-simple {{ item.name }} {{ item.root }} {{ item.type }}"
with_items: "{{ crush_rules | unique }}"
changed_when: false
when:
- inventory_hostname == groups.get(mon_group_name) | last
when: inventory_hostname == groups.get(mon_group_name) | last
- name: get id for new default crush rule
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd -f json crush rule dump {{ item.name }}"


@ -108,8 +108,7 @@
--keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when:
- cephx
when: cephx
- name: ceph monitor mkfs without keyring
command: >
@ -122,5 +121,4 @@
--fsid {{ fsid }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when:
- not cephx
when: not cephx


@ -2,8 +2,7 @@
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
when:
- containerized_deployment
when: containerized_deployment
- name: include deploy_monitors.yml
include_tasks: deploy_monitors.yml
@ -17,8 +16,7 @@
- name: include_tasks ceph_keys.yml
include_tasks: ceph_keys.yml
when:
- not switch_to_containers | default(False)
when: not switch_to_containers | default(False)
- name: include secure_cluster.yml
include_tasks: secure_cluster.yml
@ -28,5 +26,5 @@
- name: crush_rules.yml
include_tasks: crush_rules.yml
when:
- crush_rule_config
when: crush_rule_config


@ -27,8 +27,7 @@
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph mons
notify: restart ceph mons
when: containerized_deployment
- name: start the monitor service


@ -2,8 +2,7 @@
- name: set_fact docker_exec_cmd_nfs
set_fact:
docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment
when: containerized_deployment
- name: check if "{{ ceph_nfs_rgw_user }}" exists
command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
@ -12,8 +11,7 @@
changed_when: false
failed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- nfs_obj_gw
when: nfs_obj_gw
- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"


@ -12,8 +12,7 @@
state: present
register: result
until: result is succeeded
when:
- selinuxstatus.stdout != 'Disabled'
when: selinuxstatus.stdout != 'Disabled'
- name: test if ganesha_t is already permissive
shell: |


@ -2,21 +2,18 @@
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
when:
- containerized_deployment
when: containerized_deployment
- name: include common.yml
include_tasks: common.yml
- name: include pre_requisite_non_container.yml
include_tasks: pre_requisite_non_container.yml
when:
- not containerized_deployment
when: not containerized_deployment
- name: include pre_requisite_container.yml
include_tasks: pre_requisite_container.yml
when:
- containerized_deployment
when: containerized_deployment
- name: include create_rgw_nfs_user.yml
import_tasks: create_rgw_nfs_user.yml


@ -3,8 +3,7 @@
set_fact:
admin_keyring:
- "/etc/ceph/{{ cluster }}.client.admin.keyring"
when:
- copy_admin_key
when: copy_admin_key
- name: set_fact ceph_config_keys
set_fact:
@ -14,8 +13,7 @@
- name: merge ceph_config_keys and admin_keyring
set_fact:
ceph_config_keys: "{{ ceph_config_keys + admin_keyring }}"
when:
- copy_admin_key
when: copy_admin_key
- name: stat for config and keys
stat:
@ -39,8 +37,7 @@
with_together:
- "{{ ceph_config_keys }}"
- "{{ statconfig.results }}"
when:
- item.1.stat.exists
when: item.1.stat.exists
- name: create dbus service file
become: true
@ -50,10 +47,8 @@
owner: "root"
group: "root"
mode: "0644"
when:
- ceph_nfs_dynamic_exports
when: ceph_nfs_dynamic_exports
- name: reload dbus configuration
command: "killall -SIGHUP dbus-daemon"
when:
- ceph_nfs_dynamic_exports
when: ceph_nfs_dynamic_exports


@ -1,13 +1,11 @@
---
- name: include red hat based system related tasks
include_tasks: pre_requisite_non_container_red_hat.yml
when:
- ansible_os_family == 'RedHat'
when: ansible_os_family == 'RedHat'
- name: include debian based system related tasks
include_tasks: pre_requisite_non_container_debian.yml
when:
- ansible_os_family == 'Debian'
when: ansible_os_family == 'Debian'
- name: install nfs rgw/cephfs gateway - suse
zypper:
@ -41,12 +39,10 @@
- { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
- { name: "/var/log/ceph", create: true }
- { name: "/var/run/ceph", create: true }
when:
- item.create|bool
when: item.create|bool
- name: cephx related tasks
when:
- cephx
when: cephx
block:
- name: copy bootstrap cephx keys
copy:
@ -55,14 +51,11 @@
owner: "ceph"
group: "ceph"
mode: "0600"
with_items:
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
when:
- item.copy_key|bool
with_items: { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
when: item.copy_key|bool
- name: nfs object gateway related tasks
when:
- nfs_obj_gw
when: nfs_obj_gw
block:
- name: create rados gateway keyring
command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring


@ -1,11 +1,9 @@
---
- name: debian based systems - repo handling
when:
- ceph_origin == 'repository'
when: ceph_origin == 'repository'
block:
- name: stable repos specific tasks
when:
- nfs_ganesha_stable
when: nfs_ganesha_stable
- ceph_repository == 'community'
block:
- name: add nfs-ganesha stable repository
@ -22,8 +20,7 @@
retries: 5
delay: 2
until: update_ganesha_apt_cache | success
when:
- add_ganesha_apt_repo | changed
when: add_ganesha_apt_repo | changed
- name: debian based systems - dev repos specific tasks
when:
@ -90,13 +87,11 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
when:
- nfs_file_gw
when: nfs_file_gw
- name: install red hat storage nfs obj gateway
apt:
name: nfs-ganesha-rgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
when:
- nfs_obj_gw
when: nfs_obj_gw


@ -1,7 +1,6 @@
---
- name: red hat based systems - repo handling
when:
- ceph_origin == 'repository'
when: ceph_origin == 'repository'
block:
- name: add nfs-ganesha stable repository
yum_repository:
@ -42,8 +41,7 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
when:
- nfs_file_gw
when: nfs_file_gw
- name: install redhat nfs-ganesha-rgw and ceph-radosgw packages
package:
@ -51,5 +49,4 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
when:
- nfs_obj_gw
when: nfs_obj_gw


@ -2,8 +2,7 @@
- name: set_fact docker_exec_cmd_nfs
set_fact:
docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment
when: containerized_deployment
- name: check if rados index object exists
shell: "{{ docker_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
@ -11,8 +10,7 @@
failed_when: false
register: rados_index_exists
check_mode: no
when:
- ceph_nfs_rados_backend
when: ceph_nfs_rados_backend
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
@ -41,8 +39,7 @@
group: "root"
mode: "0644"
config_type: ini
notify:
- restart ceph nfss
notify: restart ceph nfss
- name: create exports directory
file:
@ -51,8 +48,7 @@
owner: "root"
group: "root"
mode: "0755"
when:
- ceph_nfs_dynamic_exports
when: ceph_nfs_dynamic_exports
- name: create exports dir index file
copy:
@ -62,8 +58,7 @@
owner: "root"
group: "root"
mode: "0644"
when:
- ceph_nfs_dynamic_exports
when: ceph_nfs_dynamic_exports
- name: generate systemd unit file
become: true
@ -73,10 +68,8 @@
owner: "root"
group: "root"
mode: "0644"
when:
- containerized_deployment
notify:
- restart ceph nfss
when: containerized_deployment
notify: restart ceph nfss
- name: systemd start nfs container
systemd:

View File

@ -6,8 +6,7 @@
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0755"
when:
- cephx
when: cephx
with_items:
- /var/lib/ceph/bootstrap-osd/
- /var/lib/ceph/osd/

View File

@ -20,18 +20,15 @@
when:
- containerized_deployment
- ceph_osd_numactl_opts != ""
tags:
- with_pkg
tags: with_pkg
- name: install lvm2
package:
name: lvm2
register: result
until: result is succeeded
when:
- not is_atomic
tags:
- with_pkg
when: not is_atomic
tags: with_pkg
- name: include_tasks common.yml
include_tasks: common.yml


@ -39,8 +39,7 @@
- "{{ created_pools.results }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- item.1.get('rc', 0) != 0
when: item.1.get('rc', 0) != 0
- name: customize pool size
command: >
@ -49,8 +48,7 @@
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when:
- item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- name: customize pool min_size
command: >
@ -59,16 +57,14 @@
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when:
- (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- name: assign application to pool(s)
command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
with_items: "{{ openstack_pools | unique }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- item.application is defined
when: item.application is defined
- name: create openstack cephx key(s)
ceph_key:


@ -1,7 +1,6 @@
---
- name: container specific tasks
when:
- containerized_deployment
when: containerized_deployment
block:
- name: umount ceph disk (if on openstack)
mount:
@ -9,8 +8,7 @@
src: /dev/vdb
fstype: ext3
state: unmounted
when:
- ceph_docker_on_openstack
when: ceph_docker_on_openstack
- name: generate ceph osd docker run script
become: true
@ -21,8 +19,7 @@
group: "root"
mode: "0744"
setype: "bin_t"
notify:
- restart ceph osds
notify: restart ceph osds
# this is for ceph-disk, the ceph-disk command is gone so we have to list /var/lib/ceph
- name: get osd ids
@ -49,10 +46,8 @@
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph osds
when:
- containerized_deployment
notify: restart ceph osds
when: containerized_deployment
- name: systemd start osd
systemd:
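On the ceph-disk note above: with the ceph-disk command gone, the OSD ids are recovered from the data directories themselves, whose names end in the id (e.g. /var/lib/ceph/osd/ceph-0). A sketch of how that listing can look; the exact command in the role may differ:

- name: get osd ids (sketch)
  shell: ls /var/lib/ceph/osd/ | sed 's/.*-//'
  register: osd_ids
  changed_when: false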


@ -23,8 +23,7 @@
group: "root"
mode: "0755"
register: "tmpfiles_d"
when:
- disable_transparent_hugepage
when: disable_transparent_hugepage
- name: disable transparent hugepage
template:
@ -35,8 +34,7 @@
mode: "0644"
force: "yes"
validate: "systemd-tmpfiles --create %s"
when:
- disable_transparent_hugepage
when: disable_transparent_hugepage
- name: get default vm.min_free_kbytes
command: sysctl -b vm.min_free_kbytes


@ -18,8 +18,7 @@
-o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
args:
creates: /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
when:
- not containerized_deployment
when: not containerized_deployment
- name: set rbd-mirror key permissions
file:
@ -27,5 +26,4 @@
owner: "ceph"
group: "ceph"
mode: "{{ ceph_keyring_permissions }}"
when:
- not containerized_deployment
when: not containerized_deployment


@ -8,8 +8,7 @@
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph rbdmirrors
notify: restart ceph rbdmirrors
- name: systemd start rbd mirror container
systemd:


@ -2,23 +2,19 @@
- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
when:
- containerized_deployment
when: containerized_deployment
- name: include pre_requisite.yml
include_tasks: pre_requisite.yml
when:
- not containerized_deployment
when: not containerized_deployment
- name: include common.yml
include_tasks: common.yml
when:
- cephx
when: cephx
- name: include start_rbd_mirror.yml
include_tasks: start_rbd_mirror.yml
when:
- not containerized_deployment
when: not containerized_deployment
- name: include configure_mirroring.yml
include_tasks: configure_mirroring.yml
@ -28,5 +24,4 @@
- name: include docker/main.yml
include_tasks: docker/main.yml
when:
- containerized_deployment
when: containerized_deployment


@ -7,5 +7,4 @@
state: present
register: result
until: result is succeeded
tags:
- package-install
tags: package-install


@ -6,8 +6,7 @@
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0755"
with_items:
- "{{ rbd_client_admin_socket_path }}"
with_items: "{{ rbd_client_admin_socket_path }}"
- name: create rados gateway instance directories
file:
@ -17,8 +16,7 @@
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0755"
with_items: "{{ rgw_instances }}"
when:
- rgw_instances is defined
when: rgw_instances is defined
- name: copy ceph keyring(s) if needed
copy:


@ -19,8 +19,7 @@
owner: "root"
group: "root"
mode: "0644"
notify:
- restart ceph rgws
notify: restart ceph rgws
- name: systemd start rgw container
systemd:


@ -23,8 +23,7 @@
when: rgw_multisite
- name: rgw pool related tasks
when:
- rgw_create_pools is defined
when: rgw_create_pools is defined
block:
- name: create rgw pools if rgw_create_pools is defined
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
@ -43,5 +42,4 @@
run_once: true
register: result
until: result is succeeded
when:
- item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size


@ -12,8 +12,7 @@
failed_when: false
register: rgw_remove_zone_from_zonegroup
changed_when: rgw_remove_zone_from_zonegroup.rc == 0
notify:
- update period
notify: update period
- name: delete the zone
command: radosgw-admin zone delete --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }}
@ -44,5 +43,4 @@
when:
- rgw_zone is defined
- rgw_zonegroup is defined
notify:
- restart rgw
notify: restart rgw


@ -22,5 +22,4 @@
section: "client.rgw.{{ ansible_hostname }}"
option: "rgw_zone"
value: "{{ rgw_zone }}"
notify:
- restart rgw
notify: restart rgw


@ -3,37 +3,30 @@
command: "{{ docker_exec_cmd }} radosgw-admin realm create --rgw-realm={{ rgw_realm }} --default"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- "'No such file or directory' in realmcheck.stderr"
when: "'No such file or directory' in realmcheck.stderr"
- name: create the zonegroup
command: "{{ docker_exec_cmd }} radosgw-admin zonegroup create --rgw-zonegroup={{ rgw_zonegroup }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --master --default"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- "'No such file or directory' in zonegroupcheck.stderr"
when: "'No such file or directory' in zonegroupcheck.stderr"
- name: create the zone
command: "{{ docker_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default --master"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- "'No such file or directory' in zonecheck.stderr"
when: "'No such file or directory' in zonecheck.stderr"
- name: create the zone user
command: "{{ docker_exec_cmd }} radosgw-admin user create --uid={{ rgw_zone_user }} --display-name=\"Zone User\" --access-key={{ system_access_key }} --secret={{ system_secret_key }} --system"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- "'could not fetch user info: no user info saved' in usercheck.stderr"
notify:
- update period
when: "'could not fetch user info: no user info saved' in usercheck.stderr"
notify: update period
- name: add other endpoints to the zone
command: "{{ docker_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- rgw_multisite_endpoints_list is defined
notify:
- update period
when: rgw_multisite_endpoints_list is defined
notify: update period


@ -3,15 +3,13 @@
command: "{{ docker_exec_cmd }} radosgw-admin realm pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- "'No such file or directory' in realmcheck.stderr"
when: "'No such file or directory' in realmcheck.stderr"
- name: fetch the period
command: "{{ docker_exec_cmd }} radosgw-admin period pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- "'No such file or directory' in realmcheck.stderr"
when: "'No such file or directory' in realmcheck.stderr"
- name: set default realm
command: "{{ docker_exec_cmd }} radosgw-admin realm default --rgw-realm={{ rgw_realm }}"
@ -29,16 +27,12 @@
command: "{{ docker_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- "'No such file or directory' in zonecheck.stderr"
notify:
- update period
when: "'No such file or directory' in zonecheck.stderr"
notify: update period
- name: add other endpoints to the zone
command: "{{ docker_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- rgw_multisite_endpoints_list is defined
notify:
- update period
when: rgw_multisite_endpoints_list is defined
notify: update period


@ -5,8 +5,7 @@
state: present
register: result
until: result is succeeded
when:
- ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
- name: install libnss3-tools on debian
package:
@ -14,8 +13,7 @@
state: present
register: result
until: result is succeeded
when:
- ansible_pkg_mgr == 'apt'
when: ansible_pkg_mgr == 'apt'
- name: create nss directory for keystone certificates
file:
@ -33,5 +31,4 @@
with_items:
- "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | certutil -d {{ radosgw_nss_db_path }} -A -n ca -t 'TCu,Cu,Tuw'"
- "openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | certutil -A -d {{ radosgw_nss_db_path }} -n signing_cert -t 'P,P,P'"
tags:
- skip_ansible_lint
tags: skip_ansible_lint


@ -5,8 +5,7 @@
creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring
changed_when: false
with_items: "{{ rgw_instances }}"
when:
- cephx
when: cephx
- name: set rados gateway instance key permissions
file:
@ -15,5 +14,4 @@
group: "ceph"
mode: "0600"
with_items: "{{ rgw_instances }}"
when:
- cephx
when: cephx


@ -3,8 +3,7 @@
file:
state: directory
path: "/etc/systemd/system/ceph-radosgw@.service.d/"
when:
- ceph_rgw_systemd_overrides is defined
when: ceph_rgw_systemd_overrides is defined
- name: add ceph-rgw systemd service overrides
config_template:
@ -12,8 +11,7 @@
dest: "/etc/systemd/system/ceph-radosgw@.service.d/ceph-radosgw-systemd-overrides.conf"
config_overrides: "{{ ceph_rgw_systemd_overrides | default({}) }}"
config_type: "ini"
when:
- ceph_rgw_systemd_overrides is defined
when: ceph_rgw_systemd_overrides is defined
- name: start rgw instance
service:


@ -2,14 +2,12 @@
- name: "fail if {{ monitor_interface }} does not exist on {{ inventory_hostname }}"
fail:
msg: "{{ monitor_interface }} does not exist on {{ inventory_hostname }}"
when:
- monitor_interface not in ansible_interfaces
when: monitor_interface not in ansible_interfaces
- name: "fail if {{ monitor_interface }} is not active on {{ inventory_hostname }}"
fail:
msg: "{{ monitor_interface }} is not active on {{ inventory_hostname }}"
when:
- not hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['active']
when: not hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['active']
- name: "fail if {{ monitor_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
fail:


@ -2,14 +2,12 @@
- name: "fail if {{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
fail:
msg: "{{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
when:
- radosgw_interface not in ansible_interfaces
when: radosgw_interface not in ansible_interfaces
- name: "fail if {{ radosgw_interface }} is not active on {{ inventory_hostname }}"
fail:
msg: "{{ radosgw_interface }} is not active on {{ inventory_hostname }}"
when:
- hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['active'] == "false"
when: hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['active'] == "false"
- name: "fail if {{ radosgw_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
fail:

Some files were not shown because too many files have changed in this diff.