Use ansible_facts

Using ansible_* vars that are populated with INJECT_FACTS_AS_VARS=True
is not very performant. To support turning that setting off, we need to
update the references to use ansible_facts[<thing>] instead of
ansible_<thing>.

Related: ansible#73654
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1935406
Signed-off-by: Alex Schultz <aschultz@redhat.com>
(cherry picked from commit a7f2fa73e6)
pull/6363/head
Alex Schultz 2021-03-03 07:43:50 -07:00 committed by Guillaume Abrioux
parent ab857d8b54
commit 56aac327dd
173 changed files with 520 additions and 520 deletions
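
As a minimal sketch of the pattern applied throughout this change (the debug tasks below are illustrative examples, not taken from the repository), a reference such as ansible_hostname becomes a lookup into the ansible_facts dictionary, which keeps working once fact injection is turned off in ansible.cfg:

# ansible.cfg -- once all references go through ansible_facts,
# fact injection can be disabled for a performance gain:
# [defaults]
# inject_facts_as_vars = False

# before: relies on the injected ansible_* variable
- name: print the short hostname (injected var)
  debug:
    msg: "{{ ansible_hostname }}"

# after: reads the same value from the facts dictionary
- name: print the short hostname (ansible_facts)
  debug:
    msg: "{{ ansible_facts['hostname'] }}"

Delegated lookups follow the same shape, e.g. hostvars[host]['ansible_hostname'] becomes hostvars[host]['ansible_facts']['hostname'], as the hunks below show.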


@ -82,7 +82,7 @@ dummy:
#centos_package_dependencies:
# - epel-release
# - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
#redhat_package_dependencies: []
@ -152,7 +152,7 @@ dummy:
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@ -180,7 +180,7 @@ dummy:
#
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
#ceph_stable_openstack_release_uca: queens
#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
# REPOSITORY: openSUSE OBS
#
@ -190,7 +190,7 @@ dummy:
# usually has newer Ceph releases than the normal distro repository.
#
#
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
# REPOSITORY: DEV
#
@ -253,7 +253,7 @@ dummy:
#ceph_conf_key_directory: /etc/ceph
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
# Permissions for keyring files in /etc/ceph
#ceph_keyring_permissions: '0600'
@ -528,7 +528,7 @@ dummy:
# global:
# foo: 1234
# bar: 5678
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
#ceph_conf_overrides: {}


@ -43,14 +43,14 @@ dummy:
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
# TCMU_RUNNER resource limitation
#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_tcmu_runner_docker_cpu_limit: 1
# RBD_TARGET_GW resource limitation
#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_target_gw_docker_cpu_limit: 1
# RBD_TARGET_API resource limitation
#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_target_api_docker_cpu_limit: 1


@ -27,13 +27,13 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
#ceph_mds_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mds_docker_cpu_limit: 4
# we currently force MDS_NAME to hostname because of a bug in ceph-docker
# fix here: https://github.com/ceph/ceph-docker/pull/770
# this will go away soon.
#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }}
#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
#ceph_config_keys: [] # DON'T TOUCH ME


@ -41,7 +41,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
#ceph_mgr_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mgr_docker_cpu_limit: 1
#ceph_mgr_docker_extra_env:


@ -45,7 +45,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mon_docker_extra_env' variable.
#ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mon_docker_cpu_limit: 1
#ceph_mon_container_listen_port: 3300


@ -25,7 +25,7 @@ dummy:
#ceph_nfs_enable_service: true
# ceph-nfs systemd service uses ansible's hostname as an instance id,
# so service name is ceph-nfs@{{ ansible_hostname }}, this is not
# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
# such case it's better to have constant instance id instead which
# can be set by 'ceph_nfs_service_suffix'
@ -82,7 +82,7 @@ dummy:
# they must be configured.
#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
#rgw_client_name: client.rgw.{{ ansible_hostname }}
#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
###################
# CONFIG OVERRIDE #


@ -169,7 +169,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_osd_docker_extra_env' variable.
#ceph_osd_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_osd_docker_cpu_limit: 4
# The next two variables are undefined, and thus, unused by default.


@ -50,7 +50,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_mirror_docker_cpu_limit: 1
#ceph_rbd_mirror_docker_extra_env:


@ -82,7 +82,7 @@ dummy:
#centos_package_dependencies:
# - epel-release
# - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
#redhat_package_dependencies: []
@ -152,7 +152,7 @@ ceph_repository: rhcs
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@ -180,7 +180,7 @@ ceph_rhcs_version: 5
#
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
#ceph_stable_openstack_release_uca: queens
#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
# REPOSITORY: openSUSE OBS
#
@ -190,7 +190,7 @@ ceph_rhcs_version: 5
# usually has newer Ceph releases than the normal distro repository.
#
#
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
# REPOSITORY: DEV
#
@ -253,7 +253,7 @@ ceph_iscsi_config_dev: false
#ceph_conf_key_directory: /etc/ceph
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
# Permissions for keyring files in /etc/ceph
#ceph_keyring_permissions: '0600'
@ -528,7 +528,7 @@ ceph_iscsi_config_dev: false
# global:
# foo: 1234
# bar: 5678
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
#ceph_conf_overrides: {}


@ -283,12 +283,12 @@
delegate_to: '{{ groups[mon_group_name][0] }}'
- name: manage nodes with cephadm
command: "{{ ceph_cmd }} orch host add {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ group_names | join(' ') }}"
command: "{{ ceph_cmd }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['default_ipv4']['address'] }} {{ group_names | join(' ') }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
- name: add ceph label for core component
command: "{{ ceph_cmd }} orch host label add {{ ansible_hostname }} ceph"
command: "{{ ceph_cmd }} orch host label add {{ ansible_facts['hostname'] }} ceph"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: inventory_hostname in groups.get(mon_group_name, []) or
@ -352,7 +352,7 @@
- name: adopt mon daemon
cephadm_adopt:
name: "mon.{{ ansible_hostname }}"
name: "mon.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
docker: "{{ true if container_binary == 'docker' else false }}"
@ -360,7 +360,7 @@
firewalld: "{{ true if configure_firewall | bool else false }}"
- name: reset failed ceph-mon systemd unit
command: 'systemctl reset-failed ceph-mon@{{ ansible_hostname }}' # noqa 303
command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
@ -382,7 +382,7 @@
changed_when: false
register: ceph_health_raw
until: >
ansible_hostname in (ceph_health_raw.stdout | from_json)["quorum_names"]
ansible_facts['hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
environment:
@ -399,7 +399,7 @@
- name: adopt mgr daemon
cephadm_adopt:
name: "mgr.{{ ansible_hostname }}"
name: "mgr.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
docker: "{{ true if container_binary == 'docker' else false }}"
@ -407,7 +407,7 @@
firewalld: "{{ true if configure_firewall | bool else false }}"
- name: reset failed ceph-mgr systemd unit
command: 'systemctl reset-failed ceph-mgr@{{ ansible_hostname }}' # noqa 303
command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
@ -583,7 +583,7 @@
- name: stop and disable ceph-mds systemd service
service:
name: 'ceph-mds@{{ ansible_hostname }}'
name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: false
failed_when: false
@ -596,7 +596,7 @@
when: not containerized_deployment | bool
- name: reset failed ceph-mds systemd unit
command: 'systemctl reset-failed ceph-mds@{{ ansible_hostname }}' # noqa 303
command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
@ -615,7 +615,7 @@
- name: remove legacy ceph mds data
file:
path: '/var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}'
path: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}"
state: absent
- name: rgw realm/zonegroup/zone requirements
@ -692,7 +692,7 @@
- name: stop and disable ceph-radosgw systemd service
service:
name: 'ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: false
failed_when: false
@ -706,7 +706,7 @@
when: not containerized_deployment | bool
- name: reset failed ceph-radosgw systemd unit
command: 'systemctl reset-failed ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}' # noqa 303
command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa 303
changed_when: false
failed_when: false
loop: '{{ rgw_instances }}'
@ -726,13 +726,13 @@
- name: remove legacy ceph radosgw data
file:
path: '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: absent
loop: '{{ rgw_instances }}'
- name: remove legacy ceph radosgw directory
file:
path: '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}'
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
state: absent
- name: redeploy rbd-mirror daemons
@ -762,7 +762,7 @@
- name: stop and disable rbd-mirror systemd service
service:
name: 'ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}'
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: false
failed_when: false
@ -775,7 +775,7 @@
when: not containerized_deployment | bool
- name: reset failed rbd-mirror systemd unit
command: 'systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}' # noqa 303
command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
@ -938,7 +938,7 @@
- name: adopt alertmanager daemon
cephadm_adopt:
name: "alertmanager.{{ ansible_hostname }}"
name: "alertmanager.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ alertmanager_container_image }}"
docker: "{{ true if container_binary == 'docker' else false }}"
@ -992,7 +992,7 @@
- name: adopt prometheus daemon
cephadm_adopt:
name: "prometheus.{{ ansible_hostname }}"
name: "prometheus.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ prometheus_container_image }}"
docker: "{{ true if container_binary == 'docker' else false }}"
@ -1019,7 +1019,7 @@
- name: adopt grafana daemon
cephadm_adopt:
name: "grafana.{{ ansible_hostname }}"
name: "grafana.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ grafana_container_image }}"
docker: "{{ true if container_binary == 'docker' else false }}"


@ -231,14 +231,14 @@
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: manage nodes with cephadm
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ group_names | join(' ') }}"
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['default_ipv4']['address'] }} {{ group_names | join(' ') }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: add ceph label for core component
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_hostname }} ceph"
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: inventory_hostname in groups.get(mon_group_name, []) or


@ -71,7 +71,7 @@
run_once: true
- name: get all nfs-ganesha mount points
command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
register: nfs_ganesha_mount_points
failed_when: false
with_items: "{{ groups[nfs_group_name] }}"
@ -127,7 +127,7 @@
name: nfs-ganesha
state: stopped
failed_when: false
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge node-exporter
hosts:
@ -249,7 +249,7 @@
- name: stop ceph mdss with systemd
service:
name: ceph-mds@{{ ansible_hostname }}
name: ceph-mds@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
failed_when: false
@ -270,11 +270,11 @@
- name: stop ceph mgrs with systemd
service:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
failed_when: false
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge rgwloadbalancer cluster
@ -318,7 +318,7 @@
- name: stop ceph rgws with systemd
service:
name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
failed_when: false
@ -340,7 +340,7 @@
- name: stop ceph rbd mirror with systemd
service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
failed_when: false
@ -368,7 +368,7 @@
become: false
wait_for:
port: 22
host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}"
state: started
delay: 10
timeout: 500
@ -398,7 +398,7 @@
state: stopped
enabled: no
with_items: "{{ osd_ids.stdout_lines }}"
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: remove ceph udev rules
file:
@ -618,7 +618,7 @@
- name: stop ceph mons with systemd
service:
name: "ceph-{{ item }}@{{ ansible_hostname }}"
name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
failed_when: false
@ -740,27 +740,27 @@
yum:
name: "{{ ceph_packages }}"
state: absent
when: ansible_pkg_mgr == 'yum'
when: ansible_facts['pkg_mgr'] == 'yum'
- name: purge ceph packages with dnf
dnf:
name: "{{ ceph_packages }}"
state: absent
when: ansible_pkg_mgr == 'dnf'
when: ansible_facts['pkg_mgr'] == 'dnf'
- name: purge ceph packages with apt
apt:
name: "{{ ceph_packages }}"
state: absent
purge: true
when: ansible_pkg_mgr == 'apt'
when: ansible_facts['pkg_mgr'] == 'apt'
- name: purge remaining ceph packages with yum
yum:
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'yum'
- ansible_facts['pkg_mgr'] == 'yum'
- purge_all_packages | bool
- name: purge remaining ceph packages with dnf
@ -768,7 +768,7 @@
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'dnf'
- ansible_facts['pkg_mgr'] == 'dnf'
- purge_all_packages | bool
- name: purge remaining ceph packages with apt
@ -776,7 +776,7 @@
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'apt'
- ansible_facts['pkg_mgr'] == 'apt'
- purge_all_packages | bool
- name: purge extra packages with yum
@ -784,7 +784,7 @@
name: "{{ extra_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'yum'
- ansible_facts['pkg_mgr'] == 'yum'
- purge_all_packages | bool
- name: purge extra packages with dnf
@ -792,7 +792,7 @@
name: "{{ extra_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'dnf'
- ansible_facts['pkg_mgr'] == 'dnf'
- purge_all_packages | bool
- name: purge extra packages with apt
@ -800,7 +800,7 @@
name: "{{ extra_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'apt'
- ansible_facts['pkg_mgr'] == 'apt'
- purge_all_packages | bool
- name: remove config and any ceph socket left
@ -826,7 +826,7 @@
- name: purge dnf cache
command: dnf clean all
when: ansible_pkg_mgr == 'dnf'
when: ansible_facts['pkg_mgr'] == 'dnf'
- name: purge rpm cache in /tmp
file:
@ -835,7 +835,7 @@
- name: clean apt
command: apt-get clean # noqa 303
when: ansible_pkg_mgr == 'apt'
when: ansible_facts['pkg_mgr'] == 'apt'
- name: purge ceph repo file in /etc/yum.repos.d
file:
@ -845,7 +845,7 @@
- ceph-dev
- ceph_stable
- rh_storage
when: ansible_os_family == 'RedHat'
when: ansible_facts['os_family'] == 'RedHat'
- name: check for anything running ceph
command: "ps -u ceph -U ceph"
@ -866,7 +866,7 @@
path: "{{ item.path }}"
state: absent
with_items: "{{ systemd_files.files }}"
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge fetch directory


@ -55,7 +55,7 @@
run_once: true
- name: get all nfs-ganesha mount points
command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
register: nfs_ganesha_mount_points
failed_when: false
with_items: "{{ groups[nfs_group_name] }}"
@ -104,7 +104,7 @@
- name: disable ceph nfs service
service:
name: "ceph-nfs@{{ ansible_hostname }}"
name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
@ -114,7 +114,7 @@
path: /etc/systemd/system/ceph-nfs@.service
state: absent
- name: remove ceph nfs directories for "{{ ansible_hostname }}"
- name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
file:
path: "{{ item }}"
state: absent
@ -134,7 +134,7 @@
- name: disable ceph mds service
service:
name: "ceph-mds@{{ ansible_hostname }}"
name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
@ -181,7 +181,7 @@
- name: disable ceph mgr service
service:
name: "ceph-mgr@{{ ansible_hostname }}"
name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
@ -208,7 +208,7 @@
- name: disable ceph rgw service
service:
name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
failed_when: false
@ -230,7 +230,7 @@
- name: disable ceph rbd-mirror service
service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
@ -351,8 +351,8 @@
enabled: no
ignore_errors: true
with_items:
- "ceph-mgr@{{ ansible_hostname }}"
- "ceph-mon@{{ ansible_hostname }}"
- "ceph-mgr@{{ ansible_facts['hostname'] }}"
- "ceph-mon@{{ ansible_facts['hostname'] }}"
- name: remove ceph mon and mgr service
file:
@ -485,7 +485,7 @@
tasks:
- name: stop ceph-crash container
service:
name: "ceph-crash@{{ ansible_hostname }}"
name: "ceph-crash@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
failed_when: false
@ -600,7 +600,7 @@
state: absent
update_cache: yes
autoremove: yes
when: ansible_os_family == 'Debian'
when: ansible_facts['os_family'] == 'Debian'
- name: red hat based systems tasks
block:
@ -621,7 +621,7 @@
args:
warn: no
when:
ansible_pkg_mgr == "yum"
ansible_facts['pkg_mgr'] == "yum"
- name: dnf related tasks on red hat
block:
@ -640,9 +640,9 @@
args:
warn: no
when:
ansible_pkg_mgr == "dnf"
ansible_facts['pkg_mgr'] == "dnf"
when:
ansible_os_family == 'RedHat' and
ansible_facts['os_family'] == 'RedHat' and
not is_atomic
- name: find any service-cid file left
@ -682,7 +682,7 @@
become: true
tasks:
- name: purge ceph directories for "{{ ansible_hostname }}" and ceph socket
- name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket
file:
path: "{{ item }}"
state: absent


@ -83,7 +83,7 @@
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: get iscsi gateway list


@ -209,14 +209,14 @@
enabled: no
masked: yes
with_items:
- "{{ ansible_hostname }}"
- "{{ ansible_fqdn }}"
- "{{ ansible_facts['hostname'] }}"
- "{{ ansible_facts['fqdn'] }}"
# only mask the service for mgr because it must be upgraded
# after ALL monitors, even when collocated
- name: mask the mgr service
systemd:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
masked: yes
when: inventory_hostname in groups[mgr_group_name] | default([])
or groups[mgr_group_name] | default([]) | length == 0
@ -236,7 +236,7 @@
- name: start ceph mgr
systemd:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no
@ -248,20 +248,20 @@
register: ceph_health_raw
until:
- ceph_health_raw.rc == 0
- (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
- (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
when: not containerized_deployment | bool
- name: container | waiting for the containerized monitor to join the quorum...
command: >
{{ container_binary }} exec ceph-mon-{{ ansible_hostname }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
register: ceph_health_raw
until:
- ceph_health_raw.rc == 0
- (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
- (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
when: containerized_deployment | bool
@ -293,7 +293,7 @@
block:
- name: stop ceph mgr
systemd:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
masked: yes
@ -327,7 +327,7 @@
# or if we run a Ceph cluster before Luminous
- name: stop ceph mgr
systemd:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
masked: yes
@ -398,7 +398,7 @@
- name: set_fact container_exec_cmd_osd
set_fact:
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: stop ceph osd
@ -526,7 +526,7 @@
set_fact:
mds_active_host: "{{ [hostvars[item]['inventory_hostname']] }}"
with_items: "{{ groups[mds_group_name] }}"
when: hostvars[item]['ansible_hostname'] == mds_active_name
when: hostvars[item]['ansible_facts']['hostname'] == mds_active_name
- name: create standby_mdss group
add_host:
@ -538,7 +538,7 @@
- name: stop standby ceph mds
systemd:
name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
state: stopped
enabled: no
delegate_to: "{{ item }}"
@ -549,7 +549,7 @@
# somehow, having a single task doesn't work in containerized context
- name: mask systemd units for standby ceph mds
systemd:
name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
masked: yes
delegate_to: "{{ item }}"
with_items: "{{ groups['standby_mdss'] }}"
@ -585,7 +585,7 @@
- name: prevent restart from the packaging
systemd:
name: ceph-mds@{{ ansible_hostname }}
name: ceph-mds@{{ ansible_facts['hostname'] }}
enabled: no
masked: yes
when: not containerized_deployment | bool
@ -605,14 +605,14 @@
- name: restart ceph mds
systemd:
name: ceph-mds@{{ ansible_hostname }}
name: ceph-mds@{{ ansible_facts['hostname'] }}
state: restarted
enabled: yes
masked: no
when: not containerized_deployment | bool
- name: restart active mds
command: "{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}"
command: "{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}"
changed_when: false
when: containerized_deployment | bool
@ -631,7 +631,7 @@
- name: prevent restarts from the packaging
systemd:
name: ceph-mds@{{ ansible_hostname }}
name: ceph-mds@{{ ansible_facts['hostname'] }}
enabled: no
masked: yes
when: not containerized_deployment | bool
@ -678,7 +678,7 @@
- name: stop ceph rgw when upgrading from stable-3.2
systemd:
name: ceph-radosgw@rgw.{{ ansible_hostname }}
name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
masked: yes
@ -686,7 +686,7 @@
- name: stop ceph rgw
systemd:
name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
state: stopped
enabled: no
masked: yes
@ -715,7 +715,7 @@
tasks:
- name: stop ceph rbd mirror
systemd:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
masked: yes
@ -759,7 +759,7 @@
- name: systemd stop nfs container
systemd:
name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
state: stopped
enabled: no
masked: yes
@ -866,7 +866,7 @@
tasks:
- name: stop the ceph-crash service
systemd:
name: "{{ 'ceph-crash@' + ansible_hostname if containerized_deployment | bool else 'ceph-crash.service' }}"
name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
state: stopped
enabled: no
masked: yes
@ -890,8 +890,8 @@
name: ceph-facts
tasks_from: container_binary.yml
- name: container | disallow pre-pacific OSDs and enable all new pacific-only functionality
command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release pacific"
- name: container | disallow pre-quincy OSDs and enable all new quincy-only functionality
command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release pacific"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: True
when:
@ -1004,7 +1004,7 @@
- name: set_fact container_exec_cmd_status
set_fact:
container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: show ceph status

View File

@ -61,7 +61,7 @@
- name: set_fact container_exec_cmd for mon0
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
@ -74,7 +74,7 @@
- name: set_fact mds_to_kill_hostname
set_fact:
mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_hostname'] }}"
mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}"
tasks:
# get rid of this as soon as "systemctl stop ceph-msd@$HOSTNAME" also


@ -39,7 +39,7 @@
- name: set_fact container_exec_cmd
when: containerized_deployment | bool
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
@ -92,7 +92,7 @@
- name: set_fact mgr_to_kill_hostname
set_fact:
mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_hostname'] }}"
mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}"
tasks:
- name: stop manager services and verify it


@ -76,7 +76,7 @@
- name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
@ -90,7 +90,7 @@
- name: set_fact mon_to_kill_hostname
set_fact:
mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_hostname'] }}"
mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}"
- name: stop monitor service(s)
service:


@ -65,7 +65,7 @@
post_tasks:
- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
@ -93,7 +93,7 @@
with_nested:
- "{{ groups.get(osd_group_name) }}"
- "{{ osd_hosts }}"
when: hostvars[item.0]['ansible_hostname'] == item.1
when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
- name: get ceph-volume lvm list data
ceph_volume:


@ -64,7 +64,7 @@
- name: set_fact container_exec_cmd for mon0
when: containerized_deployment | bool
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
@ -76,7 +76,7 @@
- name: set_fact rbdmirror_to_kill_hostname
set_fact:
rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_hostname'] }}"
rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_facts']['hostname'] }}"
- name: set_fact rbdmirror_gids
set_fact:


@ -66,7 +66,7 @@
- name: set_fact container_exec_cmd for mon0
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
@ -95,7 +95,7 @@
set_fact:
rgw_host: '{{ item }}'
with_items: '{{ groups[rgw_group_name] }}'
when: hostvars[item]['ansible_hostname'] == rgw_to_kill.split('.')[0]
when: hostvars[item]['ansible_facts']['hostname'] == rgw_to_kill.split('.')[0]
- name: stop rgw service
service:


@ -74,7 +74,7 @@
- name: stop non-containerized ceph mon
service:
name: "ceph-mon@{{ ansible_hostname }}"
name: "ceph-mon@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
@ -114,7 +114,7 @@
when: ldb_files.rc == 0
- name: copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common
command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_hostname }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
args:
creates: /etc/ceph/{{ cluster }}.mon.keyring
changed_when: false
@ -137,7 +137,7 @@
- name: waiting for the monitor to join the quorum...
command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json"
register: ceph_health_raw
until: ansible_hostname in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
until: ansible_facts['hostname'] in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
changed_when: false
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
@ -158,7 +158,7 @@
# will not exist
- name: stop non-containerized ceph mgr(s)
service:
name: "ceph-mgr@{{ ansible_hostname }}"
name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
failed_when: false
@ -339,7 +339,7 @@
post_tasks:
- name: container - waiting for clean pgs...
command: >
{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
register: ceph_health_post
until: >
(((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0)
@ -387,7 +387,7 @@
- name: stop non-containerized ceph mds(s)
service:
name: "ceph-mds@{{ ansible_hostname }}"
name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
@ -459,7 +459,7 @@
tasks:
- name: stop non-containerized ceph rgw(s)
service:
name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
with_items: "{{ rgw_instances }}"
@ -500,7 +500,7 @@
pre_tasks:
- name: stop non-containerized ceph rbd mirror(s)
service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no


@ -19,31 +19,31 @@
- name: Check if the node has been migrated already
stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/migration_completed
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
register: migration_completed
failed_when: false
- name: Check for failed run
stat: >
path=/var/lib/ceph/{{ ansible_hostname }}.tar
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: mon_archive_leftover
- fail: msg="Looks like an archive is already there, please remove it!"
when: migration_completed.stat.exists == False and mon_archive_leftover.stat.exists == True
- name: Compress the store as much as possible
command: ceph tell mon.{{ ansible_hostname }} compact
command: ceph tell mon.{{ ansible_facts['hostname'] }} compact
when: migration_completed.stat.exists == False
- name: Check if sysvinit
stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/sysvinit
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
register: monsysvinit
changed_when: False
- name: Check if upstart
stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/upstart
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
register: monupstart
changed_when: False
@ -70,7 +70,7 @@
service: >
name=ceph-mon
state=restarted
args=id={{ ansible_hostname }}
args=id={{ ansible_facts['hostname'] }}
when: monupstart.stat.exists == True and migration_completed.stat.exists == False
- name: Restart the Monitor after compaction (Sysvinit)
@ -92,7 +92,7 @@
service: >
name=ceph-mon
state=stopped
args=id={{ ansible_hostname }}
args=id={{ ansible_facts['hostname'] }}
when: monupstart.stat.exists == True and migration_completed.stat.exists == False
- name: Stop the monitor (Sysvinit)
@ -125,15 +125,15 @@
# NOTE (leseb): should we convert upstart to sysvinit here already?
- name: Archive monitor stores
shell: >
tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_hostname }}.tar
tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
creates={{ ansible_hostname }}.tar
creates={{ ansible_facts['hostname'] }}.tar
when: migration_completed.stat.exists == False
- name: Scp the Monitor store
fetch: >
src=/var/lib/ceph/{{ ansible_hostname }}.tar
dest={{ backup_dir }}/monitors-backups/{{ ansible_hostname }}.tar
src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
dest={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar
flat=yes
when: migration_completed.stat.exists == False
@ -155,13 +155,13 @@
- name: Check if sysvinit
stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/sysvinit
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
register: monsysvinit
changed_when: False
- name: Check if upstart
stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/upstart
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
register: monupstart
changed_when: False
@ -169,7 +169,7 @@
service: >
name=ceph-mon
state=stopped
args=id={{ ansible_hostname }}
args=id={{ ansible_facts['hostname'] }}
when: monupstart.stat.exists == True and migration_completed.stat.exists == False
- name: Make sure the monitor is stopped (Sysvinit)
@ -182,7 +182,7 @@
# NOTE (leseb): 'creates' was added in Ansible 1.6
- name: Copy and unarchive the monitor store
unarchive: >
src={{ backup_dir }}/monitors-backups/{{ ansible_hostname }}.tar
src={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar
dest=/var/lib/ceph/
copy=yes
mode=0600
@ -218,7 +218,7 @@
- name: Waiting for the monitor to join the quorum...
shell: >
ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_hostname }}
ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }}
register: result
until: result.rc == 0
retries: 5
@ -229,7 +229,7 @@
- name: Done moving to the next monitor
file: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/migration_completed
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
state=touch
owner=root
group=root
@ -252,7 +252,7 @@
- name: Check for failed run
stat: >
path=/var/lib/ceph/{{ ansible_hostname }}.tar
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: osd_archive_leftover
- fail: msg="Looks like an archive is already there, please remove it!"
@ -297,9 +297,9 @@
- name: Archive ceph configs
shell: >
tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_hostname }}.tar
tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
creates={{ ansible_hostname }}.tar
creates={{ ansible_facts['hostname'] }}.tar
when: migration_completed.stat.exists == False
- name: Create backup directory
@ -315,7 +315,7 @@
- name: Scp OSDs dirs and configs
fetch: >
src=/var/lib/ceph/{{ ansible_hostname }}.tar
src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
dest={{ backup_dir }}/osds-backups/
flat=yes
when: migration_completed.stat.exists == False
@ -371,7 +371,7 @@
# NOTE (leseb): 'creates' was added in Ansible 1.6
- name: Copy and unarchive the OSD configs
unarchive: >
src={{ backup_dir }}/osds-backups/{{ ansible_hostname }}.tar
src={{ backup_dir }}/osds-backups/{{ ansible_facts['hostname'] }}.tar
dest=/var/lib/ceph/
copy=yes
mode=0600
@ -446,7 +446,7 @@
- name: Check for failed run
stat: >
path=/var/lib/ceph/{{ ansible_hostname }}.tar
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: rgw_archive_leftover
- fail: msg="Looks like an archive is already there, please remove it!"
@ -454,9 +454,9 @@
- name: Archive rados gateway configs
shell: >
tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_hostname }}.tar
tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
creates={{ ansible_hostname }}.tar
creates={{ ansible_facts['hostname'] }}.tar
when: migration_completed.stat.exists == False
- name: Create backup directory
@ -472,7 +472,7 @@
- name: Scp RGWs dirs and configs
fetch: >
src=/var/lib/ceph/{{ ansible_hostname }}.tar
src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
dest={{ backup_dir }}/rgws-backups/
flat=yes
when: migration_completed.stat.exists == False
@ -512,7 +512,7 @@
# NOTE (leseb): 'creates' was added in Ansible 1.6
- name: Copy and unarchive the OSD configs
unarchive: >
src={{ backup_dir }}/rgws-backups/{{ ansible_hostname }}.tar
src={{ backup_dir }}/rgws-backups/{{ ansible_facts['hostname'] }}.tar
dest=/var/lib/ceph/
copy=yes
mode=0600


@ -35,13 +35,13 @@
include_vars: "{{ item }}"
with_first_found:
- files:
- "host_vars/{{ ansible_hostname }}.yml"
- "host_vars/{{ ansible_facts['hostname'] }}.yml"
- "host_vars/default.yml"
skip: true
- name: exit playbook, if devices not defined
fail:
msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_hostname }}.yml"
msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml"
when: devices is not defined
- name: install sgdisk(gdisk)
@ -67,12 +67,12 @@
- set_fact:
owner: 167
group: 167
when: ansible_os_family == "RedHat"
when: ansible_facts['os_family'] == "RedHat"
- set_fact:
owner: 64045
group: 64045
when: ansible_os_family == "Debian"
when: ansible_facts['os_family'] == "Debian"
- name: change partitions ownership
file:


@ -60,7 +60,7 @@
post_tasks:
- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster


@ -7,7 +7,7 @@
ceph_conf_overrides:
"client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
"client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
"rgw keystone api version": "2"
"rgw keystone url": "http://192.168.0.1:35357"
"rgw keystone admin token": "password"
@ -16,7 +16,7 @@ ceph_conf_overrides:
"rgw keystone token cache size": "10000"
"rgw keystone revocation interval": "900"
"rgw s3 auth use keystone": "true"
"nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss"
"nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss"
# NOTE (leseb): to authenticate with Keystone you have two options:


@ -7,7 +7,7 @@
ceph_conf_overrides:
"client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
"client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
"rgw keystone api version": "3"
"rgw keystone url": "http://192.168.0.1:35357"
"rgw keystone admin token": "password"
@ -17,7 +17,7 @@ ceph_conf_overrides:
"rgw keystone token cache size": "10000"
"rgw keystone revocation interval": "900"
"rgw s3 auth use keystone": "true"
"nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss"
"nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss"
# NOTE (leseb): to authenticate with Keystone you have two options:


@ -6,6 +6,6 @@
# The double quotes are important, do NOT remove them.
ceph_conf_overrides:
"client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
"client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
rgw enable static website = true
rgw dns s3website name = objects-website-region.domain.com


@ -6,7 +6,7 @@
# The double quotes are important, do NOT remove them.
ceph_conf_overrides:
"client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
"client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
rgw enable usage log = true
rgw usage log tick interval = 30
rgw usage log flush threshold = 1024


@ -8,7 +8,7 @@
group_by:
key: _filtered_clients
parents: "{{ client_group_name }}"
when: (ansible_architecture == 'x86_64') or (not containerized_deployment | bool)
when: (ansible_facts['architecture'] == 'x86_64') or (not containerized_deployment | bool)
- name: set_fact delegated_node
set_fact:


@ -6,7 +6,7 @@
create: yes
line: "CLUSTER={{ cluster }}"
regexp: "^CLUSTER="
when: ansible_os_family in ["RedHat", "Suse"]
when: ansible_facts['os_family'] in ["RedHat", "Suse"]
# NOTE(leseb): we are performing the following check
# to ensure any Jewel installation will not fail.
@ -19,7 +19,7 @@
# - All previous versions from Canonical
# - Infernalis from ceph.com
- name: debian based systems - configure cluster name
when: ansible_os_family == "Debian"
when: ansible_facts['os_family'] == "Debian"
block:
- name: check /etc/default/ceph exist
stat:


@ -7,7 +7,7 @@
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
when:
- ansible_os_family == 'Debian'
- ansible_facts['os_family'] == 'Debian'
- etc_default_ceph.stat.exists
notify: restart ceph osds
@ -18,5 +18,5 @@
create: yes
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
when: ansible_os_family == 'RedHat'
when: ansible_facts['os_family'] == 'RedHat'
notify: restart ceph osds


@ -21,4 +21,4 @@
args:
warn: no
changed_when: false
when: ansible_pkg_mgr == 'yum'
when: ansible_facts['pkg_mgr'] == 'yum'


@ -15,6 +15,6 @@
- name: configure debian ceph stable community repository
apt_repository:
repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_distribution_release) }} main"
repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
state: present
update_cache: yes


@ -9,6 +9,6 @@
- name: configure debian custom repository
apt_repository:
repo: "deb {{ ceph_custom_repo }} {{ ansible_distribution_release }} main"
repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main"
state: present
update_cache: yes


@ -1,7 +1,7 @@
---
- name: fetch ceph debian development repository
uri:
url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_distribution_release }}/repo
url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo
return_content: yes
register: ceph_dev_deb_repo


@ -4,6 +4,6 @@
name: "{{ debian_ceph_pkgs | unique }}"
update_cache: no
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
register: result
until: result is succeeded


@ -5,7 +5,7 @@
state: present
register: result
until: result is succeeded
when: ansible_distribution == 'RedHat'
when: ansible_facts['distribution'] == 'RedHat'
- name: install centos dependencies
yum:
@ -13,7 +13,7 @@
state: present
register: result
until: result is succeeded
when: ansible_distribution == 'CentOS'
when: ansible_facts['distribution'] == 'CentOS'
- name: install redhat ceph packages
package:


@ -1,15 +1,15 @@
---
- name: enable red hat storage monitor repository
rhsm_repository:
name: "rhceph-{{ ceph_rhcs_version }}-mon-for-rhel-8-{{ ansible_architecture }}-rpms"
name: "rhceph-{{ ceph_rhcs_version }}-mon-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
when: (mon_group_name in group_names or mgr_group_name in group_names)
- name: enable red hat storage osd repository
rhsm_repository:
name: "rhceph-{{ ceph_rhcs_version }}-osd-for-rhel-8-{{ ansible_architecture }}-rpms"
name: "rhceph-{{ ceph_rhcs_version }}-osd-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
when: osd_group_name in group_names
- name: enable red hat storage tools repository
rhsm_repository:
name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_architecture }}-rpms"
name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
when: (mgr_group_name in group_names or rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names or iscsi_gw_group_name in group_names or client_group_name in group_names or monitoring_group_name in group_names)


@ -5,7 +5,7 @@
register: result
until: result is succeeded
tags: with_pkg
when: ansible_distribution_major_version | int == 7
when: ansible_facts['distribution_major_version'] | int == 7
- name: configure red hat ceph community repository stable key
rpm_key:
@ -21,7 +21,7 @@
gpgcheck: yes
state: present
gpgkey: "{{ ceph_stable_key }}"
baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_distribution_major_version }}/$basearch"
baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch"
file: ceph_stable
priority: 2
register: result
@ -34,7 +34,7 @@
gpgcheck: yes
state: present
gpgkey: "{{ ceph_stable_key }}"
baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_distribution_major_version }}/noarch"
baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch"
file: ceph_stable
priority: 2
register: result


@ -1,7 +1,7 @@
---
- name: get latest available build
uri:
url: "https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
url: "https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos/{{ ansible_facts['distribution_major_version'] }}/{{ ansible_facts['architecture'] }}&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
return_content: yes
run_once: true
register: latest_build


@ -1,22 +1,22 @@
---
- name: include_tasks installs/install_on_redhat.yml
include_tasks: installs/install_on_redhat.yml
when: ansible_os_family == 'RedHat'
when: ansible_facts['os_family'] == 'RedHat'
tags: package-install
- name: include_tasks installs/install_on_suse.yml
include_tasks: installs/install_on_suse.yml
when: ansible_os_family == 'Suse'
when: ansible_facts['os_family'] == 'Suse'
tags: package-install
- name: include installs/install_on_debian.yml
include_tasks: installs/install_on_debian.yml
tags: package-install
when: ansible_os_family == 'Debian'
when: ansible_facts['os_family'] == 'Debian'
- name: include_tasks installs/install_on_clear.yml
include_tasks: installs/install_on_clear.yml
when: ansible_os_family == 'ClearLinux'
when: ansible_facts['os_family'] == 'ClearLinux'
tags: package-install
- name: get ceph version

View File

@ -1,7 +1,7 @@
---
- name: create rados gateway instance directories
file:
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@ -10,7 +10,7 @@
- name: generate environment file
copy:
dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/EnvironmentFile"
dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
owner: "root"
group: "root"
mode: "0644"

View File

@ -24,8 +24,8 @@ osd crush chooseleaf type = 0
{% if nb_mon > 0 and inventory_hostname in groups.get(mon_group_name, []) %}
mon initial members = {% for host in groups[mon_group_name] %}
{% if hostvars[host]['ansible_hostname'] is defined -%}
{{ hostvars[host]['ansible_hostname'] }}
{% if hostvars[host]['ansible_facts']['hostname'] is defined -%}
{{ hostvars[host]['ansible_facts']['hostname'] }}
{%- endif %}
{%- if not loop.last %},{% endif %}
{% endfor %}
@ -84,13 +84,13 @@ filestore xattr use omap = true
[osd]
{% if is_hci | bool and _num_osds > 0 %}
{# hci_safety_factor is the safety factor for HCI deployments #}
{% if ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds) | int %}
{% if ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds) | int %}
{% endif %}
{% elif _num_osds > 0 %}
{# non_hci_safety_factor is the safety factor for dedicated nodes #}
{% if ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds) | int %}
{% if ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds) | int %}
{% endif %}
{% endif %}
osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
@ -98,14 +98,14 @@ osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
{% endif %}
{% if inventory_hostname in groups.get(rgw_group_name, []) %}
{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_hostname']) %}
{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) %}
{# {{ hostvars[host]['rgw_hostname'] }} for backward compatibility, fqdn issues. See bz1580408 #}
{% if hostvars[inventory_hostname]['rgw_instances'] is defined %}
{% for instance in hostvars[inventory_hostname]['rgw_instances'] %}
[client.rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}]
host = {{ _rgw_hostname }}
keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}/keyring
log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_hostname'] + '.' + instance['instance_name'] }}.log
log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + instance['instance_name'] }}.log
{% set _rgw_binding_socket = instance['radosgw_address'] | default(_radosgw_address) | string + ':' + instance['radosgw_frontend_port'] | default(radosgw_frontend_port) | string %}
{%- macro frontend_line(frontend_type) -%}
{%- if frontend_type == 'civetweb' -%}
@ -131,12 +131,12 @@ rgw_zone = {{ instance['rgw_zone'] }}
{% if inventory_hostname in groups.get(nfs_group_name, []) and inventory_hostname not in groups.get(rgw_group_name, []) %}
{% for host in groups[nfs_group_name] %}
{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_hostname']) %}
{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_facts']['hostname']) %}
{% if nfs_obj_gw | bool %}
[client.rgw.{{ _rgw_hostname }}]
host = {{ _rgw_hostname }}
keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname }}/keyring
log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_facts']['hostname'] }}.log
{% endif %}
{% endfor %}
{% endif %}
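Aside for readers of this template (numbers invented, defaults assumed): the safety-factor logic only raises osd memory target when the per-OSD share of RAM exceeds the configured osd_memory_target, taken here as Ceph's usual default of 4294967296 bytes. A rough worked example for an HCI node:

    {# hypothetical HCI node: 262144 MB of RAM, 10 OSDs, hci_safety_factor 0.2 #}
    {# 262144 * 1048576 * 0.2 / 10 = 5497558138 bytes (~5.1 GiB) > 4294967296 #}
    {# so the rendered line would become: #}
    osd memory target = 5497558138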

View File

@ -14,14 +14,14 @@
- name: add docker's gpg key
apt_key:
url: 'https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg'
url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg"
register: result
until: result is succeeded
when: container_package_name == 'docker-ce'
- name: add docker repository
apt_repository:
repo: "deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable"
repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable"
when: container_package_name == 'docker-ce'
- name: add podman ppa repository
@ -29,4 +29,4 @@
repo: "ppa:projectatomic/ppa"
when:
- container_package_name == 'podman'
- ansible_distribution == 'Ubuntu'
- ansible_facts['distribution'] == 'Ubuntu'

View File

@ -2,13 +2,13 @@
- name: include specific variables
include_vars: "{{ item }}"
with_first_found:
- "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "{{ ansible_os_family }}.yml"
- "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml"
- "{{ ansible_facts['os_family'] }}.yml"
- name: debian based systems tasks
include_tasks: debian_prerequisites.yml
when:
- ansible_os_family == 'Debian'
- ansible_facts['os_family'] == 'Debian'
tags: with_pkg
- name: install container packages
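As an aside (illustration only, file names assumed to exist in the role's vars/ directory): with_first_found keeps the same per-distribution lookup as before, just sourced from the facts dictionary. On a CentOS 8 host it would try, in order:

    # 1. CentOS-8.yml  <- ansible_facts['distribution'] + ansible_facts['distribution_major_version']
    # 2. RedHat.yml    <- fallback to ansible_facts['os_family']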

View File

@ -57,7 +57,7 @@
- name: start the ceph-crash service
systemd:
name: "{{ 'ceph-crash@' + ansible_hostname if containerized_deployment | bool else 'ceph-crash.service' }}"
name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
state: started
enabled: yes
masked: no

View File

@ -1,7 +1,7 @@
---
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: set_fact container_run_cmd
@ -281,7 +281,7 @@
- name: add iscsi gateways - ipv4
command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
args:
stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
stdin_add_newline: no
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
@ -291,7 +291,7 @@
- name: add iscsi gateways - ipv6
command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
args:
stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
stdin_add_newline: no
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"

View File

@ -1,16 +1,16 @@
---
- name: get current mgr backend - ipv4
set_fact:
mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
when: ip_version == 'ipv4'
- name: get current mgr backend - ipv6
set_fact:
mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last }}"
mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last }}"
when: ip_version == 'ipv6'
- name: config the current dashboard backend
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[dashboard_backend]['ansible_hostname'] }}/server_addr {{ mgr_server_addr }}"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[dashboard_backend]['ansible_facts']['hostname'] }}/server_addr {{ mgr_server_addr }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
run_once: true

View File

@ -4,5 +4,5 @@
- name: print dashboard URL
debug:
msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_fqdn }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
run_once: true

View File

@ -74,7 +74,7 @@ debian_package_dependencies: []
centos_package_dependencies:
- epel-release
- "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
- "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
redhat_package_dependencies: []
@ -144,7 +144,7 @@ nfs_ganesha_stable_deb_repo: "{{ ceph_mirror }}/nfs-ganesha/deb-{{ nfs_ganesha_s
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@ -172,7 +172,7 @@ ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default(
#
ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
ceph_stable_openstack_release_uca: queens
ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
# REPOSITORY: openSUSE OBS
#
@ -182,7 +182,7 @@ ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_sta
# usually has newer Ceph releases than the normal distro repository.
#
#
ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
# REPOSITORY: DEV
#
@ -245,7 +245,7 @@ generate_fsid: true
ceph_conf_key_directory: /etc/ceph
ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
# Permissions for keyring files in /etc/ceph
ceph_keyring_permissions: '0600'
@ -520,7 +520,7 @@ rgw_zone: default
# global:
# foo: 1234
# bar: 5678
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
ceph_conf_overrides: {}
@ -788,4 +788,4 @@ client_connections: {}
container_exec_cmd:
docker: false
ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"

View File

@ -6,4 +6,4 @@
- name: set_fact container_binary
set_fact:
container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_distribution == 'Fedora') or (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8') else 'docker' }}"
container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8') else 'docker' }}"
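Reading the expression above (a sketch, distribution values assumed): podman is chosen when the binary exists on a Fedora host, or on any RedHat-family release 8 host; everything else keeps docker.

    # e.g. CentOS 8 host (os_family 'RedHat', distribution_major_version '8') -> container_binary: podman
    # e.g. Ubuntu 18.04 host                                                  -> container_binary: docker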

View File

@ -26,7 +26,7 @@
- name: set_fact monitor_name ansible_hostname
set_fact:
monitor_name: "{{ hostvars[item]['ansible_hostname'] }}"
monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}"
delegate_to: "{{ item }}"
delegate_facts: true
with_items: "{{ groups.get(mon_group_name, []) }}"
@ -38,12 +38,12 @@
block:
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}"
when:
- containerized_deployment | bool
- name: find a running mon container
command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
register: find_running_mon_container
failed_when: false
run_once: true
@ -98,7 +98,7 @@
- name: set_fact _container_exec_cmd
set_fact:
_container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_hostname'] }}"
_container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_facts']['hostname'] }}"
when:
- containerized_deployment | bool
@ -125,7 +125,7 @@
when: rolling_update | bool or groups.get(mon_group_name, []) | length == 0
- name: get current fsid
command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}.asok config get fsid"
command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid"
register: rolling_update_fsid
delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
until: rolling_update_fsid is succeeded
@ -249,11 +249,11 @@
- name: set_fact devices generate device list when osd_auto_discovery
set_fact:
devices: "{{ (devices | default([]) + [ item.key | regex_replace('^', '/dev/') ]) | unique }}"
with_dict: "{{ ansible_devices }}"
with_dict: "{{ ansible_facts['devices'] }}"
when:
- osd_auto_discovery | default(False) | bool
- inventory_hostname in groups.get(osd_group_name, [])
- ansible_devices is defined
- ansible_facts['devices'] is defined
- item.value.removable == "0"
- item.value.sectors != "0"
- item.value.partitions|count == 0
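Illustration only (device layout invented): with osd_auto_discovery enabled, the loop above walks ansible_facts['devices'] and keeps non-removable disks that report sectors and carry no partitions, so a facts excerpt like this would yield a single entry:

    # sda: removable "0", sectors "500118192", no partitions      -> kept as /dev/sda
    # sdb: removable "0", sectors "500118192", partition sdb1     -> skipped (partitioned)
    # sr0: removable "1"                                          -> skipped (removable)
    devices:
      - /dev/sda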
@ -282,9 +282,9 @@
- name: set_fact rgw_hostname
set_fact:
rgw_hostname: "{% set _value = ansible_hostname -%}
rgw_hostname: "{% set _value = ansible_facts['hostname'] -%}
{% for key in (ceph_current_status['services']['rgw']['daemons'] | list) -%}
{% if key == ansible_fqdn -%}
{% if key == ansible_facts['fqdn'] -%}
{% set _value = key -%}
{% endif -%}
{% endfor -%}

View File

@ -1,6 +1,6 @@
- name: set grafana_server_addr fact - ipv4
set_fact:
grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
- ip_version == 'ipv4'
@ -9,7 +9,7 @@
- name: set grafana_server_addr fact - ipv6
set_fact:
grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}"
grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
- ip_version == 'ipv6'
@ -18,7 +18,7 @@
- name: set grafana_server_addrs fact - ipv4
set_fact:
grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first]) | unique }}"
grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first]) | unique }}"
with_items: "{{ groups.get(monitoring_group_name, []) }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
@ -27,7 +27,7 @@
- name: set grafana_server_addrs fact - ipv6
set_fact:
grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap]) | unique }}"
grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap]) | unique }}"
with_items: "{{ groups.get(monitoring_group_name, []) }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
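For context (addresses invented for illustration): ips_in_ranges is ceph-ansible's filter that keeps only the fact-reported addresses falling inside the given subnets, so a multi-homed monitoring host still resolves to its address on the Ceph public network:

    # all_ipv4_addresses fact: ['10.20.0.11', '192.168.122.5'], public_network: '192.168.122.0/24'
    # ['10.20.0.11', '192.168.122.5'] | ips_in_ranges(['192.168.122.0/24']) -> ['192.168.122.5']
    grafana_server_addr: 192.168.122.5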

View File

@ -1,7 +1,7 @@
---
- name: set_fact _monitor_addresses to monitor_address_block ipv4
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}"
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
@ -11,7 +11,7 @@
- name: set_fact _monitor_addresses to monitor_address_block ipv6
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ipwrap }] }}"
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ipwrap }] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
@ -30,7 +30,7 @@
- name: set_fact _monitor_addresses to monitor_interface - ipv4
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
@ -41,7 +41,7 @@
- name: set_fact _monitor_addresses to monitor_interface - ipv6
set_fact:
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
_monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
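A small illustration (host and NIC names assumed): for an interface whose name contains a dash, the address lookup now goes through the facts dictionary instead of an injected ansible_* variable, with the same dash-to-underscore substitution:

    # monitor_interface: "br-ex" on host mon0 (hypothetical)
    # before: hostvars['mon0']['ansible_br_ex']['ipv4']['address']
    # after:  hostvars['mon0']['ansible_facts']['br_ex']['ipv4']['address']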

View File

@ -1,7 +1,7 @@
---
- name: set_fact _radosgw_address to radosgw_address_block ipv4
set_fact:
_radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}"
_radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}"
when:
- radosgw_address_block is defined
- radosgw_address_block != 'subnet'
@ -9,7 +9,7 @@
- name: set_fact _radosgw_address to radosgw_address_block ipv6
set_fact:
_radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ipwrap }}"
_radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ipwrap }}"
when:
- radosgw_address_block is defined
- radosgw_address_block != 'subnet'
@ -30,16 +30,16 @@
block:
- name: set_fact _interface
set_fact:
_interface: "{{ 'ansible_' + (radosgw_interface | replace('-', '_')) }}"
_interface: "{{ (radosgw_interface | replace('-', '_')) }}"
- name: set_fact _radosgw_address to radosgw_interface - ipv4
set_fact:
_radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
_radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts'][_interface][ip_version]['address'] }}"
when: ip_version == 'ipv4'
- name: set_fact _radosgw_address to radosgw_interface - ipv6
set_fact:
_radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] | ipwrap }}"
_radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts'][_interface][ip_version][0]['address'] | ipwrap }}"
when: ip_version == 'ipv6'
- name: set_fact rgw_instances without rgw multisite

View File

@ -7,7 +7,7 @@
until: result is succeeded
when:
- not containerized_deployment | bool
- ansible_os_family in ['RedHat', 'Suse']
- ansible_facts['os_family'] in ['RedHat', 'Suse']
tags: package-install
- name: make sure grafana is down
@ -41,7 +41,7 @@
with_items: "{{ grafana_dashboard_files }}"
when:
- not containerized_deployment | bool
- not ansible_os_family in ['RedHat', 'Suse']
- not ansible_facts['os_family'] in ['RedHat', 'Suse']
- name: write grafana.ini
config_template:

View File

@ -18,7 +18,7 @@ org_role = Viewer
[server]
cert_file = /etc/grafana/ceph-dashboard.crt
cert_key = /etc/grafana/ceph-dashboard.key
domain = {{ ansible_fqdn }}
domain = {{ ansible_facts['fqdn'] }}
protocol = {{ dashboard_protocol }}
http_port = {{ grafana_port }}
http_addr = {{ grafana_server_addr }}

View File

@ -1,6 +1,6 @@
---
- name: check for a mon container
command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_facts['hostname'] }}'"
register: ceph_mon_container_stat
changed_when: false
failed_when: false
@ -16,7 +16,7 @@
when: inventory_hostname in groups.get(osd_group_name, [])
- name: check for a mds container
command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_facts['hostname'] }}'"
register: ceph_mds_container_stat
changed_when: false
failed_when: false
@ -24,7 +24,7 @@
when: inventory_hostname in groups.get(mds_group_name, [])
- name: check for a rgw container
command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'"
register: ceph_rgw_container_stat
changed_when: false
failed_when: false
@ -32,7 +32,7 @@
when: inventory_hostname in groups.get(rgw_group_name, [])
- name: check for a mgr container
command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_facts['hostname'] }}'"
register: ceph_mgr_container_stat
changed_when: false
failed_when: false
@ -40,7 +40,7 @@
when: inventory_hostname in groups.get(mgr_group_name, [])
- name: check for a rbd mirror container
command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }}'"
register: ceph_rbd_mirror_container_stat
changed_when: false
failed_when: false
@ -48,7 +48,7 @@
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: check for a nfs container
command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }}'"
command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'"
register: ceph_nfs_container_stat
changed_when: false
failed_when: false
@ -80,7 +80,7 @@
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a ceph-crash container
command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_hostname }}'"
command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'"
register: ceph_crash_container_stat
changed_when: false
failed_when: false

View File

@ -5,7 +5,7 @@
- name: restart the ceph-crash service
systemd:
name: ceph-crash@{{ ansible_hostname }}
name: ceph-crash@{{ ansible_facts['hostname'] }}
state: restarted
enabled: yes
masked: no

View File

@ -2,14 +2,14 @@
RETRIES="{{ handler_health_mds_check_retries }}"
DELAY="{{ handler_health_mds_check_delay }}"
MDS_NAME="{{ ansible_hostname }}"
MDS_NAME="{{ ansible_facts['hostname'] }}"
{% if containerized_deployment | bool %}
DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }}"
{% endif %}
# Backward compatibility
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok
# First, restart the daemon
systemctl restart ceph-mds@${MDS_NAME}

View File

@ -2,14 +2,14 @@
RETRIES="{{ handler_health_mgr_check_retries }}"
DELAY="{{ handler_health_mgr_check_delay }}"
MGR_NAME="{{ ansible_hostname }}"
MGR_NAME="{{ ansible_facts['hostname'] }}"
{% if containerized_deployment | bool %}
DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_hostname }}"
DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_facts['hostname'] }}"
{% endif %}
# Backward compatibility
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_fqdn }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_hostname }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok
systemctl reset-failed ceph-mgr@${MGR_NAME}
# First, restart the daemon

View File

@ -4,18 +4,18 @@ RETRIES="{{ handler_health_mon_check_retries }}"
DELAY="{{ handler_health_mon_check_delay }}"
MONITOR_NAME="{{ monitor_name }}"
{% if containerized_deployment | bool %}
DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
{% endif %}
# if daemon is uninstalled, no restarting is needed; so exit with success
systemctl status ceph-mon@{{ ansible_hostname }} > /dev/null
systemctl status ceph-mon@{{ ansible_facts['hostname'] }} > /dev/null
if [[ $? -ne 0 ]]; then
exit 0
fi
# Backward compatibility
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok
check_quorum() {
while [ $RETRIES -ne 0 ]; do
@ -34,7 +34,7 @@ exit 1
}
# First, restart the daemon
systemctl restart ceph-mon@{{ ansible_hostname }}
systemctl restart ceph-mon@{{ ansible_facts['hostname'] }}
COUNT=10
# Wait and ensure the socket exists after restarting the daemon
@ -45,5 +45,5 @@ while [ $COUNT -ne 0 ]; do
done
# If we reach this point, it means the socket is not present.
echo "Socket file ${SOCKET} could not be found, which means the monitor is not running. Showing ceph-mon unit logs now:"
journalctl -u ceph-mon@{{ ansible_hostname }}
journalctl -u ceph-mon@{{ ansible_facts['hostname'] }}
exit 1

View File

@ -2,10 +2,10 @@
RETRIES="{{ handler_health_nfs_check_retries }}"
DELAY="{{ handler_health_nfs_check_delay }}"
NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}"
NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
PID=/var/run/ganesha.pid
{% if containerized_deployment | bool %}
DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }}"
DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
{% endif %}
# First, restart the daemon

View File

@ -2,14 +2,14 @@
RETRIES="{{ handler_health_rbd_mirror_check_retries }}"
DELAY="{{ handler_health_rbd_mirror_check_delay }}"
RBD_MIRROR_NAME="{{ ansible_hostname }}"
RBD_MIRROR_NAME="{{ ansible_facts['hostname'] }}"
{% if containerized_deployment | bool %}
DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_facts['hostname'] }}"
{% endif %}
# Backward compatibility
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok
# First, restart the daemon
systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}

View File

@ -2,7 +2,7 @@
RETRIES="{{ handler_health_rgw_check_retries }}"
DELAY="{{ handler_health_rgw_check_delay }}"
HOST_NAME="{{ ansible_hostname }}"
HOST_NAME="{{ ansible_facts['hostname'] }}"
RGW_NUMS={{ rgw_instances | length | int }}
RGW_FRONTEND_SSL_CERT={{ radosgw_frontend_ssl_certificate }}
if [ -n "$RGW_FRONTEND_SSL_CERT" ]; then

View File

@ -15,7 +15,7 @@
block:
- name: install firewalld python binding
package:
name: "python{{ ansible_python.version.major }}-firewall"
name: "python{{ ansible_facts['python']['version']['major'] }}-firewall"
tags: with_pkg
when: not is_atomic | bool

View File

@ -2,7 +2,7 @@
- name: update cache for Debian based OSs
apt:
update_cache: yes
when: ansible_os_family == "Debian"
when: ansible_facts['os_family'] == "Debian"
register: result
until: result is succeeded
@ -10,7 +10,7 @@
include_tasks: configure_firewall.yml
when:
- configure_firewall | bool
- ansible_os_family in ['RedHat', 'Suse']
- ansible_facts['os_family'] in ['RedHat', 'Suse']
tags: configure_firewall
- name: include_tasks setup_ntp.yml

View File

@ -3,13 +3,13 @@
set_fact:
chrony_daemon_name: chrony
ntp_service_name: ntp
when: ansible_os_family == 'Debian'
when: ansible_facts['os_family'] == 'Debian'
- name: set ntp service and chrony daemon name for RedHat and Suse family
set_fact:
chrony_daemon_name: chronyd
ntp_service_name: ntpd
when: ansible_os_family in ['RedHat', 'Suse']
when: ansible_facts['os_family'] in ['RedHat', 'Suse']
# Installation of NTP daemons needs to be a separate task since installations
# can't happen on Atomic

View File

@ -35,13 +35,13 @@ trusted_ip_list: 192.168.122.1
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
# TCMU_RUNNER resource limitation
ceph_tcmu_runner_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_tcmu_runner_docker_cpu_limit: 1
# RBD_TARGET_GW resource limitation
ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_rbd_target_gw_docker_cpu_limit: 1
# RBD_TARGET_API resource limitation
ceph_rbd_target_api_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_rbd_target_api_docker_cpu_limit: 1

View File

@ -28,7 +28,7 @@
- name: add mgr ip address to trusted list with dashboard - ipv4
set_fact:
trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_facts"]["all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
when:
- dashboard_enabled | bool
@ -36,7 +36,7 @@
- name: add mgr ip address to trusted list with dashboard - ipv6
set_fact:
trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_facts"]["all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
when:
- dashboard_enabled | bool
@ -53,7 +53,7 @@
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: containerized_deployment | bool
@ -67,4 +67,4 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"

View File

@ -33,7 +33,7 @@
command: >
openssl req -newkey rsa:2048 -nodes -keyout {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key
-x509 -days 365 -out {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt
-subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_hostname }}"
-subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_facts['hostname'] }}"
delegate_to: localhost
run_once: True
with_items: "{{ crt_files_exist.results }}"

View File

@ -1,6 +1,6 @@
---
- name: red hat based systems tasks
when: ansible_os_family == 'RedHat'
when: ansible_facts['os_family'] == 'RedHat'
block:
- name: set_fact common_pkgs
set_fact:
@ -76,7 +76,7 @@
- name: ceph-iscsi stable repository
get_url:
url: 'https://download.ceph.com/ceph-iscsi/{{ "3" if use_new_ceph_iscsi | bool else "2" }}/rpm/el{{ ansible_distribution_major_version }}/ceph-iscsi.repo'
url: "https://download.ceph.com/ceph-iscsi/{{ '3' if use_new_ceph_iscsi | bool else '2' }}/rpm/el{{ ansible_facts['distribution_major_version'] }}/ceph-iscsi.repo"
dest: /etc/yum.repos.d/ceph-iscsi.repo
force: true
register: result

View File

@ -19,13 +19,13 @@ copy_admin_key: false
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
ceph_mds_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_mds_docker_cpu_limit: 4
# we currently force MDS_NAME to hostname because of a bug in ceph-docker
# fix here: https://github.com/ceph/ceph-docker/pull/770
# this will go away soon.
ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }}
ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
ceph_config_keys: [] # DON'T TOUCH ME

View File

@ -8,7 +8,7 @@
mode: "{{ ceph_directories_mode }}"
with_items:
- /var/lib/ceph/bootstrap-mds/
- /var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}
- /var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}
- name: get keys from monitors
ceph_key:

View File

@ -4,14 +4,14 @@
- name: systemd start mds container
systemd:
name: ceph-mds@{{ ansible_hostname }}
name: ceph-mds@{{ ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no
daemon_reload: yes
- name: wait for mds socket to exist
command: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok'"
command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'"
changed_when: false
register: multi_mds_socket
retries: 5

View File

@ -3,10 +3,10 @@
apt:
name: ceph-mds
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
when:
- mds_group_name in group_names
- ansible_os_family == 'Debian'
- ansible_facts['os_family'] == 'Debian'
register: result
until: result is succeeded
@ -18,11 +18,11 @@
until: result is succeeded
when:
- mds_group_name in group_names
- ansible_os_family in ['Suse', 'RedHat']
- ansible_facts['os_family'] in ['Suse', 'RedHat']
- name: create mds keyring
ceph_key:
name: "mds.{{ ansible_hostname }}"
name: "mds.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
user: client.bootstrap-mds
user_key: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring"
@ -30,7 +30,7 @@
mon: "allow profile mds"
mds: "allow"
osd: "allow rwx"
dest: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}/keyring"
dest: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring"
import_key: false
owner: ceph
group: ceph
@ -43,7 +43,7 @@
path: "/etc/systemd/system/ceph-mds@.service.d/"
when:
- ceph_mds_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
- ansible_facts['service_mgr'] == 'systemd'
- name: add ceph-mds systemd service overrides
config_template:
@ -53,11 +53,11 @@
config_type: "ini"
when:
- ceph_mds_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
- ansible_facts['service_mgr'] == 'systemd'
- name: start and add that the metadata service to the init sequence
service:
name: ceph-mds@{{ ansible_hostname }}
name: ceph-mds@{{ ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no

View File

@ -6,17 +6,17 @@ Requires=docker.service
{% else %}
After=network.target
{% endif %}
{% set cpu_limit = ansible_processor_vcpus|int if ceph_mds_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_mds_docker_cpu_limit|int %}
{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_mds_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_mds_docker_cpu_limit|int %}
[Service]
EnvironmentFile=-/etc/environment
{% if container_binary == 'podman' %}
ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mds-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mds-{{ ansible_facts['hostname'] }}
{% else %}
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}
{% endif %}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_facts['hostname'] }}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@ -32,12 +32,12 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
-e CEPH_DAEMON=MDS \
-e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
{{ ceph_mds_docker_extra_env }} \
--name=ceph-mds-{{ ansible_hostname }} \
--name=ceph-mds-{{ ansible_facts['hostname'] }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% else %}
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}
{% endif %}
KillMode=none
Restart=always

View File

@ -33,7 +33,7 @@ ceph_mgr_packages:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
ceph_mgr_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_mgr_docker_cpu_limit: 1
ceph_mgr_docker_extra_env:

View File

@ -1,7 +1,7 @@
---
- name: create mgr directory
file:
path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}
path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@ -9,7 +9,7 @@
- name: fetch ceph mgr keyring
ceph_key:
name: "mgr.{{ ansible_hostname }}"
name: "mgr.{{ ansible_facts['hostname'] }}"
caps:
mon: allow profile mgr
osd: allow *
@ -19,7 +19,7 @@
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "0400"
dest: "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring"
dest: "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
@ -30,7 +30,7 @@
block:
- name: create ceph mgr keyring(s) on a mon node
ceph_key:
name: "mgr.{{ hostvars[item]['ansible_hostname'] }}"
name: "mgr.{{ hostvars[item]['ansible_facts']['hostname'] }}"
caps:
mon: allow profile mgr
osd: allow *
@ -51,7 +51,7 @@
set_fact:
_mgr_keys:
- { 'name': 'client.admin', 'path': "/etc/ceph/{{ cluster }}.client.admin.keyring", 'copy_key': copy_admin_key }
- { 'name': "mgr.{{ ansible_hostname }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring", 'copy_key': true }
- { 'name': "mgr.{{ ansible_facts['hostname'] }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring", 'copy_key': true }
- name: get keys from monitors
ceph_key:
@ -84,7 +84,7 @@
- name: set mgr key permissions
file:
path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring
path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"

View File

@ -1,7 +1,7 @@
---
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
delegate_to: "{{ item }}"
delegate_facts: true

View File

@ -1,10 +1,10 @@
---
- name: set_fact ceph_mgr_packages for sso
set_fact:
ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_distribution_major_version | int == 8 else 'python-saml']) }}"
ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_facts['distribution_major_version'] | int == 8 else 'python-saml']) }}"
when:
- dashboard_enabled | bool
- ansible_distribution == 'RedHat'
- ansible_facts['distribution'] == 'RedHat'
- name: set_fact ceph_mgr_packages for dashboard
set_fact:
@ -15,8 +15,8 @@
set_fact:
ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-diskprediction-local']) }}"
when:
- ansible_os_family != 'RedHat'
- ansible_distribution_major_version | int != 7
- ansible_facts['os_family'] != 'RedHat'
- ansible_facts['distribution_major_version'] | int != 7
- name: install ceph-mgr packages on RedHat or SUSE
package:
@ -24,13 +24,13 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
when: ansible_os_family in ['RedHat', 'Suse']
when: ansible_facts['os_family'] in ['RedHat', 'Suse']
- name: install ceph-mgr packages for debian
apt:
name: '{{ ceph_mgr_packages }}'
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
register: result
until: result is succeeded
when: ansible_os_family == 'Debian'
when: ansible_facts['os_family'] == 'Debian'
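Worked example of the default_release expression (release name assumed): on an Ubuntu bionic host deploying with ceph_origin 'distro' and ceph_use_distro_backports enabled, the distribution_release fact pins the apt install to the backports pocket:

    # ansible_facts['distribution_release'] == 'bionic'  ->  default_release: "bionic-backports"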

View File

@ -5,7 +5,7 @@
path: "/etc/systemd/system/ceph-mgr@.service.d/"
when:
- ceph_mgr_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
- ansible_facts['service_mgr'] == 'systemd'
- name: add ceph-mgr systemd service overrides
config_template:
@ -15,7 +15,7 @@
config_type: "ini"
when:
- ceph_mgr_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
- ansible_facts['service_mgr'] == 'systemd'
- name: include_tasks systemd.yml
include_tasks: systemd.yml
@ -23,7 +23,7 @@
- name: systemd start mgr
systemd:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no

View File

@ -11,11 +11,11 @@ After=network.target
EnvironmentFile=-/etc/environment
{% if container_binary == 'podman' %}
ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mgr-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mgr-{{ ansible_facts['hostname'] }}
{% else %}
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }}
{% endif %}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_facts['hostname'] }}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@ -31,12 +31,12 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
-e CEPH_DAEMON=MGR \
-e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
{{ ceph_mgr_docker_extra_env }} \
--name=ceph-mgr-{{ ansible_hostname }} \
--name=ceph-mgr-{{ ansible_facts['hostname'] }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% else %}
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }}
{% endif %}
KillMode=none
Restart=always

View File

@ -37,7 +37,7 @@ client_admin_ceph_authtool_cap:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mon_docker_extra_env' variable.
ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_mon_docker_cpu_limit: 1
ceph_mon_container_listen_port: 3300

View File

@ -4,7 +4,7 @@
{{ container_exec_cmd }}
ceph
--cluster {{ cluster }}
daemon mon.{{ ansible_hostname }}
daemon mon.{{ ansible_facts['hostname'] }}
mon_status
--format json
register: ceph_health_raw

View File

@ -7,7 +7,7 @@
name: mon.
cluster: "{{ cluster }}"
user: mon.
user_key: "/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[running_mon]['ansible_hostname'] }}/keyring"
user_key: "/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[running_mon]['ansible_facts']['hostname'] }}/keyring"
output_format: json
state: info
environment:

View File

@ -1,7 +1,7 @@
---
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: include deploy_monitors.yml

View File

@ -6,7 +6,7 @@
when:
- not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
- ansible_facts['service_mgr'] == 'systemd'
- name: add ceph-mon systemd service overrides
config_template:
@ -17,7 +17,7 @@
when:
- not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
- ansible_facts['service_mgr'] == 'systemd'
- name: include_tasks systemd.yml
include_tasks: systemd.yml
@ -25,7 +25,7 @@
- name: start the monitor service
systemd:
name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_hostname }}
name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no

View File

@ -28,7 +28,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-mon-%i \
-v /var/run/ceph:/var/run/ceph:z \
-v /etc/localtime:/etc/localtime:ro \
-v /var/log/ceph:/var/log/ceph:z \
{% if ansible_distribution == 'RedHat' -%}
{% if ansible_facts['distribution'] == 'RedHat' -%}
-v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:z \
{% endif -%}
{% if mon_docker_privileged | bool -%}

View File

@ -17,7 +17,7 @@ copy_admin_key: false
ceph_nfs_enable_service: true
# ceph-nfs systemd service uses ansible's hostname as an instance id,
# so service name is ceph-nfs@{{ ansible_hostname }}, this is not
# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
# such case it's better to have constant instance id instead which
# can be set by 'ceph_nfs_service_suffix'
@ -74,7 +74,7 @@ ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
# they must be configured.
#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
rgw_client_name: client.rgw.{{ ansible_hostname }}
rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
###################
# CONFIG OVERRIDE #

View File

@ -1,7 +1,7 @@
---
- name: set_fact container_exec_cmd_nfs
set_fact:
container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"

View File

@ -15,7 +15,7 @@
state: present
register: result
until: result is succeeded
when: ansible_distribution_major_version == '7'
when: ansible_facts['distribution_major_version'] == '7'
- name: install nfs-ganesha-selinux and python3-policycoreutils on RHEL 8
package:
@ -23,7 +23,7 @@
state: present
register: result
until: result is succeeded
when: ansible_distribution_major_version == '8'
when: ansible_facts['distribution_major_version'] == '8'
- name: add ganesha_t to permissive domain
selinux_permissive:


@ -2,7 +2,7 @@
# global/common requirement
- name: stop nfs server service
systemd:
name: "{{ 'nfs-server' if ansible_os_family == 'RedHat' else 'nfsserver' if ansible_os_family == 'Suse' else 'nfs-kernel-server' if ansible_os_family == 'Debian' }}"
name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}"
state: stopped
enabled: no
failed_when: false
@ -24,7 +24,7 @@
import_tasks: ganesha_selinux_fix.yml
when:
- not containerized_deployment | bool
- ansible_os_family == 'RedHat'
- ansible_facts['os_family'] == 'RedHat'
- name: nfs with external ceph cluster task related
when:
@ -40,7 +40,7 @@
mode: "0755"
with_items:
- "{{ ceph_nfs_ceph_user }}"
- "{{ ansible_hostname }}"
- "{{ ansible_facts['hostname'] }}"
- name: set_fact rgw_client_name
set_fact:
@ -55,7 +55,7 @@
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
with_nested:
- "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] | default([]) }}"
- ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring']
- ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"]
when:
- not item.0.get('skipped', False)
- item.0.item.name == 'client.' + ceph_nfs_ceph_user or item.0.item.name == rgw_client_name


@ -3,7 +3,7 @@
block:
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
delegate_to: "{{ item }}"
delegate_facts: true


@ -1,11 +1,11 @@
---
- name: include red hat based system related tasks
include_tasks: pre_requisite_non_container_red_hat.yml
when: ansible_os_family == 'RedHat'
when: ansible_facts['os_family'] == 'RedHat'
- name: include debian based system related tasks
include_tasks: pre_requisite_non_container_debian.yml
when: ansible_os_family == 'Debian'
when: ansible_facts['os_family'] == 'Debian'
- name: install nfs rgw/cephfs gateway - SUSE/openSUSE
zypper:
@ -18,7 +18,7 @@
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository != 'rhcs'
- ansible_os_family == 'Suse'
- ansible_facts['os_family'] == 'Suse'
- item.install | bool
register: result
until: result is succeeded
@ -35,7 +35,7 @@
with_items:
- { name: "/var/lib/ceph/bootstrap-rgw", create: "{{ nfs_obj_gw }}" }
- { name: "/var/lib/ceph/radosgw", create: "{{ nfs_obj_gw }}" }
- { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}", create: "{{ nfs_obj_gw }}" }
- { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", create: "{{ nfs_obj_gw }}" }
- { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
- { name: "/var/log/ceph", create: true }
- { name: "/var/log/ganesha", create: true, owner: root, group: root }
@ -80,14 +80,14 @@
block:
- name: create rados gateway keyring
ceph_key:
name: "client.rgw.{{ ansible_hostname }}"
name: "client.rgw.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
user: client.bootstrap-rgw
user_key: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
caps:
mon: "allow rw"
osd: "allow rwx"
dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring"
dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"
import_key: false
owner: ceph
group: ceph


@ -9,7 +9,7 @@
block:
- name: add nfs-ganesha stable repository
apt_repository:
repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_distribution_release) }} main"
repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
state: present
update_cache: no
register: add_ganesha_apt_repo
@ -30,7 +30,7 @@
block:
- name: fetch nfs-ganesha development repository
uri:
url: https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_release }}/flavors/{{ nfs_ganesha_flavor }}/repo
url: https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo
return_content: yes
register: nfs_ganesha_dev_apt_repo


@ -31,7 +31,7 @@
block:
- name: add nfs-ganesha dev repo
get_url:
url: 'https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/flavors/{{ nfs_ganesha_flavor }}/repo'
url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo"
dest: /etc/yum.repos.d/nfs-ganesha-dev.repo
force: true
when:


@ -8,7 +8,7 @@
- name: set_fact container_exec_cmd_nfs - internal
set_fact:
exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_hostname'] if containerized_deployment | bool else '' }} rados"
exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados"
delegate_node: "{{ groups[mon_group_name][0] }}"
when: groups.get(mon_group_name, []) | length > 0
@ -72,7 +72,7 @@
- name: systemd start nfs container
systemd:
name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
state: started
enabled: yes
masked: no


@ -15,7 +15,7 @@ ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-nfs-%i
{% endif %}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
ExecStartPre={{ '/bin/mkdir' if ansible_os_family == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
ExecStartPre={{ '/bin/mkdir' if ansible_facts['os_family'] == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@ -36,7 +36,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
-e CEPH_DAEMON=NFS \
-e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
{{ ceph_nfs_docker_extra_env }} \
--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }} \
--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"

Some files were not shown because too many files have changed in this diff.