From 9780490b2f7f850aa74c61f4a7810dc4b23fb6a1 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Thu, 25 Mar 2021 03:47:40 +0100 Subject: [PATCH] convert some missed `ansible_*` calls to `ansible_facts['*']` This converts some calls to `ansible_*` that were missed in initial PR #6312 Signed-off-by: Guillaume Abrioux (cherry picked from commit 0163ecc924ccec3e5da9a9c4cfb8ac4558ec33ef) --- group_vars/nfss.yml.sample | 2 +- group_vars/osds.yml.sample | 2 +- roles/ceph-nfs/defaults/main.yml | 2 +- roles/ceph-osd/defaults/main.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/group_vars/nfss.yml.sample b/group_vars/nfss.yml.sample index 2abf4554d..fc18785c9 100644 --- a/group_vars/nfss.yml.sample +++ b/group_vars/nfss.yml.sample @@ -29,7 +29,7 @@ dummy: # ideal when ceph-nfs is managed by pacemaker across multiple hosts - in # such case it's better to have constant instance id instead which # can be set by 'ceph_nfs_service_suffix' -# ceph_nfs_service_suffix: ansible_hostname +# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}" ####################### # Access type options # diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample index 6e418a7ce..34ea07858 100644 --- a/group_vars/osds.yml.sample +++ b/group_vars/osds.yml.sample @@ -58,7 +58,7 @@ dummy: #bluestore_wal_devices: [] #'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above. -# Device discovery is based on the Ansible fact 'ansible_devices' +# Device discovery is based on the Ansible fact 'ansible_facts["devices"]' # which reports all the devices on a system. If chosen, all the disks # found will be passed to ceph-volume lvm batch. You should not be worried on using # this option since ceph-volume has a built-in check which looks for empty devices. 
diff --git a/roles/ceph-nfs/defaults/main.yml b/roles/ceph-nfs/defaults/main.yml index 5f058525e..0029e7dbc 100644 --- a/roles/ceph-nfs/defaults/main.yml +++ b/roles/ceph-nfs/defaults/main.yml @@ -21,7 +21,7 @@ ceph_nfs_enable_service: true # ideal when ceph-nfs is managed by pacemaker across multiple hosts - in # such case it's better to have constant instance id instead which # can be set by 'ceph_nfs_service_suffix' -# ceph_nfs_service_suffix: ansible_hostname +# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}" ####################### # Access type options # diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml index 8084140d4..940d89912 100644 --- a/roles/ceph-osd/defaults/main.yml +++ b/roles/ceph-osd/defaults/main.yml @@ -50,7 +50,7 @@ dedicated_devices: [] bluestore_wal_devices: [] #'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above. -# Device discovery is based on the Ansible fact 'ansible_devices' +# Device discovery is based on the Ansible fact 'ansible_facts["devices"]' # which reports all the devices on a system. If chosen, all the disks # found will be passed to ceph-volume lvm batch. You should not be worried on using # this option since ceph-volume has a built-in check which looks for empty devices.