From 8a4fd99db77b8a7ef3eb1c69d1ed5085a4edf573 Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Thu, 25 Mar 2021 03:47:40 +0100
Subject: [PATCH] convert some missed `ansible_*` calls to `ansible_facts['*']`

This converts some calls to `ansible_*` that were missed in the
initial PR #6312

Signed-off-by: Guillaume Abrioux
(cherry picked from commit 0163ecc924ccec3e5da9a9c4cfb8ac4558ec33ef)
---
 group_vars/nfss.yml.sample       | 2 +-
 group_vars/osds.yml.sample       | 2 +-
 roles/ceph-facts/tasks/facts.yml | 2 +-
 roles/ceph-nfs/defaults/main.yml | 2 +-
 roles/ceph-osd/defaults/main.yml | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/group_vars/nfss.yml.sample b/group_vars/nfss.yml.sample
index 2e87b1fc8..3c1cdb9be 100644
--- a/group_vars/nfss.yml.sample
+++ b/group_vars/nfss.yml.sample
@@ -29,7 +29,7 @@ dummy:
 # ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
 # such case it's better to have constant instance id instead which
 # can be set by 'ceph_nfs_service_suffix'
-# ceph_nfs_service_suffix: ansible_hostname
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
 
 ######################
 # NFS Ganesha Config #
diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index 91abec986..a57c1f3f4 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -58,7 +58,7 @@ dummy:
 #bluestore_wal_devices: []
 
 #'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
-# Device discovery is based on the Ansible fact 'ansible_devices'
+# Device discovery is based on the Ansible fact 'ansible_facts["devices"]'
 # which reports all the devices on a system. If chosen, all the disks
 # found will be passed to ceph-volume lvm batch. You should not be worried on using
 # this option since ceph-volume has a built-in check which looks for empty devices.
diff --git a/roles/ceph-facts/tasks/facts.yml b/roles/ceph-facts/tasks/facts.yml
index 110f3184d..54425dcc9 100644
--- a/roles/ceph-facts/tasks/facts.yml
+++ b/roles/ceph-facts/tasks/facts.yml
@@ -34,7 +34,7 @@
   set_fact:
     ceph_release: "{{ ceph_stable_release }}"
 
-- name: set_fact monitor_name ansible_hostname
+- name: set_fact monitor_name ansible_facts['hostname']
   set_fact:
     monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}"
   delegate_to: "{{ item }}"
diff --git a/roles/ceph-nfs/defaults/main.yml b/roles/ceph-nfs/defaults/main.yml
index 7bda075de..59f95fc2e 100644
--- a/roles/ceph-nfs/defaults/main.yml
+++ b/roles/ceph-nfs/defaults/main.yml
@@ -21,7 +21,7 @@ ceph_nfs_enable_service: true
 # ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
 # such case it's better to have constant instance id instead which
 # can be set by 'ceph_nfs_service_suffix'
-# ceph_nfs_service_suffix: ansible_hostname
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
 
 ######################
 # NFS Ganesha Config #
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index db21e623e..8eb1bebf3 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -50,7 +50,7 @@ dedicated_devices: []
 bluestore_wal_devices: []
 
 #'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
-# Device discovery is based on the Ansible fact 'ansible_devices'
+# Device discovery is based on the Ansible fact 'ansible_facts["devices"]'
 # which reports all the devices on a system. If chosen, all the disks
 # found will be passed to ceph-volume lvm batch. You should not be worried on using
 # this option since ceph-volume has a built-in check which looks for empty devices.
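
For context: the injected `ansible_hostname` / `ansible_devices` style variables only exist when Ansible's `inject_facts_as_vars` setting (enabled by default in ansible.cfg) is turned on, whereas the `ansible_facts` dictionary is always populated by fact gathering, which is presumably why these remaining references are converted. Below is a minimal, hypothetical sketch of the converted pattern; the play and task names are illustrative only and not part of ceph-ansible.

# illustration only: this pattern keeps working with inject_facts_as_vars = False
- hosts: nfss
  gather_facts: true
  tasks:
    - name: set a per-host name from the facts dictionary instead of ansible_hostname
      set_fact:
        monitor_name: "{{ ansible_facts['hostname'] }}"

    - name: look up the same fact through hostvars (the patch does this per delegated host)
      debug:
        msg: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}"

Note that the commented defaults such as `ceph_nfs_service_suffix` accordingly become full Jinja expressions ("{{ ansible_facts['hostname'] }}") rather than bare variable names, so they still resolve if uncommented.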