From 8ed6dc10a81fd6d0216a59b850cb057bcf1534f9 Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Tue, 11 Aug 2020 15:11:23 +0200
Subject: [PATCH] container: call container_exec_cmd with right path

These tasks are delegated to the first mon, so we should resolve the
container_exec_cmd fact from that host's hostvars, i.e. with the right
path.

Signed-off-by: Guillaume Abrioux
---
 .../filestore-to-bluestore.yml                     | 10 +++++-----
 infrastructure-playbooks/rolling_update.yml        |  4 ++--
 roles/ceph-client/tasks/pre_requisite.yml          |  2 +-
 roles/ceph-iscsi-gw/tasks/common.yml               |  2 +-
 roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml      |  4 ++--
 roles/ceph-mds/tasks/common.yml                    |  2 +-
 roles/ceph-mon/tasks/ceph_keys.yml                 |  5 +++--
 roles/ceph-mon/tasks/secure_cluster.yml            |  6 ++++--
 roles/ceph-rbd-mirror/tasks/common.yml             |  2 +-
 roles/ceph-rgw/tasks/common.yml                    |  2 +-
 roles/ceph-rgw/tasks/multisite/checks.yml          |  6 +++---
 .../ceph-rgw/tasks/multisite/create_zone_user.yml  |  4 ++--
 roles/ceph-rgw/tasks/multisite/master.yml          | 12 ++++++------
 roles/ceph-rgw/tasks/multisite/secondary.yml       | 10 +++++-----
 roles/ceph-rgw/tasks/rgw_create_pools.yml          | 14 +++++++-------
 15 files changed, 44 insertions(+), 41 deletions(-)

diff --git a/infrastructure-playbooks/filestore-to-bluestore.yml b/infrastructure-playbooks/filestore-to-bluestore.yml
index 1ed6208df..8a612436f 100644
--- a/infrastructure-playbooks/filestore-to-bluestore.yml
+++ b/infrastructure-playbooks/filestore-to-bluestore.yml
@@ -41,7 +41,7 @@
         name: ceph-facts
 
     - name: get ceph osd tree data
-      command: "{{ container_exec_cmd }} ceph osd tree -f json"
+      command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph osd tree -f json"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       register: osd_tree
       run_once: true
@@ -82,7 +82,7 @@
       ignore_errors: true
 
     - name: mark out osds
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ (item.0.stdout | from_json).whoami }}"
+      command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd out {{ (item.0.stdout | from_json).whoami }}"
       with_together:
         - "{{ simple_scan.results }}"
         - "{{ partlabel.results }}"
@@ -170,7 +170,7 @@
     - name: ceph-volume prepared OSDs related tasks
       block:
         - name: mark out osds
-          command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
+          command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd out {{ item }}"
           with_items: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
           delegate_to: "{{ groups[mon_group_name][0] }}"
           run_once: true
@@ -193,7 +193,7 @@
             - item.type == 'data'
 
         - name: mark down osds
-          command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ item }}"
+          command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd down {{ item }}"
           with_items: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
          delegate_to: "{{ groups[mon_group_name][0] }}"
          run_once: true
@@ -266,7 +266,7 @@
 
        - name: purge osd(s) from the cluster
          command: >
-            {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it
+            {{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it
          run_once: true
          delegate_to: "{{ groups[mon_group_name][0] }}"
          with_items: "{{ osd_ids }}"
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index 97a269fa6..7f25e62ca 100644
--- a/infrastructure-playbooks/rolling_update.yml
+++ b/infrastructure-playbooks/rolling_update.yml
@@ -135,13 +135,13 @@
 
     - block:
         - name: get ceph cluster status
-          command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
+          command: "{{ hostvars[mon_host]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} -s -f json"
           register: check_cluster_status
           delegate_to: "{{ mon_host }}"
 
         - block:
             - name: display ceph health detail
-              command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health detail"
+              command: "{{ hostvars[mon_host]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} health detail"
               delegate_to: "{{ mon_host }}"
 
             - name: fail if cluster isn't in an acceptable state
diff --git a/roles/ceph-client/tasks/pre_requisite.yml b/roles/ceph-client/tasks/pre_requisite.yml
index 697352377..e545b45ae 100644
--- a/roles/ceph-client/tasks/pre_requisite.yml
+++ b/roles/ceph-client/tasks/pre_requisite.yml
@@ -2,7 +2,7 @@
 - name: copy ceph admin keyring
   block:
     - name: get keys from monitors
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+      command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
       register: _client_keys
       with_items:
         - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
diff --git a/roles/ceph-iscsi-gw/tasks/common.yml b/roles/ceph-iscsi-gw/tasks/common.yml
index 678f239c4..9c2054e1c 100644
--- a/roles/ceph-iscsi-gw/tasks/common.yml
+++ b/roles/ceph-iscsi-gw/tasks/common.yml
@@ -1,6 +1,6 @@
 ---
 - name: get keys from monitors
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
   register: _iscsi_keys
   with_items:
     - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
diff --git a/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml b/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml
index cd98ff13a..8ef238d96 100644
--- a/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml
+++ b/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml
@@ -15,7 +15,7 @@
     - "iscsi-gateway-pub.key"
 
 - name: check for existing crt file(s) in monitor key/value store
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get iscsi/ssl/{{ item }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} config get iscsi/ssl/{{ item }}"
   with_items: "{{ crt_files }}"
   changed_when: false
   failed_when: false
@@ -65,7 +65,7 @@
   delegate_to: localhost
 
 - name: store ssl crt/key files
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key put iscsi/ssl/{{ item.item }} {{ item.content }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} config-key put iscsi/ssl/{{ item.item }} {{ item.content }}"
   run_once: true
   delegate_to: "{{ groups.get(mon_group_name)[0] }}"
   with_items: "{{ iscsi_ssl_files_content.results }}"
diff --git a/roles/ceph-mds/tasks/common.yml b/roles/ceph-mds/tasks/common.yml
index 1d5364a70..25074f11d 100644
--- a/roles/ceph-mds/tasks/common.yml
+++ b/roles/ceph-mds/tasks/common.yml
@@ -11,7 +11,7 @@
     - /var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}
 
 - name: get keys from monitors
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
   register: _mds_keys
   with_items:
     - { name: "client.bootstrap-mds", path: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring", copy_key: true }
diff --git a/roles/ceph-mon/tasks/ceph_keys.yml b/roles/ceph-mon/tasks/ceph_keys.yml
index afcb6ecca..c3ee3b385 100644
--- a/roles/ceph-mon/tasks/ceph_keys.yml
+++ b/roles/ceph-mon/tasks/ceph_keys.yml
@@ -1,14 +1,15 @@
 ---
 - name: waiting for the monitor(s) to form the quorum...
   command: >
-    {{ container_exec_cmd }}
+    {{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }}
     ceph
     --cluster {{ cluster }}
-    daemon mon.{{ ansible_hostname }}
+    daemon mon.{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['ansible_hostname'] }}
     mon_status --format json
   register: ceph_health_raw
   run_once: true
+  delegate_to: "{{ groups.get(mon_group_name)[0] }}"
   until: >
     (ceph_health_raw.stdout | length > 0) and
     (ceph_health_raw.stdout | default('{}') | from_json)['state'] in ['leader', 'peon']
   retries: "{{ handler_health_mon_check_retries }}"
diff --git a/roles/ceph-mon/tasks/secure_cluster.yml b/roles/ceph-mon/tasks/secure_cluster.yml
index 63d3cca43..6717b87aa 100644
--- a/roles/ceph-mon/tasks/secure_cluster.yml
+++ b/roles/ceph-mon/tasks/secure_cluster.yml
@@ -1,14 +1,16 @@
 ---
 - name: collect all the pools
   command: >
-    {{ container_exec_cmd }} rados --cluster {{ cluster }} lspools
+    {{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} rados --cluster {{ cluster }} lspools
+  delegate_to: "{{ groups.get(mon_group_name)[0] }}"
   changed_when: false
   register: ceph_pools
   check_mode: no
 
 - name: secure the cluster
   command: >
-    {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
+    {{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
+  delegate_to: "{{ groups.get(mon_group_name)[0] }}"
   changed_when: false
   with_nested:
     - "{{ ceph_pools.stdout_lines|default([]) }}"
diff --git a/roles/ceph-rbd-mirror/tasks/common.yml b/roles/ceph-rbd-mirror/tasks/common.yml
index e350808ae..8bee56368 100644
--- a/roles/ceph-rbd-mirror/tasks/common.yml
+++ b/roles/ceph-rbd-mirror/tasks/common.yml
@@ -1,6 +1,6 @@
 ---
 - name: get keys from monitors
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
   register: _rbd_mirror_keys
   with_items:
     - { name: "client.bootstrap-rbd-mirror", path: "/var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring", copy_key: true }
diff --git a/roles/ceph-rgw/tasks/common.yml b/roles/ceph-rgw/tasks/common.yml
index 5e0520c69..cd5ff279b 100644
--- a/roles/ceph-rgw/tasks/common.yml
+++ b/roles/ceph-rgw/tasks/common.yml
@@ -9,7 +9,7 @@
   with_items: "{{ rbd_client_admin_socket_path }}"
 
 - name: get keys from monitors
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
   register: _rgw_keys
   with_items:
     - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
diff --git a/roles/ceph-rgw/tasks/multisite/checks.yml b/roles/ceph-rgw/tasks/multisite/checks.yml
index a9fc55f63..07e0cdb4f 100644
--- a/roles/ceph-rgw/tasks/multisite/checks.yml
+++ b/roles/ceph-rgw/tasks/multisite/checks.yml
@@ -1,6 +1,6 @@
 ---
 - name: check if the realm already exists
-  command: "{{ container_exec_cmd }} radosgw-admin realm get --cluster={{ cluster }} --rgw-realm={{ item }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin realm get --cluster={{ cluster }} --rgw-realm={{ item }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: realmcheck
   failed_when: False
@@ -11,7 +11,7 @@
   when: realms is defined
 
 - name: check if the zonegroup already exists
-  command: "{{ container_exec_cmd }} radosgw-admin zonegroup get --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zonegroup get --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: zonegroupcheck
   failed_when: False
@@ -22,7 +22,7 @@
   when: zonegroups is defined
 
 - name: check if the zone already exists
-  command: "{{ container_exec_cmd }} radosgw-admin zone get --rgw-realm={{ item.realm }} --cluster={{ cluster }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zone get --rgw-realm={{ item.realm }} --cluster={{ cluster }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: zonecheck
   failed_when: False
diff --git a/roles/ceph-rgw/tasks/multisite/create_zone_user.yml b/roles/ceph-rgw/tasks/multisite/create_zone_user.yml
index 601f431ce..1ac1084fa 100644
--- a/roles/ceph-rgw/tasks/multisite/create_zone_user.yml
+++ b/roles/ceph-rgw/tasks/multisite/create_zone_user.yml
@@ -9,7 +9,7 @@
     - hostvars[item.host]['rgw_zonegroupmaster'] | bool
 
 - name: check if the realm system user already exists
-  command: "{{ container_exec_cmd }} radosgw-admin user info --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --uid={{ item.user }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin user info --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --uid={{ item.user }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: usercheck
   failed_when: False
@@ -19,7 +19,7 @@
   loop: "{{ zone_users }}"
 
 - name: create the zone user(s)
-  command: "{{ container_exec_cmd }} radosgw-admin user create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --uid={{ item.item.user }} --display-name='{{ item.item.display_name }}' --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} --system"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin user create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --uid={{ item.item.user }} --display-name='{{ item.item.display_name }}' --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} --system"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ usercheck.results }}"
diff --git a/roles/ceph-rgw/tasks/multisite/master.yml b/roles/ceph-rgw/tasks/multisite/master.yml
index 269681287..5305ea284 100644
--- a/roles/ceph-rgw/tasks/multisite/master.yml
+++ b/roles/ceph-rgw/tasks/multisite/master.yml
@@ -1,6 +1,6 @@
 ---
 - name: create the realm(s)
-  command: "{{ container_exec_cmd }} radosgw-admin realm create --cluster={{ cluster }} --rgw-realm={{ item.item }} {{ '--default' if realms | length == 1 else '' }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin realm create --cluster={{ cluster }} --rgw-realm={{ item.item }} {{ '--default' if realms | length == 1 else '' }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ realmcheck.results }}"
@@ -10,7 +10,7 @@
     - "'No such file or directory' in item.stderr"
 
 - name: create zonegroup(s)
-  command: "{{ container_exec_cmd }} radosgw-admin zonegroup create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} {{ '--default' if zonegroups | length == 1 else '' }} {{ '--master' if item.item.is_master | bool else '' }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zonegroup create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} {{ '--default' if zonegroups | length == 1 else '' }} {{ '--master' if item.item.is_master | bool else '' }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ zonegroupcheck.results }}"
@@ -21,7 +21,7 @@
     - "'No such file or directory' in item.stderr"
 
 - name: create the master zone
-  command: "{{ container_exec_cmd }} radosgw-admin zone create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} {{ '--default' if zones | length == 1 else '' }} --master"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zone create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} {{ '--default' if zones | length == 1 else '' }} --master"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ zonecheck.results }}"
@@ -32,7 +32,7 @@
     - "'No such file or directory' in item.stderr"
 
 - name: add endpoints to their zone groups(s)
-  command: "{{ container_exec_cmd }} radosgw-admin zonegroup modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --endpoints {{ item.endpoints }}"
command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zonegroup modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --endpoints {{ item.endpoints }}" loop: "{{ zone_endpoints_list }}" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true @@ -41,7 +41,7 @@ - item.is_master | bool - name: add endpoints to their zone(s) - command: "{{ container_exec_cmd }} radosgw-admin zone modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --endpoints {{ item.endpoints }}" + command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zone modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --endpoints {{ item.endpoints }}" loop: "{{ zone_endpoints_list }}" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true @@ -50,7 +50,7 @@ - item.is_master | bool - name: update period for zone creation - command: "{{ container_exec_cmd }} radosgw-admin --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} period update --commit" + command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} period update --commit" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true loop: "{{ zone_endpoints_list }}" diff --git a/roles/ceph-rgw/tasks/multisite/secondary.yml b/roles/ceph-rgw/tasks/multisite/secondary.yml index 920418955..2a76d4169 100644 --- a/roles/ceph-rgw/tasks/multisite/secondary.yml +++ b/roles/ceph-rgw/tasks/multisite/secondary.yml @@ -1,20 +1,20 @@ --- - name: fetch the realm(s) - command: "{{ container_exec_cmd }} radosgw-admin realm pull --cluster={{ cluster }} --rgw-realm={{ item.realm }} --url={{ item.endpoint }} --access-key={{ item.system_access_key }} --secret={{ item.system_secret_key }}" + command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin realm pull --cluster={{ cluster }} --rgw-realm={{ item.realm }} --url={{ item.endpoint }} --access-key={{ item.system_access_key }} --secret={{ item.system_secret_key }}" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true loop: "{{ secondary_realms }}" when: secondary_realms is defined - name: get the period(s) - command: "{{ container_exec_cmd }} radosgw-admin period get --cluster={{ cluster }} --rgw-realm={{ item.realm }}" + command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin period get --cluster={{ cluster }} --rgw-realm={{ item.realm }}" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true loop: "{{ secondary_realms }}" when: secondary_realms is defined - name: create the zone - command: "{{ container_exec_cmd }} radosgw-admin zone create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} {{ '--default' if zones | length == 1 else '' }}" + command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} 
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zone create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} {{ '--default' if zones | length == 1 else '' }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ zonecheck.results }}"
@@ -25,7 +25,7 @@
     - "'No such file or directory' in item.stderr"
 
 - name: add endpoints to their zone(s)
-  command: "{{ container_exec_cmd }} radosgw-admin zone modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --endpoints {{ item.endpoints }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zone modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --endpoints {{ item.endpoints }}"
   loop: "{{ zone_endpoints_list }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
@@ -34,7 +34,7 @@
     - not item.is_master | bool
 
 - name: update period for zone creation
-  command: "{{ container_exec_cmd }} radosgw-admin --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} period update --commit"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} period update --commit"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ zone_endpoints_list }}"
diff --git a/roles/ceph-rgw/tasks/rgw_create_pools.yml b/roles/ceph-rgw/tasks/rgw_create_pools.yml
index da0866d44..652493089 100644
--- a/roles/ceph-rgw/tasks/rgw_create_pools.yml
+++ b/roles/ceph-rgw/tasks/rgw_create_pools.yml
@@ -1,6 +1,6 @@
 ---
 - name: remove ec profile
-  command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd erasure-code-profile rm {{ item.value.ec_profile }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd erasure-code-profile rm {{ item.value.ec_profile }}"
   loop: "{{ rgw_create_pools | dict2items }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
@@ -10,7 +10,7 @@
   failed_when: false
 
 - name: set ec profile
-  command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd erasure-code-profile set {{ item.value.ec_profile }} k={{ item.value.ec_k }} m={{ item.value.ec_m }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd erasure-code-profile set {{ item.value.ec_profile }} k={{ item.value.ec_k }} m={{ item.value.ec_m }}"
   loop: "{{ rgw_create_pools | dict2items }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
@@ -19,7 +19,7 @@
     - item.value.type == 'ec'
 
 - name: set crush rule
-  command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd crush rule create-erasure {{ item.key }} {{ item.value.ec_profile }}"
item.value.ec_profile }}" loop: "{{ rgw_create_pools | dict2items }}" delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false @@ -28,7 +28,7 @@ - item.value.type == 'ec' - name: create ec pools for rgw - command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} erasure {{ item.value.ec_profile }}" + command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} erasure {{ item.value.ec_profile }}" loop: "{{ rgw_create_pools | dict2items }}" delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false @@ -37,7 +37,7 @@ - item.value.type == 'ec' - name: create replicated pools for rgw - command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} replicated {{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}" + command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} replicated {{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}" changed_when: false register: result retries: 60 @@ -48,7 +48,7 @@ when: item.value.type is not defined or item.value.type == 'replicated' - name: customize replicated pool size - command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.value.size | default(osd_pool_default_size) | int == 1 else '' }}" + command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.value.size | default(osd_pool_default_size) | int == 1 else '' }}" register: result retries: 60 delay: 3 @@ -74,7 +74,7 @@ - item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) - name: set the rgw_create_pools pools application to rgw - command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw" + command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw" register: result retries: 60 delay: 3