container: call container_exec_cmd with right path
These tasks are delegated to the first mon, so we should look the fact up with the right path.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
parent a59845bb69
commit 8ed6dc10a8
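Every hunk below makes the same change, so it is worth spelling out once. With delegate_to, Ansible executes the command on the delegated host, but the task is still templated with the variables of the host the play is currently iterating over; a fact like container_exec_cmd, which ceph-facts only sets on monitor nodes, therefore renders wrong (or empty) when the task runs from, say, an OSD or rgw node. Qualifying the lookup through hostvars of the first mon reads the fact from the host that actually owns it. A minimal sketch of the fixed pattern (the task name and trailing ceph command are illustrative, not from the patch):

    - name: run a ceph command from any play host, executed on the first mon
      # render the fact from the mon that owns it, not from the current host;
      # "| default('')" collapses the prefix on non-containerized deployments
      command: >
        {{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }}
        ceph --cluster {{ cluster }} -s
      delegate_to: "{{ groups[mon_group_name][0] }}"
      run_once: true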
@@ -41,7 +41,7 @@
     name: ceph-facts

 - name: get ceph osd tree data
-  command: "{{ container_exec_cmd }} ceph osd tree -f json"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph osd tree -f json"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: osd_tree
   run_once: true
@@ -82,7 +82,7 @@
   ignore_errors: true

 - name: mark out osds
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ (item.0.stdout | from_json).whoami }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd out {{ (item.0.stdout | from_json).whoami }}"
   with_together:
     - "{{ simple_scan.results }}"
     - "{{ partlabel.results }}"
@@ -170,7 +170,7 @@
 - name: ceph-volume prepared OSDs related tasks
   block:
     - name: mark out osds
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
+      command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd out {{ item }}"
       with_items: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
@@ -193,7 +193,7 @@
         - item.type == 'data'

    - name: mark down osds
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ item }}"
+      command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd down {{ item }}"
       with_items: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
@@ -266,7 +266,7 @@

 - name: purge osd(s) from the cluster
   command: >
-    {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it
+    {{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it
   run_once: true
   delegate_to: "{{ groups[mon_group_name][0] }}"
   with_items: "{{ osd_ids }}"

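In the "mark out osds" hunk above, item.0 is a ceph-volume simple scan result and the OSD id is taken from the whoami field of its JSON stdout. An abridged sample of the assumed shape (other fields elided):

    {"whoami": 3, "type": "data", "fsid": "..."}
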
@@ -135,13 +135,13 @@

 - block:
     - name: get ceph cluster status
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
+      command: "{{ hostvars[mon_host]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} -s -f json"
       register: check_cluster_status
       delegate_to: "{{ mon_host }}"

     - block:
         - name: display ceph health detail
-          command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health detail"
+          command: "{{ hostvars[mon_host]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} health detail"
           delegate_to: "{{ mon_host }}"

         - name: fail if cluster isn't in an acceptable state

@@ -2,7 +2,7 @@
 - name: copy ceph admin keyring
   block:
     - name: get keys from monitors
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+      command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
       register: _client_keys
       with_items:
         - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }

@@ -1,6 +1,6 @@
 ---
 - name: get keys from monitors
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
   register: _iscsi_keys
   with_items:
     - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }

@@ -15,7 +15,7 @@
     - "iscsi-gateway-pub.key"

 - name: check for existing crt file(s) in monitor key/value store
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get iscsi/ssl/{{ item }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} config get iscsi/ssl/{{ item }}"
   with_items: "{{ crt_files }}"
   changed_when: false
   failed_when: false
@@ -65,7 +65,7 @@
   delegate_to: localhost

 - name: store ssl crt/key files
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key put iscsi/ssl/{{ item.item }} {{ item.content }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} config-key put iscsi/ssl/{{ item.item }} {{ item.content }}"
   run_once: true
   delegate_to: "{{ groups.get(mon_group_name)[0] }}"
   with_items: "{{ iscsi_ssl_files_content.results }}"

@@ -11,7 +11,7 @@
     - /var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}

 - name: get keys from monitors
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
   register: _mds_keys
   with_items:
     - { name: "client.bootstrap-mds", path: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring", copy_key: true }

@@ -1,14 +1,15 @@
 ---
 - name: waiting for the monitor(s) to form the quorum...
   command: >
-    {{ container_exec_cmd }}
+    {{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }}
     ceph
     --cluster {{ cluster }}
-    daemon mon.{{ ansible_hostname }}
+    daemon mon.{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['ansible_hostname'] }}
     mon_status
     --format json
   register: ceph_health_raw
   run_once: true
+  delegate_to: "{{ groups.get(mon_group_name)[0] }}"
   until: >
     (ceph_health_raw.stdout | length > 0) and (ceph_health_raw.stdout | default('{}') | from_json)['state'] in ['leader', 'peon']
   retries: "{{ handler_health_mon_check_retries }}"

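The until above keeps retrying the delegated mon_status call until the first mon reports itself in quorum. For reference, the command returns JSON whose state field passes through values such as "probing" and "electing" before settling on "leader" or "peon"; an abridged sample of the assumed shape:

    {"name": "mon0", "rank": 0, "state": "leader", "quorum": [0, 1, 2]}
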
@@ -1,14 +1,16 @@
 ---
 - name: collect all the pools
   command: >
-    {{ container_exec_cmd }} rados --cluster {{ cluster }} lspools
+    {{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} rados --cluster {{ cluster }} lspools
+  delegate_to: "{{ groups.get(mon_group_name)[0] }}"
   changed_when: false
   register: ceph_pools
   check_mode: no

 - name: secure the cluster
   command: >
-    {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
+    {{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
+  delegate_to: "{{ groups.get(mon_group_name)[0] }}"
   changed_when: false
   with_nested:
     - "{{ ceph_pools.stdout_lines|default([]) }}"

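The with_nested in "secure the cluster" crosses every pool name returned by lspools with each flag to set, so item[0] is a pool and item[1] a flag. A sketch of the expansion (pool and flag values assumed, not from the patch):

    with_nested:
      - ["rbd", "cephfs_data"]          # item[0]: pool names from lspools
      - ["nosizechange", "nopgchange"]  # item[1]: flags to set to true
    # yields rbd+nosizechange, rbd+nopgchange, cephfs_data+nosizechange, ...
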
@@ -1,6 +1,6 @@
 ---
 - name: get keys from monitors
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
   register: _rbd_mirror_keys
   with_items:
     - { name: "client.bootstrap-rbd-mirror", path: "/var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring", copy_key: true }

@@ -9,7 +9,7 @@
   with_items: "{{ rbd_client_admin_socket_path }}"

 - name: get keys from monitors
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
   register: _rgw_keys
   with_items:
     - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }

@@ -1,6 +1,6 @@
 ---
 - name: check if the realm already exists
-  command: "{{ container_exec_cmd }} radosgw-admin realm get --cluster={{ cluster }} --rgw-realm={{ item }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin realm get --cluster={{ cluster }} --rgw-realm={{ item }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: realmcheck
   failed_when: False
@@ -11,7 +11,7 @@
   when: realms is defined

 - name: check if the zonegroup already exists
-  command: "{{ container_exec_cmd }} radosgw-admin zonegroup get --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zonegroup get --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: zonegroupcheck
   failed_when: False
@@ -22,7 +22,7 @@
   when: zonegroups is defined

 - name: check if the zone already exists
-  command: "{{ container_exec_cmd }} radosgw-admin zone get --rgw-realm={{ item.realm }} --cluster={{ cluster }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zone get --rgw-realm={{ item.realm }} --cluster={{ cluster }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: zonecheck
   failed_when: False

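These check tasks never fail on their own (failed_when: False); they feed the create tasks further down, which loop over the registered results and only fire when the get reported the entity as missing. A condensed sketch of the pairing, as the later hunks show (command abridged):

    - name: create the realm(s)
      command: "... radosgw-admin realm create --cluster={{ cluster }} --rgw-realm={{ item.item }} ..."
      loop: "{{ realmcheck.results }}"
      when:
        - "'No such file or directory' in item.stderr"
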
@@ -9,7 +9,7 @@
     - hostvars[item.host]['rgw_zonegroupmaster'] | bool

 - name: check if the realm system user already exists
-  command: "{{ container_exec_cmd }} radosgw-admin user info --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --uid={{ item.user }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin user info --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --uid={{ item.user }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: usercheck
   failed_when: False
@@ -19,7 +19,7 @@
   loop: "{{ zone_users }}"

 - name: create the zone user(s)
-  command: "{{ container_exec_cmd }} radosgw-admin user create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --uid={{ item.item.user }} --display-name='{{ item.item.display_name }}' --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} --system"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin user create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --uid={{ item.item.user }} --display-name='{{ item.item.display_name }}' --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} --system"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ usercheck.results }}"

@@ -1,6 +1,6 @@
 ---
 - name: create the realm(s)
-  command: "{{ container_exec_cmd }} radosgw-admin realm create --cluster={{ cluster }} --rgw-realm={{ item.item }} {{ '--default' if realms | length == 1 else '' }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin realm create --cluster={{ cluster }} --rgw-realm={{ item.item }} {{ '--default' if realms | length == 1 else '' }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ realmcheck.results }}"
@@ -10,7 +10,7 @@
     - "'No such file or directory' in item.stderr"

 - name: create zonegroup(s)
-  command: "{{ container_exec_cmd }} radosgw-admin zonegroup create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} {{ '--default' if zonegroups | length == 1 else '' }} {{ '--master' if item.item.is_master | bool else '' }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zonegroup create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} {{ '--default' if zonegroups | length == 1 else '' }} {{ '--master' if item.item.is_master | bool else '' }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ zonegroupcheck.results }}"
@@ -21,7 +21,7 @@
     - "'No such file or directory' in item.stderr"

 - name: create the master zone
-  command: "{{ container_exec_cmd }} radosgw-admin zone create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} {{ '--default' if zones | length == 1 else '' }} --master"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zone create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} {{ '--default' if zones | length == 1 else '' }} --master"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ zonecheck.results }}"
@@ -32,7 +32,7 @@
     - "'No such file or directory' in item.stderr"

 - name: add endpoints to their zone groups(s)
-  command: "{{ container_exec_cmd }} radosgw-admin zonegroup modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --endpoints {{ item.endpoints }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zonegroup modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --endpoints {{ item.endpoints }}"
   loop: "{{ zone_endpoints_list }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
@@ -41,7 +41,7 @@
     - item.is_master | bool

 - name: add endpoints to their zone(s)
-  command: "{{ container_exec_cmd }} radosgw-admin zone modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --endpoints {{ item.endpoints }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zone modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --endpoints {{ item.endpoints }}"
   loop: "{{ zone_endpoints_list }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
@@ -50,7 +50,7 @@
     - item.is_master | bool

 - name: update period for zone creation
-  command: "{{ container_exec_cmd }} radosgw-admin --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} period update --commit"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} period update --commit"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ zone_endpoints_list }}"

@@ -1,20 +1,20 @@
 ---
 - name: fetch the realm(s)
-  command: "{{ container_exec_cmd }} radosgw-admin realm pull --cluster={{ cluster }} --rgw-realm={{ item.realm }} --url={{ item.endpoint }} --access-key={{ item.system_access_key }} --secret={{ item.system_secret_key }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin realm pull --cluster={{ cluster }} --rgw-realm={{ item.realm }} --url={{ item.endpoint }} --access-key={{ item.system_access_key }} --secret={{ item.system_secret_key }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ secondary_realms }}"
   when: secondary_realms is defined

 - name: get the period(s)
-  command: "{{ container_exec_cmd }} radosgw-admin period get --cluster={{ cluster }} --rgw-realm={{ item.realm }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin period get --cluster={{ cluster }} --rgw-realm={{ item.realm }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ secondary_realms }}"
   when: secondary_realms is defined

 - name: create the zone
-  command: "{{ container_exec_cmd }} radosgw-admin zone create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} {{ '--default' if zones | length == 1 else '' }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zone create --cluster={{ cluster }} --rgw-realm={{ item.item.realm }} --rgw-zonegroup={{ item.item.zonegroup }} --rgw-zone={{ item.item.zone }} --access-key={{ item.item.system_access_key }} --secret={{ item.item.system_secret_key }} {{ '--default' if zones | length == 1 else '' }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ zonecheck.results }}"
@@ -25,7 +25,7 @@
     - "'No such file or directory' in item.stderr"

 - name: add endpoints to their zone(s)
-  command: "{{ container_exec_cmd }} radosgw-admin zone modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --endpoints {{ item.endpoints }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin zone modify --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} --endpoints {{ item.endpoints }}"
   loop: "{{ zone_endpoints_list }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
@@ -34,7 +34,7 @@
     - not item.is_master | bool

 - name: update period for zone creation
-  command: "{{ container_exec_cmd }} radosgw-admin --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} period update --commit"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} radosgw-admin --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} period update --commit"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   loop: "{{ zone_endpoints_list }}"

@@ -1,6 +1,6 @@
 ---
 - name: remove ec profile
-  command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd erasure-code-profile rm {{ item.value.ec_profile }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd erasure-code-profile rm {{ item.value.ec_profile }}"
   loop: "{{ rgw_create_pools | dict2items }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
@@ -10,7 +10,7 @@
   failed_when: false

 - name: set ec profile
-  command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd erasure-code-profile set {{ item.value.ec_profile }} k={{ item.value.ec_k }} m={{ item.value.ec_m }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd erasure-code-profile set {{ item.value.ec_profile }} k={{ item.value.ec_k }} m={{ item.value.ec_m }}"
   loop: "{{ rgw_create_pools | dict2items }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
@@ -19,7 +19,7 @@
     - item.value.type == 'ec'

 - name: set crush rule
-  command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd crush rule create-erasure {{ item.key }} {{ item.value.ec_profile }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd crush rule create-erasure {{ item.key }} {{ item.value.ec_profile }}"
   loop: "{{ rgw_create_pools | dict2items }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
@@ -28,7 +28,7 @@
     - item.value.type == 'ec'

 - name: create ec pools for rgw
-  command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} erasure {{ item.value.ec_profile }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} erasure {{ item.value.ec_profile }}"
   loop: "{{ rgw_create_pools | dict2items }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
@@ -37,7 +37,7 @@
     - item.value.type == 'ec'

 - name: create replicated pools for rgw
-  command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} replicated {{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} replicated {{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
   changed_when: false
   register: result
   retries: 60
@@ -48,7 +48,7 @@
   when: item.value.type is not defined or item.value.type == 'replicated'

 - name: customize replicated pool size
-  command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.value.size | default(osd_pool_default_size) | int == 1 else '' }}"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.value.size | default(osd_pool_default_size) | int == 1 else '' }}"
   register: result
   retries: 60
   delay: 3
@@ -74,7 +74,7 @@
     - item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name)

 - name: set the rgw_create_pools pools application to rgw
-  command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw"
+  command: "{{ hostvars[groups.get(mon_group_name | default('mons'))[0]]['container_exec_cmd'] | default('') }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw"
   register: result
   retries: 60
   delay: 3
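Finally, note the "| default('')" on every rewritten lookup: on a non-containerized deployment container_exec_cmd is never set, so the prefix collapses and the bare binary runs; on a containerized one it expands to the first mon's exec prefix. Illustratively (the container name follows ceph-ansible's ceph-mon-<hostname> convention and is an assumption here):

    # non-containerized: prefix renders empty
    ceph --cluster ceph osd tree -f json
    # containerized: prefix renders to the first mon's exec command
    docker exec ceph-mon-mon0 ceph --cluster ceph osd tree -f json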