rename docker_exec_cmd variable

This commit renames the `docker_exec_cmd` variable to
`container_exec_cmd`: the command is built from `container_binary`, so
the old name wrongly implied that Docker is the only supported
container runtime.
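
A minimal sketch of the pattern after the rename, as it appears in the
hunks below (the mon hostname lookup and task names are taken from this
diff; the fact is only set for containerized deployments):

    - name: set_fact container_exec_cmd
      set_fact:
        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
      when: containerized_deployment

    - name: show ceph status
      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
      delegate_to: "{{ groups[mon_group_name][0] }}"

Non-containerized deployments leave the fact unset, which is why most
callers write "{{ container_exec_cmd | default('') }}": the prefix
collapses to nothing and the ceph command runs directly on the host.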

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit e74d80e72f)
pull/3999/head
Guillaume Abrioux 2019-05-14 14:51:32 +02:00
parent 71a85405b7
commit e29fd842a6
34 changed files with 131 additions and 131 deletions

@@ -557,7 +557,7 @@ dummy:
##########
# DOCKER #
##########
-#docker_exec_cmd:
+#container_exec_cmd:
#docker: false
#ceph_docker_image: "ceph/daemon"
#ceph_docker_image_tag: latest

@@ -557,7 +557,7 @@ ceph_rhcs_version: 4
##########
# DOCKER #
##########
-#docker_exec_cmd:
+#container_exec_cmd:
#docker: false
ceph_docker_image: "rhceph/rhceph-4-rhel8"
ceph_docker_image_tag: "latest"

@@ -79,7 +79,7 @@
add_osd: True
- name: set noup flag
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set noup"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set noup"
delegate_to: "{{ groups['mons'][0] }}"
run_once: True
changed_when: False
@@ -113,7 +113,7 @@
post_tasks:
- name: unset noup flag
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup"
delegate_to: "{{ groups['mons'][0] }}"
run_once: True
changed_when: False

@@ -8,7 +8,7 @@
gather_facts: false
vars:
cluster: ceph
docker_exec_cmd: "docker exec ceph-nano"
container_exec_cmd: "docker exec ceph-nano"
keys_to_info:
- client.admin
- mds.0
@@ -29,7 +29,7 @@
caps: "{{ item.caps }}"
cluster: "{{ cluster }}"
secret: "{{ item.key | default('') }}"
containerized: "{{ docker_exec_cmd | default(False) }}"
containerized: "{{ container_exec_cmd | default(False) }}"
with_items: "{{ keys_to_create }}"
- name: update ceph key(s)
@@ -38,7 +38,7 @@
state: update
caps: "{{ item.caps }}"
cluster: "{{ cluster }}"
containerized: "{{ docker_exec_cmd | default(False) }}"
containerized: "{{ container_exec_cmd | default(False) }}"
with_items: "{{ keys_to_create }}"
- name: delete ceph key(s)
@@ -46,7 +46,7 @@
name: "{{ item }}"
state: absent
cluster: "{{ cluster }}"
containerized: "{{ docker_exec_cmd | default(False) }}"
containerized: "{{ container_exec_cmd | default(False) }}"
with_items: "{{ keys_to_delete }}"
- name: info ceph key(s)
@@ -54,7 +54,7 @@
name: "{{ item }}"
state: info
cluster: "{{ cluster }}"
containerized: "{{ docker_exec_cmd }}"
containerized: "{{ container_exec_cmd }}"
register: key_info
ignore_errors: true
with_items: "{{ keys_to_info }}"
@@ -63,7 +63,7 @@
ceph_key:
state: list
cluster: "{{ cluster }}"
containerized: "{{ docker_exec_cmd | default(False) }}"
containerized: "{{ container_exec_cmd | default(False) }}"
register: list_keys
ignore_errors: true

@@ -421,13 +421,13 @@
- ceph_release in ["nautilus", "octopus"]
- not containerized_deployment
-- name: set_fact docker_exec_cmd_osd
+- name: set_fact container_exec_cmd_osd
set_fact:
-docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
- name: get osd versions
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
register: ceph_versions
delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -439,19 +439,19 @@
# length == 1 means there is a single osds versions entry
# thus all the osds are running the same version
- name: osd set sortbitwise
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} osd set sortbitwise"
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} osd set sortbitwise"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
- ceph_versions_osd | string is search("ceph version 10")
- name: get num_pgs - non container
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
register: ceph_pgs
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: waiting for clean pgs...
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
register: ceph_health_post
until: >
(((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) > 0)
@@ -475,20 +475,20 @@
- import_role:
name: ceph-facts
-- name: set_fact docker_exec_cmd_osd
+- name: set_fact container_exec_cmd_osd
set_fact:
-docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
- name: unset osd flags
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
command: "{{ container_exec_cmd_update_osd|default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
with_items:
- noout
- norebalance
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: get osd versions
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
register: ceph_versions
delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -500,7 +500,7 @@
# length == 1 means there is a single osds versions entry
# thus all the osds are running the same version
- name: complete osds upgrade
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} osd require-osd-release luminous"
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} osd require-osd-release luminous"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
@@ -845,17 +845,17 @@
- import_role:
name: ceph-defaults
-- name: set_fact docker_exec_cmd_status
+- name: set_fact container_exec_cmd_status
set_fact:
-docker_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
- name: show ceph status
command: "{{ docker_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
run_once: True
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: show all daemons version
command: "{{ docker_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} versions"
command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} versions"
run_once: True
delegate_to: "{{ groups[mon_group_name][0] }}"

@@ -74,13 +74,13 @@
with_items: "{{ groups[mon_group_name] }}"
when: item != mon_to_kill
- name: "set_fact docker_exec_cmd build {{ container_binary }} exec command (containerized)"
- name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
set_fact:
docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
when: containerized_deployment
- name: exit playbook, if can not connect to the cluster
command: "{{ docker_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
until: ceph_health.stdout.find("HEALTH") > -1
delegate_to: "{{ mon_host }}"
@@ -106,7 +106,7 @@
delegate_to: "{{ mon_to_kill }}"
- name: remove monitor from the quorum
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
failed_when: false
delegate_to: "{{ mon_host }}"
@@ -116,7 +116,7 @@
# 'sleep 5' is not that bad and should be sufficient
- name: verify the monitor is out of the cluster
shell: |
-{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["quorum_names"])'
+{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["quorum_names"])'
delegate_to: "{{ mon_host }}"
failed_when: false
register: result
@@ -138,9 +138,9 @@
when: mon_to_kill_hostname in result.stdout
- name: show ceph health
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
delegate_to: "{{ mon_host }}"
- name: show ceph mon status
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
delegate_to: "{{ mon_host }}"

@@ -62,13 +62,13 @@
name: ceph-facts
post_tasks:
-- name: set_fact docker_exec_cmd build docker exec command (containerized)
+- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
-docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
- name: exit playbook, if can not connect to the cluster
command: "{{ docker_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
until: ceph_health.stdout.find("HEALTH") > -1
delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -76,7 +76,7 @@
delay: 2
- name: find the host(s) where the osd(s) is/are running on
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
with_items: "{{ osd_to_kill.split(',') }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
register: find_osd_hosts
@@ -87,7 +87,7 @@
with_items: "{{ find_osd_hosts.results }}"
- name: mark osd(s) out of the cluster
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ osd_to_kill.replace(',', ' ') }}"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ osd_to_kill.replace(',', ' ') }}"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -111,15 +111,15 @@
loop: "{{ osd_hosts }}"
- name: purge osd(s) from the cluster
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ osd_to_kill.split(',') }}"
- name: show ceph health
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: show ceph osd tree
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
delegate_to: "{{ groups[mon_group_name][0] }}"

@@ -136,7 +136,7 @@
post_tasks:
- name: waiting for the monitor to join the quorum...
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s --format json"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s --format json"
register: ceph_health_raw
until: >
hostvars[mon_host]['ansible_hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]

@@ -58,13 +58,13 @@
name: ceph-defaults
post_tasks:
-- name: set_fact docker_exec_cmd build docker exec command (containerized)
+- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
-docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
- name: exit playbook, if can not connect to the cluster
command: "{{ docker_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
until: ceph_health.stdout.find("HEALTH") > -1
delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -72,7 +72,7 @@
delay: 2
- name: find the host(s) where the osd(s) is/are running on
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
with_items: "{{ osd_to_replace.split(',') }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
register: find_osd_hosts
@@ -182,9 +182,9 @@
- "{{ osd_to_replace_disks.results }}"
- name: show ceph health
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: show ceph osd tree
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
delegate_to: "{{ groups[mon_group_name][0] }}"

@@ -54,7 +54,7 @@ EXAMPLES = '''
ceph_crush:
cluster: "{{ cluster }}"
location: "{{ hostvars[item]['osd_crush_location'] }}"
containerized: "{{ docker_exec_cmd }}"
containerized: "{{ container_exec_cmd }}"
with_items: "{{ groups[osd_group_name] }}"
when: crush_rule_config
'''

@@ -70,7 +70,7 @@
block:
- name: list existing pool(s)
command: >
-{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool get {{ item.name }} size
with_items: "{{ pools }}"
register: created_pools
@@ -79,7 +79,7 @@
- name: create ceph pool(s)
command: >
-{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
{{ item.0.pg_num | default(osd_pool_default_pg_num) }}
{{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
@@ -100,7 +100,7 @@
- name: customize pool size
command: >
-{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegated_node }}"
@@ -111,7 +111,7 @@
- name: customize pool min_size
command: >
-{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegated_node }}"
@@ -121,7 +121,7 @@
- (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- name: assign application to pool(s)
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
with_items: "{{ pools | unique }}"
changed_when: false
delegate_to: "{{ delegated_node }}"

@@ -549,7 +549,7 @@ ceph_tcmalloc_max_total_thread_cache: 0
##########
# DOCKER #
##########
-docker_exec_cmd:
+container_exec_cmd:
docker: false
ceph_docker_image: "ceph/daemon"
ceph_docker_image_tag: latest

@@ -36,9 +36,9 @@
monitor_name: "{{ ansible_fqdn }}"
when: mon_use_fqdn
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
set_fact:
-docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
+container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
when:
- containerized_deployment
- groups.get(mon_group_name, []) | length > 0
@@ -47,7 +47,7 @@
# because it blindly picks a mon, which may be down because
# of the rolling update
- name: is ceph running already?
command: "{{ timeout_command }} {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
changed_when: false
failed_when: false
check_mode: no
@@ -76,7 +76,7 @@
when: cephx or generate_fsid
- name: get current fsid
command: "{{ timeout_command }} {{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }} config get fsid"
command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }} config get fsid"
register: rolling_update_fsid
delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
when: rolling_update

@@ -18,14 +18,14 @@
src: "{{ role_path }}/templates/iscsi-gateway.cfg.j2"
dest: /etc/ceph/iscsi-gateway.cfg
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
set_fact:
-docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: containerized_deployment
- name: check if a rbd pool exists
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls --format json"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls --format json"
changed_when: false
register: rbd_pool_exists
delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -34,12 +34,12 @@
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
block:
- name: create a rbd pool if it doesn't exist
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ osd_pool_default_pg_num }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ osd_pool_default_pg_num }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: customize pool size
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default(osd_pool_default_size) }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default(osd_pool_default_size) }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: rbd_pool_size | default(osd_pool_default_size) != ceph_osd_pool_default_size

@@ -1,7 +1,7 @@
---
-- name: set_fact docker_exec_cmd mds
+- name: set_fact container_exec_cmd mds
set_fact:
-docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
+container_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
- name: set_fact admin_keyring
set_fact:
@@ -62,7 +62,7 @@
daemon_reload: yes
- name: wait for mds socket to exist
command: "{{ docker_exec_cmd }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok'"
command: "{{ container_exec_cmd }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok'"
changed_when: false
register: multi_mds_socket
retries: 5

@@ -9,7 +9,7 @@
cephfs_pool_names: "{{ cephfs_pools | map(attribute='name') | list }}"
- name: get and store list of filesystem pools
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls"
changed_when: false
register: osd_pool_ls
@@ -23,7 +23,7 @@
block:
- name: create filesystem pools
command: >
-{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.name }}
{{ item.pg_num | default(item.pgs) | default(osd_pool_default_pg_num) }}
{{ item.pgp_num | default(item.pgs) | default(item.pg_num) | default(osd_pool_default_pg_num) }}
@@ -38,19 +38,19 @@
- "{{ cephfs_pools }}"
- name: customize pool size
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- name: customize pool min_size
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- name: assign application to cephfs pools
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} cephfs"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} cephfs"
with_items:
- "{{ cephfs_data }}"
- "{{ cephfs_metadata }}"
@@ -60,18 +60,18 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
block:
- name: check if ceph filesystem already exists
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
register: check_existing_cephfs
changed_when: false
failed_when: false
- name: create ceph filesystem
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
changed_when: false
when: check_existing_cephfs.rc != 0
- name: set max_mds
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when: mds_max_mds > 1

@@ -5,9 +5,9 @@
- inventory_hostname == groups[mds_group_name] | first
- not rolling_update
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
set_fact:
-docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
+container_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
when: containerized_deployment
- name: include common.yml

@@ -1,7 +1,7 @@
---
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
set_fact:
-docker_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+container_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
- name: include common.yml

@@ -1,6 +1,6 @@
---
- name: wait for all mgr to be up
shell: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json | python -c 'import sys, json; print(json.load(sys.stdin)[\"available\"])'"
shell: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json | python -c 'import sys, json; print(json.load(sys.stdin)[\"available\"])'"
register: mgr_dump
retries: 30
delay: 5
@@ -10,7 +10,7 @@
- mgr_dump.stdout | bool
- name: get enabled modules from ceph-mgr
command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
command: "{{ container_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
check_mode: no
changed_when: false
register: _ceph_mgr_modules
@@ -25,13 +25,13 @@
_disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}"
- name: disable ceph mgr enabled modules
command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
command: "{{ container_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
with_items: "{{ _ceph_mgr_modules.get('enabled_modules', []) }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: item not in ceph_mgr_modules
- name: add modules to ceph-mgr
command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
command: "{{ container_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
with_items: "{{ ceph_mgr_modules }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])

@@ -1,7 +1,7 @@
---
- name: waiting for the monitor(s) to form the quorum...
command: >
-{{ docker_exec_cmd }}
+{{ container_exec_cmd }}
ceph
--cluster {{ cluster }}
-n mon.

@@ -3,7 +3,7 @@
ceph_crush:
cluster: "{{ cluster }}"
location: "{{ hostvars[item]['osd_crush_location'] }}"
containerized: "{{ docker_exec_cmd }}"
containerized: "{{ container_exec_cmd }}"
with_items: "{{ groups[osd_group_name] }}"
register: config_crush_hierarchy
when:
@@ -12,13 +12,13 @@
- hostvars[item]['osd_crush_location'] is defined
- name: create configured crush rules
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush rule create-simple {{ item.name }} {{ item.root }} {{ item.type }}"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd crush rule create-simple {{ item.name }} {{ item.root }} {{ item.type }}"
with_items: "{{ crush_rules | unique }}"
changed_when: false
when: inventory_hostname == groups.get(mon_group_name) | last
- name: get id for new default crush rule
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd -f json crush rule dump {{ item.name }}"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd -f json crush rule dump {{ item.name }}"
register: info_ceph_default_crush_rule
changed_when: false
with_items: "{{ crush_rules }}"
@@ -38,7 +38,7 @@
- not item.get('skipped', false)
- name: insert new default crush rule into daemon to prevent restart
command: "{{ hostvars[item]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}"
command: "{{ hostvars[item]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}"
changed_when: false
delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"

View File

@@ -1,7 +1,7 @@
---
- name: check if monitor initial keyring already exists
command: >
-{{ docker_exec_cmd | default('') }} ceph --cluster ceph --name mon. -k
+{{ container_exec_cmd | default('') }} ceph --cluster ceph --name mon. -k
/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}/keyring
auth get-key mon.
register: initial_mon_key

@@ -1,7 +1,7 @@
---
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
set_fact:
-docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
when: containerized_deployment
- name: include deploy_monitors.yml

@@ -1,14 +1,14 @@
---
- name: collect all the pools
command: >
-{{ docker_exec_cmd }} rados --cluster {{ cluster }} lspools
+{{ container_exec_cmd }} rados --cluster {{ cluster }} lspools
changed_when: false
register: ceph_pools
check_mode: no
- name: secure the cluster
command: >
-{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
+{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
changed_when: false
with_nested:
- "{{ ceph_pools.stdout_lines|default([]) }}"

@@ -1,11 +1,11 @@
---
-- name: set_fact docker_exec_cmd_nfs
+- name: set_fact container_exec_cmd_nfs
set_fact:
-docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
- name: check if "{{ ceph_nfs_rgw_user }}" exists
command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
command: "{{ container_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
run_once: true
register: rgwuser_exists
changed_when: false
@@ -14,7 +14,7 @@
when: nfs_obj_gw
- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
command: "{{ container_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
run_once: true
register: rgwuser
changed_when: false

@@ -1,7 +1,7 @@
---
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
set_fact:
-docker_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
+container_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
when: containerized_deployment
- name: include common.yml

@@ -1,11 +1,11 @@
---
-- name: set_fact docker_exec_cmd_nfs
+- name: set_fact container_exec_cmd_nfs
set_fact:
-docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment
- name: check if rados index object exists
shell: "{{ docker_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
shell: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
changed_when: false
failed_when: false
register: rados_index_exists
@@ -15,7 +15,7 @@
run_once: true
- name: create an empty rados index object
command: "{{ docker_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
command: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
when:
- ceph_nfs_rados_backend
- rados_index_exists.rc != 0

@@ -1,9 +1,9 @@
---
- name: wait for all osd to be up
shell: >
test "$({{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_osds"])')" -gt 0 &&
test "$({{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_osds"])')" =
"$({{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_up_osds"])')"
test "$({{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_osds"])')" -gt 0 &&
test "$({{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_osds"])')" =
"$({{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_up_osds"])')"
register: wait_for_all_osds_up
retries: "{{ nb_retry_wait_osd_up }}"
delay: "{{ delay_wait_osd_up }}"
@@ -15,7 +15,7 @@
block:
- name: list existing pool(s)
command: >
-{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool get {{ item.name }} size
with_items: "{{ openstack_pools | unique }}"
register: created_pools
@@ -24,7 +24,7 @@
- name: create openstack pool(s)
command: >
-{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
{{ item.0.pg_num | default(osd_pool_default_pg_num) }}
{{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
@@ -43,7 +43,7 @@
- name: customize pool size
command: >
-{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -52,7 +52,7 @@
- name: customize pool min_size
command: >
-{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -60,7 +60,7 @@
when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
- name: assign application to pool(s)
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
with_items: "{{ openstack_pools | unique }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"

@@ -20,9 +20,9 @@
- name: tasks for containerized deployment
when: containerized_deployment
block:
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
set_fact:
-docker_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
+container_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
- name: include docker/main.yml
include_tasks: docker/main.yml

@@ -1,6 +1,6 @@
---
- name: update period
command: "{{ docker_exec_cmd }} radosgw-admin --cluster {{ cluster }} period update --commit"
command: "{{ container_exec_cmd }} radosgw-admin --cluster {{ cluster }} period update --commit"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true

@@ -26,7 +26,7 @@
when: rgw_create_pools is defined
block:
- name: create rgw pools if rgw_create_pools is defined
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
changed_when: false
with_dict: "{{ rgw_create_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -35,7 +35,7 @@
run_once: true
- name: customize pool size
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.size | default(osd_pool_default_size) }}"
command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.size | default(osd_pool_default_size) }}"
with_dict: "{{ rgw_create_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
@@ -45,7 +45,7 @@
when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
- name: set the rgw_create_pools pools application to rgw
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw"
command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw"
changed_when: false
with_dict: "{{ rgw_create_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"

@@ -1,6 +1,6 @@
---
- name: check if the realm already exists
command: "{{ docker_exec_cmd }} radosgw-admin realm get --rgw-realm={{ rgw_realm }}"
command: "{{ container_exec_cmd }} radosgw-admin realm get --rgw-realm={{ rgw_realm }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
register: realmcheck
failed_when: False
@@ -8,7 +8,7 @@
check_mode: no
- name: check if the zonegroup already exists
command: "{{ docker_exec_cmd }} radosgw-admin zonegroup get --rgw-zonegroup={{ rgw_zonegroup }}"
command: "{{ container_exec_cmd }} radosgw-admin zonegroup get --rgw-zonegroup={{ rgw_zonegroup }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
register: zonegroupcheck
failed_when: False
@@ -16,7 +16,7 @@
check_mode: no
- name: check if the zone already exists
command: "{{ docker_exec_cmd }} radosgw-admin zone get --rgw-zone={{ rgw_zone }}"
command: "{{ container_exec_cmd }} radosgw-admin zone get --rgw-zone={{ rgw_zone }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
register: zonecheck
failed_when: False
@@ -24,7 +24,7 @@
check_mode: no
- name: check if the system user already exists
command: "{{ docker_exec_cmd }} radosgw-admin user info --uid={{ rgw_zone_user }}"
command: "{{ container_exec_cmd }} radosgw-admin user info --uid={{ rgw_zone_user }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
register: usercheck
failed_when: False

@@ -1,31 +1,31 @@
---
- name: create the realm
command: "{{ docker_exec_cmd }} radosgw-admin realm create --rgw-realm={{ rgw_realm }} --default"
command: "{{ container_exec_cmd }} radosgw-admin realm create --rgw-realm={{ rgw_realm }} --default"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: "'No such file or directory' in realmcheck.stderr"
- name: create the zonegroup
command: "{{ docker_exec_cmd }} radosgw-admin zonegroup create --rgw-zonegroup={{ rgw_zonegroup }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --master --default"
command: "{{ container_exec_cmd }} radosgw-admin zonegroup create --rgw-zonegroup={{ rgw_zonegroup }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --master --default"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: "'No such file or directory' in zonegroupcheck.stderr"
- name: create the zone
command: "{{ docker_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default --master"
command: "{{ container_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default --master"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: "'No such file or directory' in zonecheck.stderr"
- name: create the zone user
command: "{{ docker_exec_cmd }} radosgw-admin user create --uid={{ rgw_zone_user }} --display-name=\"Zone User\" --access-key={{ system_access_key }} --secret={{ system_secret_key }} --system"
command: "{{ container_exec_cmd }} radosgw-admin user create --uid={{ rgw_zone_user }} --display-name=\"Zone User\" --access-key={{ system_access_key }} --secret={{ system_secret_key }} --system"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: "'could not fetch user info: no user info saved' in usercheck.stderr"
notify: update period
- name: add other endpoints to the zone
command: "{{ docker_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
command: "{{ container_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: rgw_multisite_endpoints_list is defined

@@ -1,37 +1,37 @@
---
- name: fetch the realm
command: "{{ docker_exec_cmd }} radosgw-admin realm pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
command: "{{ container_exec_cmd }} radosgw-admin realm pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: "'No such file or directory' in realmcheck.stderr"
- name: fetch the period
command: "{{ docker_exec_cmd }} radosgw-admin period pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
command: "{{ container_exec_cmd }} radosgw-admin period pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: "'No such file or directory' in realmcheck.stderr"
- name: set default realm
command: "{{ docker_exec_cmd }} radosgw-admin realm default --rgw-realm={{ rgw_realm }}"
command: "{{ container_exec_cmd }} radosgw-admin realm default --rgw-realm={{ rgw_realm }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: set default zonegroup
command: "{{ docker_exec_cmd }} radosgw-admin zonegroup default --rgw-zonegroup={{ rgw_zonegroup }}"
command: "{{ container_exec_cmd }} radosgw-admin zonegroup default --rgw-zonegroup={{ rgw_zonegroup }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: create the zone
command: "{{ docker_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default"
command: "{{ container_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: "'No such file or directory' in zonecheck.stderr"
notify: update period
- name: add other endpoints to the zone
command: "{{ docker_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
command: "{{ container_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: rgw_multisite_endpoints_list is defined