ceph-ansible/roles/ceph-client/tasks/create_users_keys.yml

---
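# The next two tasks translate the legacy flat key syntax (mon_cap/osd_cap/mds_cap/mgr_cap)
# into the caps dict expected by the ceph_key module, so older group_vars keep working.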
- name: set_fact keys_tmp - preserve backward compatibility after the introduction of the ceph_key module
  set_fact:
    keys_tmp: "{{ keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap|quote, 'osd': item.osd_cap|default('')|quote, 'mds': item.mds_cap|default('')|quote, 'mgr': item.mgr_cap|default('')|quote }, 'mode': item.mode } ] }}"
  when:
    - item.get('mon_cap', None) # it's enough to assume we are running the old-fashioned syntax simply by checking for the presence of mon_cap, since every key needs this cap
  with_items: "{{ keys }}"

- name: set_fact keys - override keys_tmp with keys
  set_fact:
    keys: "{{ keys_tmp }}"
  when:
    - keys_tmp is defined

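# In containerized deployments the ceph CLI is only available inside the container image,
# so start a throw-away container (removed again near the end of this file) from which
# the key and pool commands can be run via 'docker exec'.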
- name: run a dummy container (sleep 300) from where we can create pool(s)/key(s)
  command: >
    docker run \
    --rm \
    -d \
    -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \
    --name ceph-create-keys \
    --entrypoint=sleep \
    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
    300
  changed_when: false
  run_once: true
  when: containerized_deployment

- name: set docker_exec_client_cmd for containers
  set_fact:
    docker_exec_client_cmd: "docker exec ceph-create-keys"
  run_once: true
  when: containerized_deployment

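# Create every requested cephx key on the first client host only; the keyrings are
# written to ceph_conf_key_directory and imported into the cluster when copy_admin_key
# is true.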
- name: create cephx key(s)
  ceph_key:
    state: present
    name: "{{ item.name }}"
    caps: "{{ item.caps }}"
    secret: "{{ item.key | default('') }}"
    containerized: "{{ docker_exec_client_cmd | default('') }}"
    cluster: "{{ cluster }}"
    dest: "{{ ceph_conf_key_directory }}"
    import_key: "{{ copy_admin_key }}"
    mode: "{{ item.mode|default(omit) }}"
  with_items: "{{ keys }}"
  run_once: true
  when:
    - cephx
    - keys | length > 0
    - inventory_hostname == groups.get(client_group_name) | first

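# Read the freshly written keyring files into memory so they can be pushed to the
# remaining client nodes by the 'get client cephx keys' task below.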
- name: slurp client cephx key(s)
  slurp:
    src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
  with_items:
    - "{{ keys }}"
  register: slurp_client_keys
  run_once: true
  when:
    - cephx
    - keys | length > 0
    - inventory_hostname == groups.get(client_group_name) | first

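# Create the requested pools from the same host; pg_num falls back to the monitors'
# osd_pool_default_pg_num and the pool type defaults to replicated.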
- name: create ceph pool(s)
  command: >
    {{ docker_exec_client_cmd | default('') }} ceph --cluster {{ cluster }}
    osd pool create {{ item.name }}
    {{ item.get('pg_num', hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num']) }}
    {{ item.pgp_num | default(item.pg_num) }}
    {{ item.rule_name | default("replicated_rule") }}
    {{ 1 if item.type|default(1) == 'replicated' else 3 if item.type|default(1) == 'erasure' else item.type|default(1) }}
    {%- if (item.type | default("1") == '3' or item.type | default("1") == 'erasure') and item.erasure_profile != '' %}
    {{ item.erasure_profile }}
    {%- endif %}
    {{ item.expected_num_objects | default('') }}
  with_items: "{{ pools }}"
  changed_when: false
  run_once: true
  when:
    - pools | length > 0
    - copy_admin_key
    - inventory_hostname == groups.get(client_group_name) | first

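# The temporary container is no longer needed once the pools and keys exist.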
- name: kill a dummy container that created pool(s)/key(s)
  command: docker rm -f ceph-create-keys
  changed_when: false
  run_once: true
  when: containerized_deployment

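# Distribute the slurped keyrings to every other client node, preserving the mode
# requested for each key and the ceph user/group ownership.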
- name: get client cephx keys
  copy:
    dest: "{{ item.source }}"
    content: "{{ item.content | b64decode }}"
    mode: "{{ item.item.mode }}"
    owner: "{{ ceph_uid }}"
    group: "{{ ceph_uid }}"
  with_items:
    - "{{ slurp_client_keys.results }}"
  when:
    - not item.get('skipped', False)
    - not inventory_hostname == groups.get(client_group_name, []) | first
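
# For reference, a minimal sketch of the variables this file consumes. The names,
# capabilities and pg counts below are made up for illustration; only the structure
# (keys with a caps dict and optional key/mode, pools with optional pg_num/pgp_num,
# rule_name, type, erasure_profile and expected_num_objects) comes from the tasks
# above. The legacy flat form (mon_cap/osd_cap/mds_cap/mgr_cap) is also accepted and
# converted by the first task of this file.
#
# keys:
#   - name: client.glance
#     key: "<base64 cephx secret>"   # optional; defaults to '' in the create task above
#     caps:
#       mon: "allow r"
#       osd: "allow rwx pool=images"
#     mode: "0600"
#
# pools:
#   - name: images
#     pg_num: 32
#     type: replicated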