# ceph-ansible/roles/ceph-client/tasks/create_users_keys.yml
---
- name: set_fact keys_tmp - preserve backward compatibility after the introduction of the ceph_key module
  set_fact:
    keys_tmp: "{{ keys_tmp | default([]) + [{ 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap | default(''), 'mds': item.mds_cap | default(''), 'mgr': item.mgr_cap | default('') }, 'mode': item.mode }] }}"
  when: item.get('mon_cap', None)  # it's enough to assume we are running an old-fashioned syntax simply by checking the presence of mon_cap, since every key needs this cap
  with_items: "{{ keys }}"
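
# Illustration (hypothetical values): an old-style entry such as
#   - { name: client.app, key: "AQD...==", mon_cap: "allow r", osd_cap: "allow rwx pool=app", mode: "0600" }
# is converted above into the caps-dict form expected by the ceph_key module:
#   - { name: client.app, key: "AQD...==", caps: { mon: "allow r", osd: "allow rwx pool=app", mds: '', mgr: '' }, mode: "0600" }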

- name: set_fact keys - override keys_tmp with keys
  set_fact:
    keys: "{{ keys_tmp }}"
  when: keys_tmp is defined
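
# Note: when any old-style entry is present, keys is replaced wholesale by the
# converted keys_tmp list, so mixing old- and new-style syntax in the same
# keys variable is not supported.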

# dummy container setup is only supported on x86_64
# when running with containerized_deployment: true, this task
# creates a group that contains only x86_64 hosts.
# when running with containerized_deployment: false, this task
# adds all client hosts to the group (no filtering).
- name: create filtered clients group
  add_host:
    name: "{{ item }}"
    groups: _filtered_clients
  with_items: "{{ groups[client_group_name] | intersect(ansible_play_batch) }}"
  when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment | bool)
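
# For illustration (hypothetical inventory): with clients c1 (x86_64) and
# c2 (aarch64), containerized_deployment: true puts only c1 into
# _filtered_clients, while containerized_deployment: false adds both.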

- name: set_fact delegated_node
  set_fact:
    delegated_node: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else inventory_hostname }}"

- name: set_fact admin_key_presence
  set_fact:
    admin_key_presence: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}"
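
# When a monitor group exists, the key and pool operations below are delegated
# to the first monitor (which holds the admin key); otherwise they run on the
# client itself and importing keys depends on copy_admin_key.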

- name: create cephx key(s)
  ceph_key:
    name: "{{ item.name }}"
    caps: "{{ item.caps }}"
    secret: "{{ item.key | default('') }}"
    cluster: "{{ cluster }}"
    dest: "{{ ceph_conf_key_directory }}"
    import_key: "{{ admin_key_presence }}"
    mode: "{{ item.mode | default(omit) }}"
    owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
    group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
  environment:
    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
  with_items:
    - "{{ keys }}"
  delegate_to: "{{ delegated_node }}"
  when:
    - cephx | bool
    - keys | length > 0
    - inventory_hostname == groups.get('_filtered_clients') | first
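
# Continuing the hypothetical client.app example, the task above writes
# <ceph_conf_key_directory>/<cluster>.client.app.keyring on the delegated
# node; the slurp below reads each keyring back so it can be distributed to
# every filtered client at the end of this file.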

- name: slurp client cephx key(s)
  slurp:
    src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
  with_items: "{{ keys }}"
  register: slurp_client_keys
  delegate_to: "{{ delegated_node }}"
  when:
    - cephx | bool
    - keys | length > 0
    - inventory_hostname == groups.get('_filtered_clients') | first

- name: pool related tasks
  when:
    - admin_key_presence | bool
    - inventory_hostname == groups.get('_filtered_clients', []) | first
  block:
    - import_role:
        name: ceph-facts
        tasks_from: get_def_crush_rule_name.yml

    - name: create ceph pool(s)
      ceph_pool:
        name: "{{ item.name }}"
        cluster: "{{ cluster }}"
        pg_num: "{{ item.pg_num | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else 16 }}"
        pgp_num: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else omit }}"
        size: "{{ item.size | default(omit) }}"
        min_size: "{{ item.min_size | default(omit) }}"
        pool_type: "{{ item.type | default('replicated') }}"
        rule_name: "{{ item.rule_name | default(omit) }}"
        erasure_profile: "{{ item.erasure_profile | default(omit) }}"
        pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
        target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
        application: "{{ item.application | default(omit) }}"
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      with_items: "{{ pools }}"
      changed_when: false
      delegate_to: "{{ delegated_node }}"
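
    # Illustration (hypothetical values) of pools entries consumed above:
    #   pools:
    #     - { name: app-data, pg_num: 32, size: 3, application: rbd }
    #     - { name: app-auto, pg_autoscale_mode: true, target_size_ratio: 0.2, application: rbd }
    # with pg_autoscale_mode enabled, pg_num is pinned to 16 and pgp_num is omitted.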

- name: get client cephx keys
  copy:
    dest: "{{ item.source }}"
    content: "{{ item.content | b64decode }}"
    mode: "{{ item.item.get('mode', '0600') }}"
    owner: "{{ ceph_uid }}"
    group: "{{ ceph_uid }}"
  with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}"
  when: not item.get('skipped', False)
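
# Note: slurp results registered for loop iterations where the slurp task did
# not run carry a 'skipped' flag; the guard above avoids copying empty content
# for those entries.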