ceph_key: use ceph_key in the playbook

Replaced all the occurrences of raw commands using the 'command'
module with the ceph_key module.

Signed-off-by: Sébastien Han <seb@redhat.com>
pull/2518/head
Sébastien Han 2018-04-04 16:22:36 +02:00 committed by Guillaume Abrioux
parent 473939d215
commit 9657e4d6fa
9 changed files with 115 additions and 129 deletions
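
In short, tasks that shelled out to ceph/ceph-authtool now declare the key through the module. A minimal illustrative sketch of the pattern (client.test, its caps and the paths are placeholders, not tasks copied verbatim from this commit):

    # before: raw CLI call through the 'command' module
    - name: create key
      command: ceph --cluster ceph auth get-or-create client.test mon 'allow r' -o /etc/ceph/ceph.client.test.keyring
      args:
        creates: /etc/ceph/ceph.client.test.keyring

    # after: the same intent expressed with the ceph_key module
    - name: create key
      ceph_key:
        name: client.test
        state: present
        caps:
          mon: allow r
        cluster: ceph

Because the module is meant to converge on the requested state by itself, the creates: guards and the separate "check if key exists" / "auth import" round-trips removed below become unnecessary.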

View File

@@ -43,7 +43,11 @@ dummy:
 # $ ceph-authtool --gen-print-key
 # or
 # $ python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)"
+#
+# To use a particular secret, you have to add 'key' to the dict below, so something like:
+# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
+#
 #keys:
-# - { name: client.test, key: "ADD-KEYRING-HERE==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test", mode: "0600", acls: [] }
-# - { name: client.test2, key: "ADD-KEYRING-HERE==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test2", mode: "0600", acls: [] }
+# - { name: client.test, caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=test" }, mode: "0600", acls: [] }
+# - { name: client.test2, caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=test2" }, mode: "0600", acls: [] }
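
For example, a complete entry pinning a specific secret under the new caps layout could look like this (the key value is the sample from the comment above, not a real credential):

    keys:
      - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==", caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=test" }, mode: "0600", acls: [] }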

View File

@@ -142,11 +142,11 @@ dummy:
 # To have have ansible setfacl the generated key, set the acls var like so:
 # acls: ["u:nova:r--", "u:cinder:r--", "u:glance:r--", "u:gnocchi:r--"]
 #openstack_keys:
-# - { name: client.glance, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}", mode: "0600", acls: [] }
-# - { name: client.cinder, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}", mode: "0600", acls: [] }
-# - { name: client.cinder-backup, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}", mode: "0600", acls: [] }
-# - { name: client.gnocchi, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_gnocchi_pool.name }}", mode: "0600", acls: [] }
-# - { name: client.openstack, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_cinder_backup_pool.name }}", mode: "0600", acls: [] }
+# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600", acls: [] }
+# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600", acls: [] }
+# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600", acls: [] }
+# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", acls: [] }
+# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600", acls: [] }
 ##########

View File

@@ -35,6 +35,10 @@ pools:
 # $ ceph-authtool --gen-print-key
 # or
 # $ python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)"
+#
+# To use a particular secret, you have to add 'key' to the dict below, so something like:
+# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
+#
 keys:
-  - { name: client.test, key: "ADD-KEYRING-HERE==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test", mode: "0600", acls: [] }
-  - { name: client.test2, key: "ADD-KEYRING-HERE==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test2", mode: "0600", acls: [] }
+  - { name: client.test, caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=test" }, mode: "0600", acls: [] }
+  - { name: client.test2, caps: { mon: "allow r", osd: "allow class-read object_prefix rbd_children, allow rwx pool=test2" }, mode: "0600", acls: [] }

View File

@@ -1,44 +1,40 @@
 ---
-- name: set docker_exec_client_cmd_binary to ceph-authtool
-  set_fact:
-    docker_exec_client_cmd_binary: ceph-authtool
+- name: run a dummy container (sleep 300) from where we can create pool(s)/key(s)
+  command: >
+    docker run \
+    -d \
+    -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }} \
+    --name ceph-create-keys \
+    --entrypoint=sleep \
+    {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+    300
+  changed_when: false
+  run_once: true
   when: containerized_deployment
 
 - name: set docker_exec_client_cmd for containers
   set_fact:
-    docker_exec_client_cmd: docker run --rm -v /etc/ceph:/etc/ceph --entrypoint /usr/bin/{{ docker_exec_client_cmd_binary }} {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+    docker_exec_client_cmd: "docker exec ceph-create-keys"
+  run_once: true
   when: containerized_deployment
 
-- name: set docker_exec_client_cmd for non-containers
-  set_fact:
-    docker_exec_client_cmd: ceph-authtool
-  when: not containerized_deployment
-
-- name: create key(s)
-  shell: "{{ docker_exec_client_cmd }} -C /etc/ceph/{{ cluster }}.{{ item.name }}.keyring --name {{ item.name }} --add-key {{ item.key }} --cap mon \"{{ item.mon_cap|default('') }}\" --cap osd \"{{ item.osd_cap|default('') }}\" --cap mds \"{{ item.mds_cap|default('') }}\""
-  args:
-    creates: /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
+- name: create cephx key(s)
+  ceph_key:
+    state: present
+    name: "{{ item.name }}"
+    caps: "{{ item.caps }}"
+    secret: "{{ item.key | default('') }}"
+    containerized: "{{ docker_exec_client_cmd | default('') }}"
+    cluster: "{{ cluster }}"
+    dest: "{{ ceph_conf_key_directory }}"
   with_items: "{{ keys }}"
-  changed_when: false
   run_once: true
   when:
     - cephx
     - keys | length > 0
     - inventory_hostname in groups.get(client_group_name) | first
 
-- name: set docker_exec_client_cmd_binary to ceph
-  set_fact:
-    docker_exec_client_cmd_binary: ceph
-  when: containerized_deployment
-
-- name: replace docker_exec_client_cmd by ceph
-  set_fact:
-    docker_exec_client_cmd: ceph
-  when:
-    - not containerized_deployment
-    - docker_exec_client_cmd == 'ceph-authtool'
-
-- name: slurp client key(s)
+- name: slurp client cephx key(s)
   slurp:
     src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
   with_items:
@@ -50,20 +46,9 @@
     - keys | length > 0
     - inventory_hostname in groups.get(client_group_name) | first
 
-- name: check if key(s) already exist(s)
-  command: "{{ docker_exec_client_cmd }} --cluster {{ cluster }} auth get {{ item.name }}"
-  changed_when: false
-  failed_when: false
-  with_items: "{{ keys }}"
-  register: keys_exist
-  run_once: true
-  when:
-    - copy_admin_key
-    - inventory_hostname in groups.get(client_group_name) | first
-
-- name: create pool(s)
+- name: create ceph pool(s)
   command: >
-    {{ docker_exec_client_cmd }} --cluster {{ cluster }}
+    {{ docker_exec_client_cmd | default('') }} ceph --cluster {{ cluster }}
     osd pool create {{ item.name }}
     {{ item.get('pg_num', hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num']) }}
     {{ item.pgp_num | default(item.pg_num) }}
@@ -81,25 +66,13 @@
     - copy_admin_key
     - inventory_hostname in groups.get(client_group_name) | first
 
-- name: add key(s) to ceph
-  command: "{{ docker_exec_client_cmd }} --cluster {{ cluster }} auth import -i /etc/ceph/{{ cluster }}.{{ item.0.name }}.keyring"
+- name: kill a dummy container that created pool(s)/key(s)
+  command: docker rm -f ceph-create-keys
   changed_when: false
   run_once: true
-  with_together:
-    - "{{ keys }}"
-    - "{{ keys_exist.results | default([]) }}"
-  when:
-    - not item.1.get("skipped")
-    - copy_admin_key
-    - item.1.rc != 0
-    - inventory_hostname in groups.get(client_group_name) | first
-
-- name: put docker_exec_client_cmd back to normal with a none value
-  set_fact:
-    docker_exec_client_cmd:
-  when: docker_exec_client_cmd == 'ceph'
+  when: containerized_deployment
 
-- name: get client keys
+- name: get client cephx keys
   copy:
     dest: "{{ item.source }}"
     content: "{{ item.content | b64decode }}"
@@ -109,18 +82,18 @@
     - not item.get('skipped', False)
     - not inventory_hostname == groups.get(client_group_name, []) | first
 
-- name: chmod key(s)
+- name: chmod cephx key(s)
   file:
-    path: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
+    path: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
     mode: "{{ item.mode|default(omit) }}" # if mode not in list, uses mode from ps umask
   with_items: "{{ keys }}"
   when:
     - cephx
    - keys | length > 0
 
-- name: setfacl for key(s)
+- name: setfacl for cephx key(s)
   acl:
-    path: "/etc/ceph/{{ cluster }}.{{ item.0.name }}.keyring"
+    path: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.0.name }}.keyring"
     entry: "{{ item.1 }}"
     state: present
   with_subelements:
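
For reference, with docker_exec_client_cmd set to "docker exec ceph-create-keys" as above, the pool creation on a containerized deployment ends up running something along the lines of the following (pool name and PG counts are made-up values, and the full command template may carry further arguments not shown in this hunk):

    docker exec ceph-create-keys ceph --cluster ceph osd pool create test 8 8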

View File

@@ -1,12 +1,4 @@
 ---
-- name: check keys has been filled in users.key variables
-  fail:
-    msg: "you must generate and set keys properly in users.key variables"
-  with_items: "{{ keys }}"
-  when:
-    - user_config
-    - item.key == 'ADD-KEYRING-HERE=='
-
 - name: set selinux permissions
   shell: |
     chcon -Rt svirt_sandbox_file_t /etc/ceph

View File

@@ -134,11 +134,11 @@ openstack_pools:
 # To have have ansible setfacl the generated key, set the acls var like so:
 # acls: ["u:nova:r--", "u:cinder:r--", "u:glance:r--", "u:gnocchi:r--"]
 openstack_keys:
-  - { name: client.glance, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}", mode: "0600", acls: [] }
-  - { name: client.cinder, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}", mode: "0600", acls: [] }
-  - { name: client.cinder-backup, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}", mode: "0600", acls: [] }
-  - { name: client.gnocchi, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_gnocchi_pool.name }}", mode: "0600", acls: [] }
-  - { name: client.openstack, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_cinder_backup_pool.name }}", mode: "0600", acls: [] }
+  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600", acls: [] }
+  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600", acls: [] }
+  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600", acls: [] }
+  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", acls: [] }
+  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600", acls: [] }
 ##########

View File

@@ -55,20 +55,27 @@
     - cephx
 
 - name: create ceph rest api keyring when mon is not containerized
-  command: ceph --cluster {{ cluster }} auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/{{ cluster }}.client.restapi.keyring
-  args:
-    creates: /etc/ceph/{{ cluster }}.client.restapi.keyring
-  changed_when: false
+  ceph_key:
+    name: client.restapi
+    state: present
+    caps:
+      mon: allow *
+      osd: allow *
+    cluster: "{{ cluster }}"
   when:
     - cephx
     - groups.get(restapi_group_name, []) | length > 0
    - inventory_hostname == groups[mon_group_name]|last
 
 - name: create ceph mgr keyring(s) when mon is not containerized
-  command: ceph --cluster {{ cluster }} auth get-or-create mgr.{{ hostvars[item]['ansible_hostname'] }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring
-  args:
-    creates: /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring
-  changed_when: false
+  ceph_key:
+    name: "mgr.{{ hostvars[item]['ansible_hostname'] }}"
+    state: present
+    caps:
+      mon: allow profile mgr
+      osd: allow *
+      mds: allow *
+    cluster: "{{ cluster }}"
   when:
     - cephx
     - groups.get(mgr_group_name, []) | length > 0
@@ -76,6 +83,8 @@
     - ceph_release_num[ceph_release] > ceph_release_num.jewel
   with_items: "{{ groups.get(mgr_group_name, []) }}"
 
+# once this gets backported github.com/ceph/ceph/pull/20983
+# we will be able to remove these 2 tasks below
 - name: find ceph keys
   shell: ls -1 /etc/ceph/*.keyring
   changed_when: false
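
For the default cluster name, the new restapi task requests the same end state as the command it replaces, i.e.:

    ceph --cluster ceph auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.restapi.keyring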

View File

@@ -20,14 +20,20 @@
   when: cephx
 
 - name: create monitor initial keyring
-  command: ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }} --create-keyring --name=mon. --add-key={{ monitor_secret }} --cap mon 'allow *'
-  args:
-    creates: /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
+  ceph_key:
+    name: mon.
+    state: present
+    dest: "/var/lib/ceph/tmp/"
+    secret: "{{ monitor_secret }}"
+    cluster: "{{ cluster }}"
+    caps:
+      mon: allow *
+    import_key: False
   when: cephx
 
 - name: set initial monitor key permissions
   file:
-    path: /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
+    path: "/var/lib/ceph/tmp/{{ cluster }}.mon..keyring"
     owner: "ceph"
     group: "ceph"
     mode: "0600"
@@ -42,26 +48,38 @@
     mode: "0755"
     recurse: true
 
-- name: set_fact ceph_authtool_cap >= ceph_release_num.luminous
+- name: set_fact client_admin_ceph_authtool_cap >= ceph_release_num.luminous
   set_fact:
-    ceph_authtool_cap: "--cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow' --cap mgr 'allow *'"
+    client_admin_ceph_authtool_cap:
+      mon: allow *
+      osd: allow *
+      mds: allow
+      mgr: allow *
   when:
     - ceph_release_num[ceph_release] >= ceph_release_num.luminous
    - cephx
    - admin_secret != 'admin_secret'
 
-- name: set_fact ceph_authtool_cap < ceph_release_num.luminous
+- name: set_fact client_admin_ceph_authtool_cap < ceph_release_num.luminous
   set_fact:
-    ceph_authtool_cap: "--cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'"
+    client_admin_ceph_authtool_cap:
+      mon: allow *
+      osd: allow *
+      mds: allow
   when:
     - ceph_release_num[ceph_release] < ceph_release_num.luminous
     - cephx
     - admin_secret != 'admin_secret'
 
 - name: create custom admin keyring
-  command: "ceph-authtool /etc/ceph/{{ cluster }}.client.admin.keyring --create-keyring --name=client.admin --add-key={{ admin_secret }} --set-uid=0 {{ ceph_authtool_cap }}"
-  args:
-    creates: /etc/ceph/{{ cluster }}.client.admin.keyring
+  ceph_key:
+    name: client.admin
+    state: present
+    secret: "{{ admin_secret }}"
+    auid: 0
+    caps: "{{ client_admin_ceph_authtool_cap }}"
+    import_key: False
+    cluster: "{{ cluster }}"
   register: create_custom_admin_secret
   when:
     - cephx
@@ -79,14 +97,14 @@
     - admin_secret != 'admin_secret'
 
 - name: import admin keyring into mon keyring
-  command: ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }} --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
+  command: ceph-authtool /var/lib/ceph/tmp/{{ cluster }}.mon..keyring --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
   when:
     - not create_custom_admin_secret.get('skipped')
     - cephx
     - admin_secret != 'admin_secret'
 
 - name: ceph monitor mkfs with keyring
-  command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
+  command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
   args:
     creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
   when:
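
Note the keyring filename change throughout this file: the module writes <cluster>.<name>.keyring into dest, and since the monitor key is literally named "mon." the file ends up with a double dot, e.g. for cluster "ceph":

    /var/lib/ceph/tmp/ceph.mon..keyring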

View File

@@ -21,39 +21,25 @@
   when:
     - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
 
-# A future version could use "--caps CAPSFILE"
-# which will set all of capabilities associated with a given key, for all subsystems
-- name: create openstack key(s)
-  shell: "{{ docker_exec_cmd }} bash -c 'ceph-authtool -C /etc/ceph/{{ cluster }}.{{ item.name }}.keyring --name {{ item.name }} --add-key {{ item.key }} --cap mon \"{{ item.mon_cap|default('') }}\" --cap osd \"{{ item.osd_cap|default('') }}\" --cap mds \"{{ item.mds_cap|default('') }}\"'"
-  args:
-    creates: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
+- name: create openstack cephx key(s)
+  ceph_key:
+    state: present
+    name: "{{ item.name }}"
+    caps: "{{ item.caps }}"
+    secret: "{{ item.key | default('') }}"
+    containerized: "{{ docker_exec_cmd | default(False) }}"
+    cluster: "{{ cluster }}"
   with_items: "{{ openstack_keys }}"
-  changed_when: false
   when: cephx
 
-- name: check if openstack key(s) already exist(s)
-  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
-  changed_when: false
-  failed_when: false
-  with_items: "{{ openstack_keys }}"
-  register: openstack_key_exist
-
-- name: add openstack key(s) to ceph
-  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} auth import -i /etc/ceph/{{ cluster }}.{{ item.0.name }}.keyring"
-  changed_when: false
-  with_together:
-    - "{{ openstack_keys }}"
-    - "{{ openstack_key_exist.results }}"
-  when: item.1.rc != 0
-
-- name: fetch openstack key(s)
+- name: fetch openstack cephx key(s)
   fetch:
     src: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
     dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
     flat: yes
   with_items: "{{ openstack_keys }}"
 
-- name: copy to other mons the openstack key(s)
+- name: copy to other mons the openstack cephx key(s)
   copy:
     src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
     dest: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
@@ -66,7 +52,7 @@
     - openstack_config
     - item.0 != groups[mon_group_name] | last
 
-- name: chmod openstack key(s) on the other mons and this mon
+- name: chmod openstack cephx key(s) on the other mons and this mon
   file:
     path: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
     mode: "{{ item.1.mode|default(omit) }}" # if mode not in list, uses mode from ps umask
@@ -78,7 +64,7 @@
     - openstack_config
     - cephx
 
-- name: setfacl for openstack key(s) on the other mons and this mon
+- name: setfacl for openstack cephx key(s) on the other mons and this mon
   command: "setfacl -m {{ item.1.acls | join(',') }} /etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
   with_nested:
     - "{{ groups[mon_group_name] }}"