mirror of https://github.com/ceph/ceph-ansible.git
Merge pull request #1677 from ceph/fix_fetch-copy_files
Add the possibility to deploy a client-node in a containerized deployment (pull/1692/head)
commit b3ac49f750
@@ -22,6 +22,7 @@ MGRS = settings['mgr_vms']
 PUBLIC_SUBNET = settings['public_subnet']
 CLUSTER_SUBNET = settings['cluster_subnet']
 BOX = settings['vagrant_box']
+CLIENT_BOX = settings['client_vagrant_box'] || settings['vagrant_box']
 BOX_URL = settings['vagrant_box_url']
 SYNC_DIR = settings['vagrant_sync_dir']
 MEMORY = settings['memory']

@@ -217,6 +218,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

   (0..CLIENTS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
+      client.vm.box = CLIENT_BOX
       client.vm.hostname = "#{LABEL_PREFIX}ceph-client#{i}"
       if ASSIGN_STATIC_IP
         client.vm.network :private_network,

@@ -10,6 +10,7 @@ pools:
   - { name: test, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }
   - { name: test2, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }

+# Can add `mds_cap` attribute to override the default value, which is '' for mds capabilities.
 keys:
-  - { name: client.test, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=test'" }
-  - { name: client.test2, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=test2'" }
+  - { name: client.test, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test" }
+  - { name: client.test2, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test2" }

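Note on the new key format above: the free-form `value` string is replaced by an explicit `key` plus per-subsystem `mon_cap`/`osd_cap` fields. Because the consuming task uses `shell:` (next hunk), `$(ceph-authtool --gen-print-key)` is command substitution evaluated on the target at run time, not a literal secret:

# What key: "$(ceph-authtool --gen-print-key)" expands to when the
# "create key(s)" task below runs through /bin/sh: a fresh base64 secret,
# e.g. AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ== (example value borrowed
# from the group_vars comment further down).
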
@@ -1,14 +1,32 @@
 ---
 - name: create pools
-  command: ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}
+  command: "ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
   with_items: "{{ pools }}"
   changed_when: false
   failed_when: false
+  when: pools | length > 0

-- name: create keys
-  command: ceph --cluster {{ cluster }} auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
+- name: create key(s)
+  shell: "ceph-authtool -C /etc/ceph/{{ cluster }}.{{ item.name }}.keyring --name {{ item.name }} --add-key {{ item.key }} --cap mon \"{{ item.mon_cap|default('') }}\" --cap osd \"{{ item.osd_cap|default('') }}\" --cap mds \"{{ item.mds_cap|default('') }}\""
+  args:
+    creates: /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
   with_items: "{{ keys }}"
   changed_when: false
-  when: cephx
+  when:
+    - cephx
+    - keys | length > 0
+
+- name: check if key(s) already exist(s)
+  command: "ceph --cluster {{ cluster }} auth get {{ item.name }}"
+  changed_when: false
+  failed_when: false
+  with_items: "{{ keys }}"
+  register: keys_exist
+
+- name: add key(s) to ceph
+  command: "ceph --cluster {{ cluster }} auth import -i /etc/ceph/{{ cluster }}.{{ item.0.name }}.keyring"
+  changed_when: false
+  with_together:
+    - "{{ keys }}"
+    - "{{ keys_exist.results }}"
+  when: item.1.rc != 0

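Note on `with_together` in the "add key(s) to ceph" task: it zips `keys` with the registered results of the existence probe, so `item.0` is the key definition and `item.1` the matching `ceph auth get` outcome; the import runs only where the probe failed. A minimal, self-contained sketch of the same pattern (variable names and values are illustrative, not from the PR):

# demo.yml -- run with: ansible-playbook -i localhost, -c local demo.yml
- hosts: all
  gather_facts: false
  vars:
    demo_keys:
      - { name: client.a }
      - { name: client.b }
    probe:
      results:
        - { rc: 0 }   # already exists: skip
        - { rc: 1 }   # missing: act
  tasks:
    - name: act only where the probe failed
      debug:
        msg: "would import {{ item.0.name }}"
      with_together:
        - "{{ demo_keys }}"
        - "{{ probe.results }}"
      when: item.1.rc != 0
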
@@ -1,10 +1,20 @@
 ---
+# only create fetch directory when:
+# we are not populating kv_store with default ceph.conf AND host is a mon
+# OR
+# we are not populating kv_store with default ceph.conf AND there is at least 1 nfs in the nfs group AND host is the first nfs
 - name: create a local fetch directory if it does not exist
   local_action: file path={{ fetch_directory }} state=directory
   changed_when: false
   become: false
   run_once: true
-  when: cephx or generate_fsid
+  when:
+    - (cephx or generate_fsid)
+    - (not mon_containerized_default_ceph_conf_with_kv and
+      (inventory_hostname in groups.get(mon_group_name, []))) or
+      (not mon_containerized_default_ceph_conf_with_kv and
+      ((groups.get(nfs_group_name, []) | length > 0)
+      and (inventory_hostname == groups.get(nfs_group_name, [])[0])))

 - name: generate cluster uuid
   local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf

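Both OR branches of the new condition share the `not mon_containerized_default_ceph_conf_with_kv` guard, so the test can be factored; a hypothetical equivalent spelling (not part of this PR):

- name: equivalent guard (sketch)
  debug:
    msg: "would create {{ fetch_directory }}"
  when:
    - (cephx or generate_fsid)
    - not mon_containerized_default_ceph_conf_with_kv
    - (inventory_hostname in groups.get(mon_group_name, [])) or
      ((groups.get(nfs_group_name, []) | length > 0) and
       (inventory_hostname == groups.get(nfs_group_name, [])[0]))
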
@@ -32,6 +42,12 @@
     mode: "0644"
     config_overrides: "{{ ceph_conf_overrides }}"
     config_type: ini
+  when:
+    - (not mon_containerized_default_ceph_conf_with_kv and
+      (inventory_hostname in groups.get(mon_group_name, []))) or
+      (not mon_containerized_default_ceph_conf_with_kv and
+      ((groups.get(nfs_group_name, []) | length > 0)
+      and (inventory_hostname == groups.get(nfs_group_name, [])[0])))

 - name: set fsid fact when generate_fsid = true
   set_fact:

@@ -28,7 +28,7 @@
   when: "{{ groups.get(mgr_group_name, []) | length > 0 }}"

 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }}
+  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false

@@ -38,7 +38,7 @@

 - name: try to fetch ceph config and keys
   copy:
-    src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
+    src: "{{ fetch_directory }}/{{ fsid }}/{{ item.0 }}"
     dest: "{{ item.0 }}"
     owner: root
     group: root

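The recurring change in this and the following hunks is the fix the PR branch name refers to: the shared `docker_mon_files` directory under `fetch_directory` becomes a per-cluster `{{ fsid }}` subdirectory, so files fetched from different clusters no longer collide on the controller. A hypothetical sketch of the fetch/copy round-trip under the new layout (group names, paths, and the fsid value are illustrative):

# round_trip.yml -- assumes inventory groups "mons" and "clients"
- hosts: mons
  vars: &demo_vars
    fetch_directory: fetch/
    fsid: 4a158d27-f750-41d5-9e7f-26ce4c9d2d45   # example uuid
  tasks:
    - name: pull a file into the per-cluster directory on the controller
      fetch:
        src: /etc/ceph/ceph.client.admin.keyring
        dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/ceph.client.admin.keyring"
        flat: yes

- hosts: clients
  vars: *demo_vars
  tasks:
    - name: push the fetched file out to another node
      copy:
        src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/ceph.client.admin.keyring"
        dest: /etc/ceph/ceph.client.admin.keyring
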
@@ -67,18 +67,7 @@
 # # because it creates the directories needed by the latter.
 - include: ./dirs_permissions.yml

-# let the first mon create configs and keyrings
-# Only include 'create_configs.yml' when:
-# we are not populating kv_store with default ceph.conf AND host is a mon
-# OR
-# we are not populating kv_store with default ceph.conf AND there is at least 1 nfs in the nfs group AND host is the first nfs
 - include: create_configs.yml
-  when:
-    - (not mon_containerized_default_ceph_conf_with_kv and
-      (inventory_hostname in groups.get(mon_group_name, []))) or
-      (not mon_containerized_default_ceph_conf_with_kv and
-      ((groups.get(nfs_group_name, []) | length > 0)
-      and (inventory_hostname == groups.get(nfs_group_name, [])[0])))

 # Only include 'fetch_configs.yml' when:
 # - we are deploying containers without kv AND host is either a mon OR a nfs OR an osd

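This hunk is the other half of the refactor: the `when:` (and its explanatory comment block) moves off the include and into the included file itself, which is what the `@@ -1,10 +1,20 @@` and `@@ -32,6 +42,12 @@` hunks above add. Sketched:

# before: conditional include
#   - include: create_configs.yml
#     when: <complex mon/nfs condition>
# after: unconditional include; the tasks inside create_configs.yml
# carry the same guard, so each precondition lives next to its work.
- include: create_configs.yml
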
@@ -96,11 +96,11 @@ openstack_pools:
 # e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
 # By default, keys will be auto-generated.
 openstack_keys:
-  - { name: client.glance, key: "$(ceph-authtool --gen-print-key)", mon_cap: "mon 'allow r'", osd_cap: "osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}'" }
-  - { name: client.cinder, key: "$(ceph-authtool --gen-print-key)", mon_cap: "mon 'allow r'", osd_cap: "osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}'" }
-  - { name: client.cinder-backup, key: "$(ceph-authtool --gen-print-key)", mon_cap: "mon 'allow r'", osd_cap: "osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}'" }
-  - { name: client.gnocchi, key: "$(ceph-authtool --gen-print-key)", mon_cap: "mon 'allow r'", osd_cap: "osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_gnocchi_pool.name }}'" }
-  - { name: client.openstack, key: "$(ceph-authtool --gen-print-key)", mon_cap: "mon 'allow r'", osd_cap: "osd 'allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=vms, allow rwx pool=volumes, allow rwx pool=backups'" }
+  - { name: client.glance, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}" }
+  - { name: client.cinder, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}" }
+  - { name: client.cinder-backup, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}" }
+  - { name: client.gnocchi, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_gnocchi_pool.name }}" }
+  - { name: client.openstack, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=vms, allow rwx pool=volumes, allow rwx pool=backups" }

 ##########
 # DOCKER #

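The cap values lose their `mon '...'`/`osd '...'` prefixes because the consuming task now names the subsystem itself (`--cap mon "..."` rather than `--cap {{ item.mon_cap }}`; see the "create openstack key(s)" hunk below). The rendered command line for client.glance, before and after:

# before: mon_cap: "mon 'allow r'"  ->  ceph-authtool ... --cap mon 'allow r'
# after:  mon_cap: "allow r"        ->  ceph-authtool ... --cap mon "allow r"
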
@@ -82,6 +82,16 @@
     - global_in_ceph_conf_overrides
     - ceph_conf_overrides.global.osd_pool_default_size is defined

+- name: create rbd pool on luminous
+  shell: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}
+  changed_when: false
+  failed_when: false
+  when:
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - global_in_ceph_conf_overrides
+    - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
+    - rbd_pool_exist.rc != 0
+
 - include: openstack_config.yml
   when:
     - openstack_config

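Context for the new task: starting with Luminous, Ceph no longer creates the `rbd` pool by default, so the playbook creates it explicitly when pool defaults are provided and the earlier `rbd_pool_exist` probe failed. The `ceph_release_num.{{ ceph_release }}` spelling uses inline Jinja inside `when:`; a hypothetical equivalent without it, since `when:` is already a Jinja context:

  when:
    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
    - rbd_pool_exist.rc != 0
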
@@ -2,7 +2,7 @@
 - name: push ceph files to the ansible server
   fetch:
     src: "{{ item.0 }}"
-    dest: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
+    dest: "{{ fetch_directory }}/{{ fsid }}/{{ item.0 }}"
     flat: yes
   with_together:
     - "{{ ceph_config_keys }}"

@@ -72,7 +72,7 @@
 - name: push ceph mgr key(s)
   fetch:
     src: "{{ ceph_conf_key_directory }}/{{ cluster }}.mgr.{{ hostvars[item.item]['ansible_hostname'] }}.keyring"
-    dest: "{{ fetch_directory }}/docker_mon_files/{{ item.stat.path }}"
+    dest: "{{ fetch_directory }}/{{ fsid }}/{{ item.stat.path }}"
     flat: yes
   with_items:
     - "{{ stat_mgr_keys.results }}"

@@ -5,11 +5,10 @@
   changed_when: false
   failed_when: false

-# NOTE: (leseb): I know this is not ideal since this only allows 2 caps.
-# A future version could use "--caps CAPSFILE"
-# which will set all of capabilities associated with a given key, for all subsystems
 - name: create openstack key(s)
-  shell: "{{ docker_exec_cmd }} ceph-authtool -C /etc/ceph/{{ cluster }}.{{ item.name }}.keyring --name {{ item.name }} --add-key {{ item.key }} --cap {{ item.mon_cap }} --cap {{ item.osd_cap }}"
+  shell: "{{ docker_exec_cmd }} ceph-authtool -C /etc/ceph/{{ cluster }}.{{ item.name }}.keyring --name {{ item.name }} --add-key {{ item.key }} --cap mon \"{{ item.mon_cap|default('') }}\" --cap osd \"{{ item.osd_cap|default('') }}\" --cap mds \"{{ item.mds_cap|default('') }}\""
   args:
     creates: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
   with_items: "{{ openstack_keys }}"

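The removed NOTE is obsolete: with `|default('')` the task can now set all three subsystems in one call instead of only two. For example, with `item` = `{ name: client.glance, key: K, mon_cap: "allow r" }` the command renders as:

#   ceph-authtool -C /etc/ceph/ceph.client.glance.keyring \
#     --name client.glance --add-key K \
#     --cap mon "allow r" --cap osd "" --cap mds ""   (unset caps become empty)
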
@@ -7,7 +7,7 @@
   - /var/lib/ceph/radosgw/keyring

 - name: stat for config and keys
-  local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }}
+  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false

@@ -17,7 +17,7 @@

 - name: try to fetch config and keys
   copy:
-    src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
+    src: "{{ fetch_directory }}/{{ fsid }}/{{ item.0 }}"
     dest: "{{ item.0 }}"
     owner: "64045"
     group: "64045"

@@ -12,7 +12,7 @@
   - inventory_hostname == groups[rgw_group_name][0]

 - name: stat for config and keys
-  local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }}
+  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
   with_items: "{{ rgw_config_keys }}"
   changed_when: false
   become: false

@@ -26,7 +26,7 @@
 - name: push ceph files to the ansible server
   fetch:
     src: "{{ item.0 }}"
-    dest: "{{ fetch_directory }}/docker_mon_files/var/lib/ceph/radosgw/keyring"
+    dest: "{{ fetch_directory }}/{{ fsid }}/var/lib/ceph/radosgw/keyring"
     flat: yes
   with_together:
     - "{{ rgw_config_keys }}"

@@ -14,3 +14,6 @@ rgw0

 [mgrs]
 mgr0
+
+[clients]
+client0

@@ -10,7 +10,7 @@ mds_vms: 1
 rgw_vms: 1
 nfs_vms: 0
 rbd_mirror_vms: 1
-client_vms: 0
+client_vms: 1
 iscsi_gw_vms: 0
 mgr_vms: 1

@@ -46,6 +46,7 @@ disks: "[ '/dev/sda', '/dev/sdb' ]"
 # - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
 # - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
 vagrant_box: centos/atomic-host
+client_vagrant_box: centos/7
 #ssh_private_key_path: "~/.ssh/id_rsa"
 # The sync directory changes based on vagrant box
 # Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant

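Together with the Vagrantfile hunks at the top, this wires up the client node end to end: `client_vms: 1` creates the VM, the `[clients]` inventory group targets it, and `CLIENT_BOX` falls back to `vagrant_box` via Ruby's `||` when `client_vagrant_box` is unset, which lets the client run a regular CentOS image while the cluster nodes stay on Atomic Host. A minimal vagrant_variables.yml sketch:

client_vms: 1                     # spin up one client VM
vagrant_box: centos/atomic-host   # cluster nodes (containerized deployment)
client_vagrant_box: centos/7      # client node; defaults to vagrant_box when unset
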
@@ -14,3 +14,7 @@ journal_collocation: True
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
   - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1

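These overrides close the loop for the test scenario: the `pools` entries and the "create rbd pool on luminous" task both read `ceph_conf_overrides.global.osd_pool_default_pg_num` back, e.g. (from the pools hunk above):

pools:
  - { name: test, pgs: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}" }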