allow custom pool size

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1596339
Signed-off-by: Rishabh Dave <ridave@redhat.com>
pull/3145/head
Rishabh Dave 2018-10-01 11:11:13 -04:00
parent 40b7747af7
commit cd1e4ee024
15 changed files with 203 additions and 93 deletions
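The change itself: each pool definition gains an optional "size" key, and every role that creates pools gets a "customize pool size" task that runs "ceph osd pool set <pool> size <size>" whenever the key is non-empty. A minimal sketch of a group_vars override using the new key (the pool values here are illustrative, not part of this patch):

    openstack_glance_pool:
      name: "images"
      pg_num: "32"
      pgp_num: "32"
      rule_name: "replicated_rule"
      type: 1
      erasure_profile: ""
      expected_num_objects: ""
      application: "rbd"
      size: "3"   # replica count; an empty string keeps the cluster default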

View File

@@ -554,6 +554,7 @@ dummy:
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: ""
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -563,6 +564,7 @@ dummy:
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: ""
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -572,6 +574,7 @@ dummy:
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: ""
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -581,6 +584,7 @@ dummy:
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: ""
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -590,6 +594,7 @@ dummy:
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: ""
#openstack_pools:
# - "{{ openstack_glance_pool }}"

View File

@@ -554,6 +554,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: ""
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -563,6 +564,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: ""
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -572,6 +574,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: ""
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -581,6 +584,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: ""
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -590,6 +594,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
# size: ""
#openstack_pools:
# - "{{ openstack_glance_pool }}"

View File

@@ -18,6 +18,7 @@ test:
type: 1
erasure_profile: ""
expected_num_objects: ""
size: ""
test2:
name: "test2"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -26,6 +27,7 @@ test2:
type: 1
erasure_profile: ""
expected_num_objects: ""
size: ""
pools:
- "{{ test }}"
- "{{ test2 }}"

View File

@@ -84,7 +84,12 @@
- keys | length > 0
- inventory_hostname == groups.get('_filtered_clients') | first
- name: list existing pool(s)
- name: pool related tasks
when:
- condition_copy_admin_key
- inventory_hostname == groups.get('_filtered_clients', []) | first
block:
- name: list existing pool(s)
command: >
{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool get {{ item.name }} size
@@ -92,11 +97,8 @@
register: created_pools
failed_when: false
delegate_to: "{{ delegated_node }}"
when:
- condition_copy_admin_key
- inventory_hostname == groups.get('_filtered_clients', []) | first
- name: create ceph pool(s)
- name: create ceph pool(s)
command: >
{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
@@ -115,10 +117,19 @@
delegate_to: "{{ delegated_node }}"
when:
- pools | length > 0
- condition_copy_admin_key
- inventory_hostname in groups.get('_filtered_clients') | first
- item.1.rc != 0
- name: customize pool size
command: >
{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool set {{ item.name }} size {{ item.size | default('') }}
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegate_node }}"
changed_when: false
when:
- pools | length > 0
- item.size | default("") != ""
- name: get client cephx keys
copy:
dest: "{{ item.source }}"

View File

@@ -346,8 +346,8 @@ cephfs_data: cephfs_data # name of the data pool for a given filesystem
cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
cephfs_pools:
- { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
- { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
- { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
- { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
## OSD options
#
@@ -546,6 +546,7 @@ openstack_glance_pool:
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
size: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -555,6 +556,7 @@ openstack_cinder_pool:
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
size: ""
openstack_nova_pool:
name: "vms"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -564,6 +566,7 @@ openstack_nova_pool:
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
size: ""
openstack_cinder_backup_pool:
name: "backups"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -573,6 +576,7 @@ openstack_cinder_backup_pool:
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
size: ""
openstack_gnocchi_pool:
name: "metrics"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -582,6 +586,27 @@ openstack_gnocchi_pool:
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
size: ""
openstack_cephfs_data_pool:
name: "manila_data"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
size: ""
openstack_cephfs_metadata_pool:
name: "manila_metadata"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
size: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
@@ -589,6 +614,8 @@ openstack_pools:
- "{{ openstack_nova_pool }}"
- "{{ openstack_cinder_backup_pool }}"
- "{{ openstack_gnocchi_pool }}"
- "{{ openstack_cephfs_data_pool }}"
- "{{ openstack_cephfs_metadata_pool }}"
# The value for 'key' can be a pre-generated key,

View File

@@ -56,6 +56,7 @@ client_connections: {}
# Whether or not to generate secure certificate to iSCSI gateway nodes
generate_crt: False
rbd_pool_size: ""
##################
# RBD-TARGET-API #
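The iscsi-gw role exposes a single knob rather than a per-pool dict; a non-empty value (illustrative here) makes the "customize pool size" task in the next file resize the rbd pool, while the default "" leaves it untouched:

    rbd_pool_size: "2"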

View File

@@ -32,15 +32,23 @@
register: rbd_pool_exists
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: get default value for osd_pool_default_pg_num
- name: rbd pool related tasks
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
block:
- name: get default value for osd_pool_default_pg_num
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
changed_when: false
register: osd_pool_default_pg_num
delegate_to: "{{ groups[mon_group_name][0] }}"
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
- name: create an rbd pool if it doesn't exist
- name: create an rbd pool if it doesn't exist
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
- name: customize pool size
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default('') }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when:
- rbd_pool_size | default("") != ""

View File

@@ -1,11 +1,20 @@
---
- name: create filesystem pools
- name: filesystem pools related tasks
block:
- name: create filesystem pools
command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items:
- "{{ cephfs_pools }}"
- name: customize pool size
command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default('') }}"
with_items: "{{ cephfs_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: item.size | default("") != ""
- name: check if ceph filesystem already exists
command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
register: check_existing_cephfs
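Combined with the size field added to cephfs_pools in the group_vars change above, the replica count can now be pinned per filesystem pool; a sketch with illustrative pg counts:

    cephfs_pools:
      - { name: "cephfs_data", pgs: "64", size: "3" }
      - { name: "cephfs_metadata", pgs: "64", size: "3" }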

View File

@@ -9,7 +9,9 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
until: wait_for_all_osds_up.rc == 0
- name: list existing pool(s)
- name: pool related tasks
block:
- name: list existing pool(s)
command: >
{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool get {{ item.name }} size
@@ -18,7 +20,7 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
failed_when: false
- name: create openstack pool(s)
- name: create openstack pool(s)
command: >
{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
@@ -38,7 +40,16 @@
when:
- item.1.get('rc', 0) != 0
- name: assign application to pool(s)
- name: customize pool size
command: >
{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool set {{ item.name }} size {{ item.size | default('') }}
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: item.size | default("") != ""
- name: assign application to pool(s)
command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
with_items: "{{ openstack_pools | unique }}"
changed_when: false
@@ -82,3 +93,4 @@
- cephx
- openstack_config
- item.0 != groups[mon_group_name]

View File

@@ -37,10 +37,13 @@ rgw_pull_proto: "http"
#rgw_create_pools:
# defaults.rgw.buckets.data:
# pg_num: 16
# size: ""
# defaults.rgw.buckets.index:
# pg_num: 32
# size: ""
# foo:
# pg_num: 4
# size: ""
##########
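Uncommented, a deployer's override would mirror the sample above; the sizes here are illustrative:

    rgw_create_pools:
      defaults.rgw.buckets.data:
        pg_num: 16
        size: "3"
      defaults.rgw.buckets.index:
        pg_num: 32
        size: "3"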

View File

@@ -24,11 +24,21 @@
include_tasks: docker/main.yml
when: containerized_deployment
- name: create rgw pools if rgw_create_pools is defined
- name: rgw pool related tasks
when:
- rgw_create_pools is defined
block:
- name: create rgw pools if rgw_create_pools is defined
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
changed_when: false
with_dict: "{{ rgw_create_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- rgw_create_pools is defined
- name: customize pool size
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.size | default('') }}"
with_dict: "{{ rgw_create_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
run_once: true
when: item.value.size | default("") != ""

View File

@@ -1,9 +1,20 @@
{
"ceph_conf_overrides": {
"global": {
"osd_pool_default_pg_num": 12,
"osd_pool_default_size": 1
"osd_pool_default_pg_num": 12
}
},
"cephfs_pools": [
{
"name": "cephfs_metadata",
"pgs": 8,
"size": 2
},
{
"name": "cephfs_data",
"pgs": 8,
"size": 2
}
],
"ceph_mon_docker_memory_limit": "2g"
}

View File

@@ -27,6 +27,7 @@ openstack_glance_pool:
type: 1
erasure_profile: ""
expected_num_objects: ""
size: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -35,6 +36,7 @@ openstack_cinder_pool:
type: 1
erasure_profile: ""
expected_num_objects: ""
size: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"

View File

@@ -9,6 +9,7 @@ test:
type: 1
erasure_profile: ""
expected_num_objects: ""
size: ""
test2:
name: "test2"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -17,6 +18,7 @@ test2:
type: 1
erasure_profile: ""
expected_num_objects: ""
size: ""
pools:
- "{{ test }}"
- "{{ test2 }}"

View File

@@ -28,6 +28,7 @@ openstack_glance_pool:
type: 1
erasure_profile: ""
expected_num_objects: ""
size: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
@@ -36,6 +37,7 @@ openstack_cinder_pool:
type: 1
erasure_profile: ""
expected_num_objects: ""
size: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"