align cephfs pool creation

The definitions of the CephFS pools should match those of the OpenStack pools, so both use the same pool attributes and creation logic.

Signed-off-by: Rishabh Dave <ridave@redhat.com>
Co-Authored-by: Simone Caronni <simone.caronni@teralytics.net>
(cherry picked from commit 67071c3169)
pull/4121/head
Rishabh Dave 2018-04-10 11:32:58 +02:00 committed by Guillaume Abrioux
parent 6e565b251d
commit c51e0b51d2
9 changed files with 103 additions and 57 deletions
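For context: after this change a CephFS pool is described by the same dictionary shape already used for the openstack_* pools, so the same creation logic can serve both. A minimal sketch of that shared shape, with illustrative values taken from the defaults and test fixtures below:

cephfs_data_pool:
  name: "cephfs_data"            # pool name passed to "ceph osd pool create"
  pg_num: 8                      # replaces the old "pgs" key
  pgp_num: 8                     # falls back to pg_num when omitted
  rule_name: "replicated_rule"   # CRUSH rule for the pool
  type: 1                        # 1 = replicated, 3 = erasure
  erasure_profile: ""            # only consulted for erasure-coded pools
  expected_num_objects: ""
  application: "cephfs"          # set via "ceph osd pool application enable"
  size: 3                        # replica count
  min_size: 0                    # as in the test fixture below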


@@ -341,24 +341,28 @@ dummy:
 # CEPHFS #
 ##########
 #cephfs: cephfs # name of the ceph filesystem
-#cephfs_data: cephfs_data # name of the data pool for a given filesystem
-#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 #cephfs_data_pool:
-#  name: "{{ cephfs_data }}"
-#  pgs: "{{ osd_pool_default_pg_num }}"
+#  name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#  application: "cephfs"
 #  size: "{{ osd_pool_default_size }}"
 #  min_size: "{{ osd_pool_default_min_size }}"
-#  rule_name: "replicated_rule"
 #cephfs_metadata_pool:
-#  name: "{{ cephfs_metadata }}"
-#  pgs: "{{ osd_pool_default_pg_num }}"
+#  name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#  application: "cephfs"
 #  size: "{{ osd_pool_default_size }}"
 #  min_size: "{{ osd_pool_default_min_size }}"
-#  rule_name: "replicated_rule"
 #cephfs_pools:
 #  - "{{ cephfs_data_pool }}"
 #  - "{{ cephfs_metadata_pool }}"


@@ -341,24 +341,28 @@ ceph_rhcs_version: 4
 # CEPHFS #
 ##########
 #cephfs: cephfs # name of the ceph filesystem
-#cephfs_data: cephfs_data # name of the data pool for a given filesystem
-#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 #cephfs_data_pool:
-#  name: "{{ cephfs_data }}"
-#  pgs: "{{ osd_pool_default_pg_num }}"
+#  name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#  application: "cephfs"
 #  size: "{{ osd_pool_default_size }}"
 #  min_size: "{{ osd_pool_default_min_size }}"
-#  rule_name: "replicated_rule"
 #cephfs_metadata_pool:
-#  name: "{{ cephfs_metadata }}"
-#  pgs: "{{ osd_pool_default_pg_num }}"
+#  name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#  application: "cephfs"
 #  size: "{{ osd_pool_default_size }}"
 #  min_size: "{{ osd_pool_default_min_size }}"
-#  rule_name: "replicated_rule"
 #cephfs_pools:
 #  - "{{ cephfs_data_pool }}"
 #  - "{{ cephfs_metadata_pool }}"


@@ -333,24 +333,28 @@ mon_host_v2:
 # CEPHFS #
 ##########
 cephfs: cephfs # name of the ceph filesystem
-cephfs_data: cephfs_data # name of the data pool for a given filesystem
-cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 cephfs_data_pool:
-  name: "{{ cephfs_data }}"
-  pgs: "{{ osd_pool_default_pg_num }}"
+  name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  application: "cephfs"
   size: "{{ osd_pool_default_size }}"
   min_size: "{{ osd_pool_default_min_size }}"
-  rule_name: "replicated_rule"
 cephfs_metadata_pool:
-  name: "{{ cephfs_metadata }}"
-  pgs: "{{ osd_pool_default_pg_num }}"
+  name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  application: "cephfs"
   size: "{{ osd_pool_default_size }}"
   min_size: "{{ osd_pool_default_min_size }}"
-  rule_name: "replicated_rule"
 cephfs_pools:
   - "{{ cephfs_data_pool }}"
   - "{{ cephfs_metadata_pool }}"


@@ -25,8 +25,8 @@
   command: >
     {{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
     osd pool create {{ item.name }}
-    {{ item.pg_num | default(item.pgs) | default(osd_pool_default_pg_num) }}
-    {{ item.pgp_num | default(item.pgs) | default(item.pg_num) | default(osd_pool_default_pg_num) }}
+    {{ item.pg_num | default(osd_pool_default_pg_num) }}
+    {{ item.pgp_num | default(item.pg_num) }}
     {{ 'replicated_rule' if not item.rule_name | default('replicated_rule') else item.rule_name | default('replicated_rule') }}
     {{ 1 if item.type|default(1) == 'replicated' else 3 if item.type|default(1) == 'erasure' else item.type|default(1) }}
     {%- if (item.type | default("1") == '3' or item.type | default("1") == 'erasure') and item.erasure_profile != '' %}
@@ -50,10 +50,10 @@
   when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size

 - name: assign application to cephfs pools
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} cephfs"
+  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
   with_items:
-    - "{{ cephfs_data }}"
-    - "{{ cephfs_metadata }}"
+    - "{{ cephfs_data_pool }}"
+    - "{{ cephfs_metadata_pool }}"
   changed_when: false

 - name: check and create ceph filesystem
@@ -66,7 +66,7 @@
   failed_when: false

 - name: create ceph filesystem
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
+  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata_pool.name }} {{ cephfs_data_pool.name }}"
   changed_when: false
   when: check_existing_cephfs.rc != 0
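Since each pool is now an ordinary dictionary, a deployment can tune any attribute from group_vars without touching the tasks above; a hypothetical override (illustrative values, not project defaults):

cephfs_data_pool:
  name: "cephfs_data"
  pg_num: 64                     # sized up for a larger cluster
  pgp_num: 64
  rule_name: "replicated_rule"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
  application: "cephfs"
  size: 3
  min_size: 2

With these values the create task renders roughly "osd pool create cephfs_data 64 64 replicated_rule 1", and the application task then runs "osd pool application enable cephfs_data cephfs" for the pool.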


@@ -6,7 +6,7 @@
   when: containerized_deployment | bool

 - name: check if rados index object exists
-  shell: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
+  shell: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
   changed_when: false
   failed_when: false
   register: rados_index_exists
@@ -16,7 +16,7 @@
   run_once: true

 - name: create an empty rados index object
-  command: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
+  command: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
   when:
     - ceph_nfs_rados_backend | bool
     - rados_index_exists.rc != 0


@@ -33,7 +33,7 @@ RADOS_URLS {
   ceph_conf = '/etc/ceph/{{ cluster }}.conf';
   userid = "{{ ceph_nfs_ceph_user }}";
 }
-%url rados://{{ cephfs_data }}/{{ ceph_nfs_rados_export_index }}
+%url rados://{{ cephfs_data_pool.name }}/{{ ceph_nfs_rados_export_index }}

 NFSv4 {
   RecoveryBackend = 'rados_kv';
@@ -41,7 +41,7 @@ NFSv4 {
 RADOS_KV {
   ceph_conf = '/etc/ceph/{{ cluster }}.conf';
   userid = "{{ ceph_nfs_ceph_user }}";
-  pool = "{{ cephfs_data }}";
+  pool = "{{ cephfs_data_pool.name }}";
 }
 {% endif %}


@@ -12,14 +12,28 @@
   },
   "cephfs_pools": [
     {
-      "name": "cephfs_metadata",
-      "pgs": 8,
-      "size": 1
+      "name": "cephfs_data",
+      "pg_num": 8,
+      "pgp_num": 8,
+      "rule_name": "replicated_rule",
+      "type": 1,
+      "erasure_profile": "",
+      "expected_num_objects": "",
+      "application": "cephfs",
+      "size": 3,
+      "min_size": 0
     },
     {
-      "name": "cephfs_data",
-      "pgs": 8,
-      "size": 1
+      "name": "cephfs_metadata",
+      "pg_num": 8,
+      "pgp_num": 8,
+      "rule_name": "replicated_rule",
+      "type": 1,
+      "erasure_profile": "",
+      "expected_num_objects": "",
+      "application": "cephfs",
+      "size": 3,
+      "min_size": 0
     }
   ],
   "ceph_mon_docker_memory_limit": "2g"


@@ -30,4 +30,4 @@ openstack_cinder_pool:
   application: rbd
 openstack_pools:
   - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
\ No newline at end of file
+  - "{{ openstack_cinder_pool }}"


@@ -16,11 +16,31 @@ all:
     ceph_repository: community
     ceph_release: luminous
     ceph_stable: true
-    cephfs_data: manila_data
-    cephfs_metadata: manila_metadata
+    cephfs_data_pool:
+      name: 'manila_data'
+      pg_num: "{{ osd_pool_default_pg_num }}"
+      pgp_num: "{{ osd_pool_default_pg_num }}"
+      rule_name: "replicated_rule"
+      type: 1
+      erasure_profile: ""
+      expected_num_objects: ""
+      application: "cephfs"
+      size: "{{ osd_pool_default_size }}"
+      min_size: "{{ osd_pool_default_min_size }}"
+    cephfs_metadata_pool:
+      name: 'manila_metadata'
+      pg_num: "{{ osd_pool_default_pg_num }}"
+      pgp_num: "{{ osd_pool_default_pg_num }}"
+      rule_name: "replicated_rule"
+      type: 1
+      erasure_profile: ""
+      expected_num_objects: ""
+      application: "cephfs"
+      size: "{{ osd_pool_default_size }}"
+      min_size: "{{ osd_pool_default_min_size }}"
     cephfs_pools:
-      - {name: manila_data, pgs: 8}
-      - {name: manila_metadata, pgs: 8}
+      - "{{ cephfs_data_pool }}"
+      - "{{ cephfs_metadata_pool }}"
     cluster_network: 192.168.96.0/24
     containerized_deployment: true
     devices: [/dev/sda, /dev/sdb, /dev/sdc]