From 67071c3169f40621baec2aa51504e9f361eaf890 Mon Sep 17 00:00:00 2001
From: Rishabh Dave
Date: Tue, 10 Apr 2018 11:32:58 +0200
Subject: [PATCH] align cephfs pool creation

The definitions of the CephFS pools should match the structure used for
the OpenStack pools.

Signed-off-by: Rishabh Dave
Co-Authored-by: Simone Caronni
---
 group_vars/all.yml.sample                     | 28 +++++++++++--------
 group_vars/rhcs.yml.sample                    | 28 +++++++++++--------
 roles/ceph-defaults/defaults/main.yml         | 28 +++++++++++--------
 .../ceph-mds/tasks/create_mds_filesystems.yml | 12 ++++----
 roles/ceph-nfs/tasks/start_nfs.yml            |  4 +--
 roles/ceph-nfs/templates/ganesha.conf.j2      |  4 +--
 .../functional/all_daemons/ceph-override.json | 26 +++++++++++++----
 tests/functional/all_daemons/group_vars/all   |  2 +-
 tests/functional/ooo-collocation/hosts        | 28 ++++++++++++++++---
 9 files changed, 103 insertions(+), 57 deletions(-)

diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample
index 305d14e2d..28bb0ce8b 100644
--- a/group_vars/all.yml.sample
+++ b/group_vars/all.yml.sample
@@ -341,24 +341,28 @@ dummy:
 # CEPHFS #
 ##########
 #cephfs: cephfs # name of the ceph filesystem
-#cephfs_data: cephfs_data # name of the data pool for a given filesystem
-#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
-
 #cephfs_data_pool:
-#  name: "{{ cephfs_data }}"
-#  pgs: "{{ osd_pool_default_pg_num }}"
+#  name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#  application: "cephfs"
 #  size: "{{ osd_pool_default_size }}"
 #  min_size: "{{ osd_pool_default_min_size }}"
-#  rule_name: "replicated_rule"
-
 #cephfs_metadata_pool:
-#  name: "{{ cephfs_metadata }}"
-#  pgs: "{{ osd_pool_default_pg_num }}"
+#  name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#  application: "cephfs"
 #  size: "{{ osd_pool_default_size }}"
 #  min_size: "{{ osd_pool_default_min_size }}"
-#  rule_name: "replicated_rule"
-
-
 #cephfs_pools:
 #  - "{{ cephfs_data_pool }}"
 #  - "{{ cephfs_metadata_pool }}"

diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample
index b67529cb2..f1fc0a04f 100644
--- a/group_vars/rhcs.yml.sample
+++ b/group_vars/rhcs.yml.sample
@@ -341,24 +341,28 @@ ceph_rhcs_version: 4
 # CEPHFS #
 ##########
 #cephfs: cephfs # name of the ceph filesystem
-#cephfs_data: cephfs_data # name of the data pool for a given filesystem
-#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
-
 #cephfs_data_pool:
-#  name: "{{ cephfs_data }}"
-#  pgs: "{{ osd_pool_default_pg_num }}"
+#  name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#  application: "cephfs"
 #  size: "{{ osd_pool_default_size }}"
 #  min_size: "{{ osd_pool_default_min_size }}"
-#  rule_name: "replicated_rule"
-
 #cephfs_metadata_pool:
-#  name: "{{ cephfs_metadata }}"
-#  pgs: "{{ osd_pool_default_pg_num }}"
+#  name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
+#  rule_name: "replicated_rule"
+#  type: 1
+#  erasure_profile: ""
+#  expected_num_objects: ""
+#  application: "cephfs"
 #  size: "{{ osd_pool_default_size }}"
 #  min_size: "{{ osd_pool_default_min_size }}"
-#  rule_name: "replicated_rule"
-
-
 #cephfs_pools:
 #  - "{{ cephfs_data_pool }}"
 #  - "{{ cephfs_metadata_pool }}"

diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml
index 632ffde3d..d14ab05d0 100644
--- a/roles/ceph-defaults/defaults/main.yml
+++ b/roles/ceph-defaults/defaults/main.yml
@@ -333,24 +333,28 @@ mon_host_v2:
 # CEPHFS #
 ##########
 cephfs: cephfs # name of the ceph filesystem
-cephfs_data: cephfs_data # name of the data pool for a given filesystem
-cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
-
 cephfs_data_pool:
-  name: "{{ cephfs_data }}"
-  pgs: "{{ osd_pool_default_pg_num }}"
+  name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  application: "cephfs"
   size: "{{ osd_pool_default_size }}"
   min_size: "{{ osd_pool_default_min_size }}"
-  rule_name: "replicated_rule"
-
 cephfs_metadata_pool:
-  name: "{{ cephfs_metadata }}"
-  pgs: "{{ osd_pool_default_pg_num }}"
+  name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "replicated_rule"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  application: "cephfs"
   size: "{{ osd_pool_default_size }}"
   min_size: "{{ osd_pool_default_min_size }}"
-  rule_name: "replicated_rule"
-
-
 cephfs_pools:
   - "{{ cephfs_data_pool }}"
   - "{{ cephfs_metadata_pool }}"

diff --git a/roles/ceph-mds/tasks/create_mds_filesystems.yml b/roles/ceph-mds/tasks/create_mds_filesystems.yml
index 6689deb4f..a45c2b6d8 100644
--- a/roles/ceph-mds/tasks/create_mds_filesystems.yml
+++ b/roles/ceph-mds/tasks/create_mds_filesystems.yml
@@ -25,8 +25,8 @@
   command: >
     {{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
     osd pool create {{ item.name }}
-    {{ item.pg_num | default(item.pgs) | default(osd_pool_default_pg_num) }}
-    {{ item.pgp_num | default(item.pgs) | default(item.pg_num) | default(osd_pool_default_pg_num) }}
+    {{ item.pg_num | default(osd_pool_default_pg_num) }}
+    {{ item.pgp_num | default(item.pg_num) }}
     {{ 'replicated_rule' if not item.rule_name | default('replicated_rule') else item.rule_name | default('replicated_rule') }}
     {{ 1 if item.type|default(1) == 'replicated' else 3 if item.type|default(1) == 'erasure' else item.type|default(1) }}
     {%- if (item.type | default("1") == '3' or item.type | default("1") == 'erasure') and item.erasure_profile != '' %}
@@ -50,10 +50,10 @@
   when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size

- name: assign application to cephfs pools
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} cephfs"
+  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
   with_items:
-    - "{{ cephfs_data }}"
-    - "{{ cephfs_metadata }}"
+    - "{{ cephfs_data_pool }}"
+    - "{{ cephfs_metadata_pool }}"
   changed_when: false

- name: check and create ceph filesystem
@@ -66,7 +66,7 @@
   failed_when: false

- name: create ceph filesystem
-  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
+  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata_pool.name }} {{ cephfs_data_pool.name }}"
   changed_when: false
   when: check_existing_cephfs.rc != 0

diff --git a/roles/ceph-nfs/tasks/start_nfs.yml b/roles/ceph-nfs/tasks/start_nfs.yml
index 581377848..ee1023526 100644
--- a/roles/ceph-nfs/tasks/start_nfs.yml
+++ b/roles/ceph-nfs/tasks/start_nfs.yml
@@ -6,7 +6,7 @@
   when: containerized_deployment | bool

- name: check if rados index object exists
-  shell: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
+  shell: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
   changed_when: false
   failed_when: false
   register: rados_index_exists
@@ -16,7 +16,7 @@
   run_once: true

- name: create an empty rados index object
-  command: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
+  command: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
   when:
     - ceph_nfs_rados_backend | bool
     - rados_index_exists.rc != 0

diff --git a/roles/ceph-nfs/templates/ganesha.conf.j2 b/roles/ceph-nfs/templates/ganesha.conf.j2
index b441db704..e95d6aee1 100644
--- a/roles/ceph-nfs/templates/ganesha.conf.j2
+++ b/roles/ceph-nfs/templates/ganesha.conf.j2
@@ -33,7 +33,7 @@ RADOS_URLS {
    ceph_conf = '/etc/ceph/{{ cluster }}.conf';
    userid = "{{ ceph_nfs_ceph_user }}";
 }
-%url rados://{{ cephfs_data }}/{{ ceph_nfs_rados_export_index }}
+%url rados://{{ cephfs_data_pool.name }}/{{ ceph_nfs_rados_export_index }}

 NFSv4 {
    RecoveryBackend = 'rados_kv';
@@ -41,7 +41,7 @@ NFSv4 {
 RADOS_KV {
    ceph_conf = '/etc/ceph/{{ cluster }}.conf';
    userid = "{{ ceph_nfs_ceph_user }}";
-   pool = "{{ cephfs_data }}";
+   pool = "{{ cephfs_data_pool.name }}";
 }
 {% endif %}

diff --git a/tests/functional/all_daemons/ceph-override.json b/tests/functional/all_daemons/ceph-override.json
index e7d6137a6..cd4f4ca00 100644
--- a/tests/functional/all_daemons/ceph-override.json
+++ b/tests/functional/all_daemons/ceph-override.json
@@ -12,14 +12,28 @@
     },
     "cephfs_pools": [
         {
-            "name": "cephfs_metadata",
-            "pgs": 8,
-            "size": 1
+            "name": "cephfs_data",
+            "pg_num": 8,
+            "pgp_num": 8,
+            "rule_name": "replicated_rule",
+            "type": 1,
+            "erasure_profile": "",
+            "expected_num_objects": "",
+            "application": "cephfs",
+            "size": 3,
+            "min_size": 0
         },
         {
-            "name": "cephfs_data",
-            "pgs": 8,
-            "size": 1
+            "name": "cephfs_metadata",
+            "pg_num": 8,
+            "pgp_num": 8,
+            "rule_name": "replicated_rule",
+            "type": 1,
+            "erasure_profile": "",
+            "expected_num_objects": "",
+            "application": "cephfs",
+            "size": 3,
+            "min_size": 0
         }
     ],
     "ceph_mon_docker_memory_limit": "2g"

diff --git a/tests/functional/all_daemons/group_vars/all b/tests/functional/all_daemons/group_vars/all
index a12c41d8d..21e0918ec 100644
--- a/tests/functional/all_daemons/group_vars/all
+++ b/tests/functional/all_daemons/group_vars/all
@@ -30,4 +30,4 @@ openstack_cinder_pool:
   application: rbd
 openstack_pools:
   - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
\ No newline at end of file
+  - "{{ openstack_cinder_pool }}"

diff --git a/tests/functional/ooo-collocation/hosts b/tests/functional/ooo-collocation/hosts
index 1275a105b..12f1e35d1 100644
--- a/tests/functional/ooo-collocation/hosts
+++ b/tests/functional/ooo-collocation/hosts
@@ -15,11 +15,31 @@
     ceph_repository: community
     ceph_release: luminous
    ceph_stable: true
-    cephfs_data: manila_data
-    cephfs_metadata: manila_metadata
+    cephfs_data_pool:
+      name: 'manila_data'
+      pg_num: "{{ osd_pool_default_pg_num }}"
+      pgp_num: "{{ osd_pool_default_pg_num }}"
+      rule_name: "replicated_rule"
+      type: 1
+      erasure_profile: ""
+      expected_num_objects: ""
+      application: "cephfs"
+      size: "{{ osd_pool_default_size }}"
+      min_size: "{{ osd_pool_default_min_size }}"
+    cephfs_metadata_pool:
+      name: 'manila_metadata'
+      pg_num: "{{ osd_pool_default_pg_num }}"
+      pgp_num: "{{ osd_pool_default_pg_num }}"
+      rule_name: "replicated_rule"
+      type: 1
+      erasure_profile: ""
+      expected_num_objects: ""
+      application: "cephfs"
+      size: "{{ osd_pool_default_size }}"
+      min_size: "{{ osd_pool_default_min_size }}"
     cephfs_pools:
-      - {name: manila_data, pgs: 8}
-      - {name: manila_metadata, pgs: 8}
+      - "{{ cephfs_data_pool }}"
+      - "{{ cephfs_metadata_pool }}"
     cluster_network: 192.168.96.0/24
     containerized_deployment: true
     devices: [/dev/sda, /dev/sdb, /dev/sdc]
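
Usage sketch: with this patch a CephFS pool is declared with the same keys as an
openstack_pools entry, and the mds role reads item.name, item.pg_num, item.pgp_num and
item.application directly instead of the old pgs shorthand. A minimal group_vars
override under the new schema could look like the following (pool names, pg counts and
sizes here are illustrative, not defaults shipped by this patch):

    cephfs: cephfs
    cephfs_data_pool:
      name: "cephfs_data"
      pg_num: 64
      pgp_num: 64
      rule_name: "replicated_rule"
      type: 1
      erasure_profile: ""
      expected_num_objects: ""
      application: "cephfs"
      size: 3
      min_size: 2
    cephfs_metadata_pool:
      name: "cephfs_metadata"
      pg_num: 16
      pgp_num: 16
      rule_name: "replicated_rule"
      type: 1
      erasure_profile: ""
      expected_num_objects: ""
      application: "cephfs"
      size: 3
      min_size: 2
    cephfs_pools:
      - "{{ cephfs_data_pool }}"
      - "{{ cephfs_metadata_pool }}"

For the data pool above, the updated pool-creation task would render roughly
"ceph osd pool create cephfs_data 64 64 replicated_rule 1", and the application task
would then run "ceph osd pool application enable cephfs_data cephfs".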