Merge pull request #1976 from ceph/rbd_pool

Only perform actions on the rbd pool after it has been created
pull/1992/head v3.0.0rc16
Sébastien Han 2017-10-04 18:52:49 +02:00 committed by GitHub
commit 163c87af79
27 changed files with 105 additions and 113 deletions
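
In short, the change moves every rbd pool action behind an explicit existence check: look the pool up first, create it only if it is missing, and only then apply pg/size tuning. A minimal sketch of that ordering, assuming the cluster, mon_group_name and osd_pool_default_pg_num variables already used elsewhere in these roles (an illustration, not the literal tasks merged here):

- name: check whether the rbd pool exists
  command: ceph --cluster {{ cluster }} osd pool ls --format json
  register: rbd_pool_exists
  changed_when: false
  delegate_to: "{{ groups[mon_group_name][0] }}"

- name: create the rbd pool only when it is missing
  command: ceph --cluster {{ cluster }} osd pool create rbd {{ osd_pool_default_pg_num }}
  when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
  delegate_to: "{{ groups[mon_group_name][0] }}"

# any later pg_num/size adjustment is gated the same way, so nothing touches a pool
# that has not been created yet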

View File

@@ -6,5 +6,4 @@
include: create_users_keys.yml
when:
- user_config
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_pg_num is defined
- ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
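
The .get() chain above replaces the old global_in_ceph_conf_overrides fact plus the "is defined" test with one expression that stays safe when either level of the dictionary is missing. A throwaway debug task (illustration only, not part of this PR) showing how it resolves:

# ceph_conf_overrides: {}                                       -> False (condition skipped)
# ceph_conf_overrides: {global: {}}                             -> False (condition skipped)
# ceph_conf_overrides: {global: {osd_pool_default_pg_num: 8}}   -> 8     (truthy, condition passes)
- name: show how the overrides guard resolves
  debug:
    msg: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) }}"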

View File

@@ -21,7 +21,3 @@
when:
- cephx
- copy_admin_key
- name: set_fact global_in_ceph_conf_overrides
set_fact:
global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"

View File

@@ -1,4 +1,19 @@
---
- name: check if a rbd pool exists
command: ceph --cluster {{ cluster }} osd pool ls --format json
register: rbd_pool_exists
- name: get default value for osd_pool_default_pg_num
command: ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num
register: osd_pool_default_pg_num
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: create a rbd pool if it doesn't exist
command: ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: igw_gateway (tgt) | configure iscsi target (gateway)
igw_gateway:
mode: "target"

View File

@@ -78,55 +78,11 @@
- ceph_release_num.{{ ceph_release }} > ceph_release_num.jewel
with_items: "{{ groups.get(mgr_group_name, []) }}"
- name: include set_osd_pool_default_pg_num.yml
include: set_osd_pool_default_pg_num.yml
- name: crush_rules.yml
include: crush_rules.yml
when:
- crush_rule_config
- name: test if rbd exists
shell: |
ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd
changed_when: false
failed_when: false
register: rbd_pool_exist
- name: include rbd_pool.yml
include: rbd_pool.yml
when: rbd_pool_exist.rc == 0
- name: include rbd_pool_pgs.yml
include: rbd_pool_pgs.yml
when:
- rbd_pool_exist.rc == 0
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_pg_num is defined
- name: include rbd_pool_size.yml
include: rbd_pool_size.yml
when:
- rbd_pool_exist.rc == 0
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_size is defined
- name: create rbd pool on luminous
shell: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}
changed_when: false
failed_when: false
when:
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_pg_num is defined
- rbd_pool_exist.rc != 0
- name: include openstack_config.yml
include: openstack_config.yml
when:
- openstack_config
- inventory_hostname == groups[mon_group_name] | last
- name: find ceph keys
shell: ls -1 /etc/ceph/*.keyring
changed_when: false

View File

@@ -89,16 +89,6 @@
- "{{ inventory_hostname == groups[mon_group_name] | last }}"
- not containerized_deployment_with_kv
- name: include ceph-mon/tasks/set_osd_pool_default_pg_num.yml
include: "{{ lookup('env', 'ANSIBLE_ROLES_PATH') | default (playbook_dir + '/roles', true) }}/ceph-mon/tasks/set_osd_pool_default_pg_num.yml"
# create openstack pools only when all mons are up.
- name: include ceph-mon/tasks/openstack_config.yml
include: "{{ lookup('env', 'ANSIBLE_ROLES_PATH') | default (playbook_dir + '/roles', true) }}/ceph-mon/tasks/openstack_config.yml"
when:
- openstack_config
- "{{ inventory_hostname == groups[mon_group_name] | last }}"
- block:
- name: create ceph mgr keyring(s) when mon is containerized
command: docker exec ceph-mon-{{ ansible_hostname }} ceph --cluster {{ cluster }} auth get-or-create mgr.{{ hostvars[item]['ansible_hostname'] }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring

View File

@@ -32,6 +32,23 @@
include: docker/main.yml
when: containerized_deployment
- name: include set_osd_pool_default_pg_num.yml
include: set_osd_pool_default_pg_num.yml
# Create the pools listed in openstack_pools
- name: include openstack_config.yml
include: openstack_config.yml
when:
- openstack_config
- inventory_hostname == groups[mon_group_name] | last
# CEPH creates the rbd pool during the ceph cluster initialization in
# releases prior to luminous. If the rbd_pool.yml playbook is called too
# early, the rbd pool does not exist yet.
- name: include rbd_pool.yml
include: rbd_pool.yml
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
- name: include create_mds_filesystems.yml
include: create_mds_filesystems.yml
when:

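The pre-luminous gate on rbd_pool.yml above leans on the codename-to-version map that ceph-ansible keeps in its common defaults; its assumed shape (shown only to make the comparison concrete):

ceph_release_num:
  jewel: 10
  kraken: 11
  luminous: 12

# "ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous" is therefore true only for
# pre-luminous clusters, the only releases where Ceph itself pre-creates the rbd pool.
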
View File

@@ -1,17 +1,25 @@
---
- name: check rbd pool usage
- name: test if rbd exists
shell: |
ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'
"{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd"
changed_when: false
failed_when: false
always_run: true
register: rbd_pool_df
run_once: true
check_mode: true
register: rbd_pool_exist
- name: check pg num for rbd pool
shell: |
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'
changed_when: false
failed_when: false
always_run: true
register: rbd_pool_pgs
- name: include rbd_pool_df.yml
include: rbd_pool_df.yml
when: rbd_pool_exist.rc == 0
- name: include rbd_pool_pgs.yml
include: rbd_pool_pgs.yml
when:
- rbd_pool_exist.rc == 0
- ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
- name: include rbd_pool_size.yml
include: rbd_pool_size.yml
when:
- rbd_pool_exist.rc == 0
- ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False

View File

@@ -0,0 +1,14 @@
---
- name: verify that rbd pool exists
fail:
msg: "rbd pool does not exist in rbd_pool_df"
when: rbd_pool_exist.rc != 0
- name: check rbd pool usage
shell: |
"{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'"
changed_when: false
failed_when: false
check_mode: true
run_once: true
register: rbd_pool_df
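
The awk in the usage check above prints the third column of the rbd line from ceph df; assuming the jewel/luminous-era POOLS table layout (NAME ID USED %USED MAX AVAIL OBJECTS), that is the USED column, so the rbd_pool_df.stdout == "0" conditions used later mean "only destroy or resize the rbd pool while it is still empty". Illustrative output (not captured for this PR):

# $ ceph --cluster ceph df
# POOLS:
#     NAME     ID     USED     %USED     MAX AVAIL     OBJECTS
#     rbd      0      0        0         56.3G         0
#
# awk '/rbd/ {print $3}'  ->  0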

View File

@@ -1,10 +1,25 @@
---
- name: destroy and recreate rbd pool if osd_pool_default_pg_num is not honoured
- name: verify that rbd pool exists
fail:
msg: "rbd pool does not exist in rbd_pool_pgs"
when: rbd_pool_exist.rc != 0
- name: check pg num for rbd pool
shell: |
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool rm rbd rbd --yes-i-really-really-mean-it
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}
"{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'"
changed_when: false
failed_when: false
check_mode: true
run_once: true
register: rbd_pool_pgs
- name: destroy and recreate rbd pool if osd_pool_default_pg_num is not honoured
shell: |
"{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool delete rbd rbd --yes-i-really-really-mean-it"
"{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
changed_when: false
failed_when: false
run_once: true
when:
- rbd_pool_df.stdout == "0"
- rbd_pool_pgs.stdout != "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"

View File

@@ -1,16 +1,23 @@
---
- name: verify that rbd pool exists
fail:
msg: "rbd pool does not exist in rbd_pool_size"
when: rbd_pool_exist.rc != 0
- name: check size for rbd pool
shell: |
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd size | awk '{print $2}'
"{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd size | awk '{print $2}'"
changed_when: false
failed_when: false
always_run: true
check_mode: true
run_once: true
register: rbd_pool_size
- name: change rbd pool size if osd_pool_default_size is not honoured
command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set rbd size {{ ceph_conf_overrides.global.osd_pool_default_size }}
command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set rbd size {{ ceph_conf_overrides.global.osd_pool_default_size }}"
changed_when: false
failed_when: false
run_once: true
when:
- rbd_pool_df.stdout == "0"
- rbd_pool_size.stdout != "{{ ceph_conf_overrides.global.osd_pool_default_size }}"

View File

@@ -2,40 +2,30 @@
# so people that had 'pool_default_pg_num' declared will get
# the same behaviour
#
- name: check if does global key exist in ceph_conf_overrides
set_fact:
global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"
- name: check if ceph_conf_overrides.global.osd_pool_default_pg_num is set
set_fact:
osd_pool_default_pg_num_in_overrides: "{{ 'osd_pool_default_pg_num' in ceph_conf_overrides.global }}"
when: global_in_ceph_conf_overrides
- name: get default value for osd_pool_default_pg_num
shell: |
{{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num | grep -Po '(?<="osd_pool_default_pg_num": ")[^"]*'
{{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num
failed_when: false
changed_when: false
run_once: true
register: default_pool_default_pg_num
when: pool_default_pg_num is not defined or not global_in_ceph_conf_overrides
when:
- pool_default_pg_num is not defined
- ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) == False
- name: set_fact osd_pool_default_pg_num pool_default_pg_num
- name: set_fact osd_pool_default_pg_num with pool_default_pg_num (backward compatibility)
set_fact:
osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
when: pool_default_pg_num is defined
- name: set_fact osd_pool_default_pg_num default_pool_default_pg_num.stdout
- name: set_fact osd_pool_default_pg_num with default_pool_default_pg_num.stdout
set_fact:
osd_pool_default_pg_num: "{{ default_pool_default_pg_num.stdout }}"
osd_pool_default_pg_num: "{{ (default_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
when:
- pool_default_pg_num is not defined
- default_pool_default_pg_num.rc == 0
- (osd_pool_default_pg_num_in_overrides is not defined or not osd_pool_default_pg_num_in_overrides)
- default_pool_default_pg_num.get('rc') == 0
- name: set_fact osd_pool_default_pg_num ceph_conf_overrides.global.osd_pool_default_pg_num
set_fact:
osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
when:
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_pg_num is defined
- ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
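
Net effect of the rewritten chain, as a hedged summary: ceph_conf_overrides.global.osd_pool_default_pg_num wins when present (its set_fact runs last), a legacy pool_default_pg_num variable is honoured next, and only when neither is set is the value read back from a running monitor with "config get". Illustrative group_vars (not part of this PR):

# resolves to 64 even if pool_default_pg_num is also set, because the overrides set_fact runs last
ceph_conf_overrides:
  global:
    osd_pool_default_pg_num: 64

# with the override absent, this legacy variable would be used instead
# pool_default_pg_num: 128

# with both absent, the fact comes from
# "{{ docker_exec_cmd }} ceph --cluster <cluster> daemon mon.<hostname> config get osd_pool_default_pg_num"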

View File

@@ -23,7 +23,6 @@ user_config: True
openstack_config: True
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864

View File

@@ -21,7 +21,6 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864

View File

@@ -18,7 +18,6 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864

View File

@@ -18,7 +18,6 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864

View File

@@ -18,7 +18,6 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
osd:
bluestore block db size = 67108864

View File

@@ -22,7 +22,6 @@ user_config: True
openstack_config: True
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
nfs_ganesha_stable: true
nfs_ganesha_dev: false

View File

@@ -21,5 +21,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -18,5 +18,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -24,7 +24,6 @@ rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
user_config: True
keys:

View File

@@ -17,5 +17,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -16,5 +16,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -18,5 +18,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -25,5 +25,4 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -30,7 +30,6 @@ class TestMDSs(object):
@pytest.mark.docker
def test_docker_mds_is_up(self, node, host):
hostname = node["vars"]["inventory_hostname"]
hostname = node["groups"]["mons"][0]["inventory_hostname"]
cmd = "sudo docker exec ceph-mds-{hostname} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
hostname=node["vars"]["inventory_hostname"],
cluster=node["cluster_name"]

View File

@@ -20,7 +20,6 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1
debian_ceph_packages:
- ceph

View File

@@ -220,7 +220,7 @@ commands=
# wait 5 minutes for services to be ready
sleep 300
# test cluster state using ceph-ansible tests
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
# reboot all vms
vagrant reload --no-provision
@@ -228,7 +228,7 @@ commands=
# wait 5 minutes for services to be ready
sleep 300
# retest to ensure cluster came back up correctly after rebooting
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
# handlers/idempotency test
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} \