allow adding an MDS to an already deployed cluster

Add a tox scenario that adds a new MDS node to an already deployed
Ceph cluster and deploys an MDS there.

Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit c0dfa9b61a)
pull/3827/head
Rishabh Dave 2019-02-12 08:45:44 +05:30 committed by Guillaume Abrioux
parent 8715490223
commit c60915733a
14 changed files with 271 additions and 16 deletions
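
For reference, a scenario added this way is run like any other functional scenario through tox. The environment name below is an assumption pieced together from the envlist factors introduced in tox.ini (release-distro-container_mode-scenario), not a name stated anywhere in this change:

    # hypothetical invocation for the containerized CentOS flavour of the new scenario
    tox -e nautilus-centos-container-add_mdss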

View File

@@ -1,45 +1,60 @@
 ---
-- name: filesystem pools related tasks
+- name: compile a list of pool names
+  set_fact:
+    cephfs_pool_names: "{{ cephfs_pools | map(attribute='name') | list }}"
+
+- name: check if filesystem pool already exists
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  block:
+    - name: get and store list of filesystem pools
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls"
+      changed_when: false
+      register: osd_pool_ls
+
+    - name: look whether pools to be created are present in the output
+      set_fact:
+        fs_pools_created: True
+      when: osd_pool_ls.stdout_lines | intersect(cephfs_pool_names) | length > 0
+
+- name: deploy filesystem pools
+  when: fs_pools_created is not defined
+  delegate_to: "{{ groups[mon_group_name][0] }}"
   block:
     - name: create filesystem pools
-      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs | default(osd_pool_default_pg_num) }}"
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs | default(osd_pool_default_pg_num) }}"
       changed_when: false
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      with_items:
-        - "{{ cephfs_pools }}"
+      with_items: "{{ cephfs_pools }}"

     - name: customize pool size
-      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
       with_items: "{{ cephfs_pools | unique }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
       changed_when: false
       when:
         - item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size

     - name: customize pool min_size
-      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
       with_items: "{{ cephfs_pools | unique }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
       changed_when: false
       when:
         - (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size

 - name: check if ceph filesystem already exists
-  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
+  command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
   register: check_existing_cephfs
   changed_when: false
-  delegate_to: "{{ groups[mon_group_name][0] }}"
   failed_when: false
+  delegate_to: "{{ groups[mon_group_name][0] }}"

 - name: create ceph filesystem
-  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
+  command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
     - check_existing_cephfs.rc != 0

 - name: assign application to cephfs pools
-  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} {{ cephfs }}"
+  command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} {{ cephfs }}"
   with_items:
     - "{{ cephfs_data }}"
     - "{{ cephfs_metadata }}"
@@ -49,14 +64,14 @@
     - check_existing_cephfs.rc != 0

 - name: allow multimds
-  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it"
+  command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
     - ceph_release_num[ceph_release] == ceph_release_num.luminous

 - name: set max_mds
-  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
+  command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
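
The net effect of the new pre-checks is that re-running the role against a cluster whose CephFS pools and filesystem already exist becomes a no-op. A rough shell equivalent of what the tasks above execute on the first monitor, assuming the role defaults for the cluster name (ceph), filesystem name (cephfs) and pool names, and an empty docker_exec_cmd on non-containerized deployments:

    # illustration of the task logic only, not part of the playbook; pg count is arbitrary
    ceph --cluster ceph osd pool ls | grep -q '^cephfs_data$' || \
        ceph --cluster ceph osd pool create cephfs_data 8
    ceph --cluster ceph fs get cephfs 2>/dev/null || \
        ceph --cluster ceph fs new cephfs cephfs_metadata cephfs_data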

View File

@@ -0,0 +1 @@
../../../Vagrantfile

View File

@@ -0,0 +1 @@
../all_daemons/ceph-override.json

View File

@@ -0,0 +1 @@
../../../../Vagrantfile

View File

@@ -0,0 +1 @@
../../all_daemons/ceph-override.json

View File

@@ -0,0 +1,28 @@
---
docker: True
ceph_origin: repository
ceph_repository: community
containerized_deployment: true
cluster: ceph
public_network: "192.168.63.0/24"
cluster_network: "192.168.64.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:
  - data: data-lv1
    data_vg: test_group
  - data: data-lv2
    data_vg: test_group
    db: journal1
    db_vg: journals
os_tuning_params:
  - { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
  global:
    osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp
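
The lvm_volumes entries above point at logical volumes that the test harness prepares beforehand (per the comment, tests/functional/lvm_setup.yml carves them out of /dev/sdb). Purely as a sketch of the expected layout, not a copy of that playbook:

    # assumed layout; device names and sizes are illustrative
    vgcreate test_group /dev/sdb
    lvcreate -n data-lv1 -l 50%VG test_group
    lvcreate -n data-lv2 -l 100%FREE test_group
    vgcreate journals /dev/sdc
    lvcreate -n journal1 -l 100%FREE journals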

View File

@@ -0,0 +1,5 @@
[mons]
mon0

[osds]
osd0

View File

@@ -0,0 +1,8 @@
[mons]
mon0

[osds]
osd0

[mdss]
mds0
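
The scenario first deploys a one-mon, one-OSD cluster from the hosts file above, then re-runs the playbook against hosts-2, which adds mds0, limited to the mdss group — this is what the [add-mdss] tox section further down does. A hand-run equivalent, with the extra-vars omitted and the inventory path shortened for illustration, would be roughly:

    ansible-playbook -vv -i hosts-2 --limit mdss site.yml.sample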

View File

@@ -0,0 +1,70 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: true
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 1
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.63
cluster_subnet: 192.168.64
# MEMORY
# set 1024 for CentOS
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: fs.file-max, value: 26234859 }

View File

@@ -0,0 +1,25 @@
---
ceph_origin: repository
ceph_repository: community
cluster: ceph
public_network: "192.168.61.0/24"
cluster_network: "192.168.62.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:
  - data: data-lv1
    data_vg: test_group
  - data: data-lv2
    data_vg: test_group
    db: journal1
    db_vg: journals
os_tuning_params:
  - { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
  global:
    osd_pool_default_size: 1

View File

@@ -0,0 +1,5 @@
[mons]
mon0

[osds]
osd0

View File

@@ -0,0 +1,8 @@
[mons]
mon0

[osds]
osd0

[mdss]
mds0

View File

@@ -0,0 +1,70 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 1
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.61
cluster_subnet: 192.168.62
# MEMORY
# set 1024 for CentOS
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: fs.file-max, value: 26234859 }

tox.ini
View File

@@ -1,5 +1,5 @@
 [tox]
-envlist = {nautilus,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons,collocation,update,bluestore_lvm_osds,lvm_osds,shrink_mon,shrink_osd,lvm_batch,add_osds,rgw_multisite,purge,storage_inventory,lvm_auto_discovery}
+envlist = {nautilus,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons,collocation,update,bluestore_lvm_osds,lvm_osds,shrink_mon,shrink_osd,lvm_batch,add_osds,add_mdss,rgw_multisite,purge,storage_inventory,lvm_auto_discovery}
   {nautilus,rhcs}-{centos,ubuntu}-container-{ooo_collocation,podman}
   {nautilus,rhcs}-{centos,ubuntu}-non_container-{switch_to_containers}
   nautilus-rhel-container-podman
@@ -158,6 +158,21 @@ commands=
   "
   py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests

+[add-mdss]
+commands=
+  ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml
+  ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+      ireallymeanit=yes \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
+  "
+  py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+
 [rgw-multisite]
 commands=
   bash -c "cd {changedir}/secondary && vagrant up --no-provision {posargs:--provider=virtualbox}"
@@ -260,6 +275,7 @@ changedir=
   bluestore_lvm_osds: {toxinidir}/tests/functional/bs-lvm-osds{env:CONTAINER_DIR:}
   ooo_collocation: {toxinidir}/tests/functional/ooo-collocation
   add_osds: {toxinidir}/tests/functional/add-osds{env:CONTAINER_DIR:}
+  add_mdss: {toxinidir}/tests/functional/add-mdss{env:CONTAINER_DIR:}
   rgw_multisite: {toxinidir}/tests/functional/rgw-multisite{env:CONTAINER_DIR:}
   podman: {toxinidir}/tests/functional/podman
   storage_inventory: {toxinidir}/tests/functional/lvm-osds{env:CONTAINER_DIR:}
@@ -311,6 +327,7 @@ commands=
   shrink_mon: {[shrink-mon]commands}
   shrink_osd: {[shrink-osd]commands}
   add_osds: {[add-osds]commands}
+  add_mdss: {[add-mdss]commands}
   rgw_multisite: {[rgw-multisite]commands}
   storage_inventory: {[storage-inventory]commands}