allow adding a manager to a deployed cluster

Add a playbook that deploys a manager on a new node and adds that node to
an already deployed Ceph cluster.

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1677431
Signed-off-by: Rishabh Dave <ridave@redhat.com>
pull/3943/head
Rishabh Dave 2019-02-09 13:16:12 +05:30 committed by Guillaume Abrioux
parent 6e8fb2b3ea
commit d2cfd8b780
13 changed files with 241 additions and 1 deletion
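
The changes below are all test scaffolding for the new add-mgrs functional scenario, in container and non-container variants: symlinks to the shared Vagrantfile and to all_daemons/ceph-override.json, group_vars, hosts inventories, Vagrant variables, and a new tox environment. As the [add-mgrs] tox commands at the end show, the manager is added by re-running the site playbook against an inventory that contains a [mgrs] group, limited to that group. A minimal sketch of the same invocation outside the test harness (the inventory name and playbook path are illustrative, taken from the test commands):

    # hosts-2: the existing cluster inventory plus the new node in a [mgrs] group
    ansible-playbook -vv -i hosts-2 --limit mgrs site.yml.sample --extra-vars "ireallymeanit=yes"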


@@ -0,0 +1 @@
../../../Vagrantfile


@@ -0,0 +1 @@
../all_daemons/ceph-override.json


@@ -0,0 +1 @@
../../../../Vagrantfile


@@ -0,0 +1 @@
../../all_daemons/ceph-override.json


@@ -0,0 +1,28 @@
---
docker: True
ceph_origin: repository
ceph_repository: community
containerized_deployment: true
cluster: ceph
public_network: "192.168.75.0/24"
cluster_network: "192.168.76.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:
  - data: data-lv1
    data_vg: test_group
  - data: data-lv2
    data_vg: test_group
    db: journal1
    db_vg: journals
os_tuning_params:
  - { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
  global:
    osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp


@@ -0,0 +1,5 @@
[mons]
mon0

[osds]
osd0


@@ -0,0 +1,8 @@
[mons]
mon0

[osds]
osd0

[mgrs]
mgr0
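
This pair of inventories drives the test: the layout suggests the cluster is first deployed from the inventory that has only [mons] and [osds]; hosts-2 then adds mgr0 under a [mgrs] group, and the [add-mgrs] tox commands at the bottom re-run site.yml against it with --limit mgrs. The same pair is repeated below for the non-container scenario.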


@@ -0,0 +1,70 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: true
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 1
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.75
cluster_subnet: 192.168.76
# MEMORY
# set 1024 for CentOS
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
  - { name: fs.file-max, value: 26234859 }


@@ -0,0 +1,25 @@
---
ceph_origin: repository
ceph_repository: dev
cluster: ceph
public_network: "192.168.73.0/24"
cluster_network: "192.168.74.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:
  - data: data-lv1
    data_vg: test_group
  - data: data-lv2
    data_vg: test_group
    db: journal1
    db_vg: journals
os_tuning_params:
  - { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
  global:
    osd_pool_default_size: 1


@@ -0,0 +1,5 @@
[mons]
mon0

[osds]
osd0


@@ -0,0 +1,8 @@
[mons]
mon0

[osds]
osd0

[mgrs]
mgr0


@@ -0,0 +1,70 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 1
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.73
cluster_subnet: 192.168.74
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
  - { name: fs.file-max, value: 26234859 }

tox.ini

@@ -1,5 +1,5 @@
 [tox]
-envlist = {dev,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons,collocation,bluestore_lvm_osds,lvm_osds,shrink_mon,shrink_osd,lvm_batch,add_mons,add_osds,add_mdss,add_rbdmirrors,add_rgws,rgw_multisite,purge,storage_inventory,lvm_auto_discovery}
+envlist = {dev,rhcs}-{centos,ubuntu}-{container,non_container}-{all_daemons,collocation,bluestore_lvm_osds,lvm_osds,shrink_mon,shrink_osd,lvm_batch,add_mons,add_osds,add_mgrs,add_mdss,add_rbdmirrors,add_rgws,rgw_multisite,purge,storage_inventory,lvm_auto_discovery}
   {dev,rhcs}-{centos,ubuntu}-container-{ooo_collocation,podman}
   {dev,rhcs}-{centos,ubuntu}-non_container-{switch_to_containers}
   dev-rhel-container-podman
@@ -241,6 +241,21 @@ commands=
   "
   py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+
+[add-mgrs]
+commands=
+  ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/tests/functional/setup.yml
+  ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+      ireallymeanit=yes \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-master} \
+      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
+  "
+  testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
 
 [add-mdss]
 commands=
   ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml
@@ -394,6 +409,7 @@ changedir=
   ooo_collocation: {toxinidir}/tests/functional/ooo-collocation
   add_mons: {toxinidir}/tests/functional/add-mons{env:CONTAINER_DIR:}
   add_osds: {toxinidir}/tests/functional/add-osds{env:CONTAINER_DIR:}
+  add_mgrs: {toxinidir}/tests/functional/add-mgrs{env:CONTAINER_DIR:}
   add_mdss: {toxinidir}/tests/functional/add-mdss{env:CONTAINER_DIR:}
   add_rbdmirrors: {toxinidir}/tests/functional/add-rbdmirrors{env:CONTAINER_DIR:}
   add_rgws: {toxinidir}/tests/functional/add-rgws{env:CONTAINER_DIR:}
@@ -451,6 +467,7 @@ commands=
   shrink_osd: {[shrink-osd]commands}
   add_osds: {[add-osds]commands}
   add_mons: {[add-mons]commands}
+  add_mgrs: {[add-mgrs]commands}
   add_mdss: {[add-mdss]commands}
   add_rbdmirrors: {[add-rbdmirrors]commands}
   add_rgws: {[add-rgws]commands}
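
With add_mgrs wired into envlist, changedir, and the commands dispatch above, the scenario runs like the other add_* environments. An example invocation, with the env name assembled from the envlist factors (assumes a local tox and Vagrant setup):

    tox -e dev-centos-container-add_mgrs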