Merge pull request #1797 from ceph/purge-lvm

adds purge support for the lvm_osds osd scenario
pull/1799/head
Alfredo Deza 2017-08-23 14:28:29 -04:00 committed by GitHub
commit e651469a2a
7 changed files with 100 additions and 21 deletions


@@ -173,17 +173,29 @@ is only available when the ceph release is Luminous or newer.
Use ``osd_scenario: lvm`` to enable this scenario. Currently we only support dedicated journals
when using lvm, not collocated journals.
To configure this scenario use the ``lvm_volumes`` config option. ``lvm_volumes`` is a dictionary whose
key/value pairs represent a data lv and a journal pair. Journals can be either a lv, device or partition.
You can not use the same journal for many data lvs.
To configure this scenario use the ``lvm_volumes`` config option. ``lvm_volumes`` is a list of dictionaries, each of which
may contain a ``data``, ``journal``, ``data_vg`` and ``journal_vg`` key. The ``data`` key is the name of the logical volume to use
for your OSD data. The ``journal`` key is the logical volume name, device or partition to use for your OSD journal. The ``data_vg``
key is the name of the volume group your ``data`` logical volume resides on; it is required for purging OSDs created
by this scenario. The ``journal_vg`` key is optional and should be the name of the volume group your journal logical volume resides on, if applicable.

.. note::
    Any logical volume or volume group used in ``lvm_volumes`` must be given by name, not by path.

.. note::
    You cannot use the same journal for multiple OSDs.
For example, a configuration to use the ``lvm`` osd scenario would look like::

    osd_scenario: lvm
    lvm_volumes:
      data-lv1: journal-lv1
      data-lv2: /dev/sda
      data:lv3: /dev/sdb1
      - data: data-lv1
        data_vg: vg1
        journal: journal-lv1
        journal_vg: vg2
      - data: data-lv2
        journal: /dev/sda
        data_vg: vg1
      - data: data-lv3
        journal: /dev/sdb1
        data_vg: vg2
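The logical volumes and volume groups named in ``lvm_volumes`` are expected to exist before the playbook runs (the functional tests create theirs in ``tests/functional/lvm_setup.yml``). Below is a minimal provisioning sketch for the first example entry, using Ansible's ``lvg`` and ``lvol`` modules; the disks ``/dev/sdb`` and ``/dev/sdc`` and the journal size are assumptions, not part of this change::

    # Hypothetical setup play: create vg1/data-lv1 and vg2/journal-lv1 as used
    # in the example above. Adjust disks and sizes to your hardware.
    - hosts: osds
      become: true
      tasks:
        - name: create the data volume group
          lvg:
            vg: vg1
            pvs: /dev/sdb

        - name: create the journal volume group
          lvg:
            vg: vg2
            pvs: /dev/sdc

        - name: create the data logical volume
          lvol:
            vg: vg1
            lv: data-lv1
            size: 100%FREE

        - name: create the journal logical volume
          lvol:
            vg: vg2
            lv: journal-lv1
            size: 5G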


@@ -194,12 +194,17 @@
  tasks:

  - name: set devices if osd scenario is lvm
    set_fact:
      devices: []
    when: osd_scenario == "lvm"

  - name: check for a device list
    fail:
      msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/{{ osd_group_name }} using the devices variable."
    when:
      - devices|length == 0
      - osd_auto_discovery
      - osd_auto_discovery|default(false)

  - name: get osd numbers
    shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
@@ -333,6 +338,24 @@
      - ceph_disk_present.rc == 0
      - ceph_data_partlabels.rc == 0

  # this should go away once 'ceph-volume lvm zap' is available
  - name: remove osd logical volumes
    command: "lvremove -f {{ item.data_vg }}/{{ item.data }}"
    with_items: "{{ lvm_volumes }}"
    when:
      - osd_scenario == "lvm"

  # this should go away once 'ceph-volume lvm zap' is available
  - name: remove osd lvm journals
    command: "lvremove -f {{ item.journal_vg }}/{{ item.journal }}"
    with_items: "{{ lvm_volumes }}"
    # journals might be logical volumes, but they could also be
    # devices so fail silently if this doesn't work
    failed_when: false
    when:
      - osd_scenario == "lvm"
      - item.journal_vg is defined

  - name: get ceph journal partitions
    shell: |
      blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
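Both ``lvremove`` tasks are marked as temporary. A hypothetical replacement once ``ceph-volume lvm zap`` is available might look like the sketch below; the zap subcommand's argument form is an assumption, not something this change adds:

  # Hypothetical sketch: let ceph-volume tear down the data LV instead of
  # calling lvremove directly (assumes zap accepts a vg/lv argument).
  - name: zap osd data logical volumes with ceph-volume
    command: "ceph-volume lvm zap {{ item.data_vg }}/{{ item.data }}"
    with_items: "{{ lvm_volumes }}"
    when:
      - osd_scenario == "lvm"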


@@ -191,15 +191,25 @@ bluestore_wal_devices: "{{ dedicated_devices }}"
# III. Use ceph-volume to create OSDs from logical volumes.
# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
# when using lvm, not collocated journals.
# lvm_volumes is a dictionary whose key/value pair represent a data lv and a journal pair.
# Any logical volume or logical group used must be a name and not a path.
# Journals can be either a lv, device or partition. You can not use the same journal for many data lvs.
# lvm_volumes is a list of dictionaries. Each dictionary must contain a data, journal and data_vg
# key. Any logical volume or volume group used must be a name and not a path.
# data must be a logical volume
# journal can be either a lv, device or partition. You cannot use the same journal for many data lvs.
# data_vg must be the volume group name of the data lv
# journal_vg is optional and must be the volume group name of the journal lv, if applicable
# For example:
# lvm_volumes:
#   data-lv1: journal-lv1
#   data-lv2: /dev/sda
#   data:lv3: /dev/sdb1
lvm_volumes: {}
#   - data: data-lv1
#     data_vg: vg1
#     journal: journal-lv1
#     journal_vg: vg2
#   - data: data-lv2
#     journal: /dev/sda
#     data_vg: vg1
#   - data: data-lv3
#     journal: /dev/sdb1
#     data_vg: vg2
lvm_volumes: []
##########


@@ -77,15 +77,15 @@
    - not osd_auto_discovery
    - lvm_volumes|length == 0

- name: make sure the lvm_volumes variable is a dictionary
- name: make sure the lvm_volumes variable is a list
  fail:
    msg: "lvm_volumes: must be a dictionary"
    msg: "lvm_volumes: must be a list"
  when:
    - osd_group_name is defined
    - osd_group_name in group_names
    - not osd_auto_discovery
    - osd_scenario == "lvm"
    - lvm_volumes is not mapping
    - lvm_volumes is string

- name: make sure the devices variable is a list
  fail:
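The updated condition only rejects a plain string value. A stricter, hypothetical follow-up check (not part of this change) could also verify that every entry carries the keys the lvm scenario relies on:

# Hypothetical sketch: catch a missing data, journal or data_vg key before
# any OSD is created or purged.
- name: make sure each lvm_volumes entry has the required keys
  fail:
    msg: "each lvm_volumes entry needs data, journal and data_vg keys"
  with_items: "{{ lvm_volumes }}"
  when:
    - osd_group_name is defined
    - osd_group_name in group_names
    - osd_scenario == "lvm"
    - item.data is not defined or item.journal is not defined or item.data_vg is not defined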


@@ -1,4 +1,4 @@
---
- name: use ceph-volume to create filestore osds with dedicated journals
  command: "ceph-volume lvm create --filestore --data {{ item.key }} --journal {{ item.value }}"
  with_dict: "{{ lvm_volumes }}"
  command: "ceph-volume lvm create --filestore --data {{ item.data }} --journal {{ item.journal }}"
  with_items: "{{ lvm_volumes }}"
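To make the new template concrete, this is how the command renders for the first entry of the documentation example above (illustration only, no new behaviour):

# item:
#   data: data-lv1
#   data_vg: vg1
#   journal: journal-lv1
#   journal_vg: vg2
# rendered command:
#   ceph-volume lvm create --filestore --data data-lv1 --journal journal-lv1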


@@ -12,7 +12,9 @@ osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:
  test_volume: /dev/sdc
  - data: test_volume
    journal: /dev/sdc
    data_vg: test_group
os_tuning_params:
  - { name: kernel.pid_max, value: 4194303 }
  - { name: fs.file-max, value: 26234859 }

tox.ini

@@ -1,6 +1,6 @@
[tox]
envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster}
  {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds}
  {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds}
skipsdist = True
@@ -20,6 +20,35 @@ commands=
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
  "
  # set up the cluster again
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ceph_stable_release={env:CEPH_STABLE_RELEASE:kraken} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
  "
  # test that the cluster can be redeployed in a healthy state
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

[purge-lvm]
commands=
  cp {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml}
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
      ireallymeanit=yes \
      remove_packages=yes \
      ceph_stable_release={env:CEPH_STABLE_RELEASE:kraken} \
      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
  "
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  # set up the cluster again
  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
      ceph_stable_release={env:CEPH_STABLE_RELEASE:kraken} \
@@ -118,6 +147,7 @@ changedir=
  bluestore_docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/bs-dock-ded-jrn
  bluestore_docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-dock-crypt-jrn-col
  lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
  purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds

commands=
  rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
@@ -127,6 +157,7 @@ commands=
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
  lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  purge_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
  rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
@@ -145,6 +176,7 @@ commands=
  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
  purge_cluster: {[purge]commands}
  purge_lvm_osds: {[purge-lvm]commands}
  purge_dmcrypt: {[purge]commands}
  purge_docker_cluster: {[purge]commands}
  update_dmcrypt: {[update]commands}