From b93794bed409bff8e2eb5482f21575e775937124 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Wed, 19 Jul 2017 10:05:42 -0500
Subject: [PATCH 01/14] adds a new 'lvm_osds' osd scenario

This scenario creates OSDs using ceph-volume and is only available in
Ceph releases Luminous and newer.

Signed-off-by: Andrew Schoen
---
 roles/ceph-osd/defaults/main.yml              | 13 ++++++++
 roles/ceph-osd/tasks/check_mandatory_vars.yml | 30 +++++++++++++++++++
 roles/ceph-osd/tasks/main.yml                 |  8 +++++
 roles/ceph-osd/tasks/scenarios/lvm.yml        |  5 ++++
 4 files changed, 56 insertions(+)
 create mode 100644 roles/ceph-osd/tasks/scenarios/lvm.yml

diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 1070e46f7..fe4283450 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -187,6 +187,19 @@ dedicated_devices: []
 # /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
 bluestore_wal_devices: "{{ dedicated_devices }}"
 
+# VII. Use ceph-volume to create OSDs from logical volumes.
+# Use 'lvm_osds:true' to enable this scenario. Currently we only support dedicated journals
+# when using lvm, not collocated journals.
+# lvm_volumes is a dictionary whose key/value pairs represent a data lv and its journal.
+# Journals can be either a lv, device or partition. You cannot use the same journal for multiple data lvs.
+# For example:
+# lvm_volumes:
+#   data-lv1: journal-lv1
+#   data-lv2: /dev/sda
+#   data-lv3: /dev/sdb1
+lvm_osds: false
+lvm_volumes: {}
+
 
 ##########
 # DOCKER #
diff --git a/roles/ceph-osd/tasks/check_mandatory_vars.yml b/roles/ceph-osd/tasks/check_mandatory_vars.yml
index c3ea103b0..353be99ee 100644
--- a/roles/ceph-osd/tasks/check_mandatory_vars.yml
+++ b/roles/ceph-osd/tasks/check_mandatory_vars.yml
@@ -46,6 +46,26 @@
     - not osd_auto_discovery
     - devices|length == 0
 
+- name: verify lvm_volumes have been provided
+  fail:
+    msg: "please provide lvm_volumes to your osd scenario"
+  when:
+    - osd_group_name is defined
+    - osd_group_name in group_names
+    - lvm_osds
+    - not osd_auto_discovery
+    - lvm_volumes|length == 0
+
+- name: make sure the lvm_volumes variable is a dictionary
+  fail:
+    msg: "lvm_volumes: must be a dictionary"
+  when:
+    - osd_group_name is defined
+    - osd_group_name in group_names
+    - not osd_auto_discovery
+    - lvm_osds
+    - lvm_volumes is not mapping
+
 - name: make sure the devices variable is a list
   fail:
     msg: "devices: must be a list, not a string, i.e. [ \"/dev/sda\" ]"
@@ -87,3 +107,13 @@
     - not containerized_deployment
     - osd_objectstore == 'bluestore'
     - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+
+- name: check if lvm_osds is supported by the selected ceph version
+  fail:
+    msg: "lvm_osds is not supported by the selected Ceph version, use Luminous or above."
+  when:
+    - osd_group_name is defined
+    - osd_group_name in group_names
+    - not containerized_deployment
+    - lvm_osds
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
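As an aside on the ``is not mapping`` guard above, the accepted and rejected
forms differ as follows (values are illustrative, not part of the patch)::

    # passes: a dictionary mapping each data lv to its journal
    lvm_volumes:
      data-lv1: journal-lv1

    # fails the check: a list is not a mapping
    lvm_volumes:
      - data-lv1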
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index 5fd6185e0..299d7f933 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -20,6 +20,14 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
+- name: create lvm OSDs with ceph-volume
+  include: ./scenarios/lvm.yml
+  when:
+    - lvm_osds
+    - not containerized_deployment
+  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
+  static: False
+
 - include: ./docker/main.yml
   when: containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
diff --git a/roles/ceph-osd/tasks/scenarios/lvm.yml b/roles/ceph-osd/tasks/scenarios/lvm.yml
new file mode 100644
index 000000000..7d439aa33
--- /dev/null
+++ b/roles/ceph-osd/tasks/scenarios/lvm.yml
@@ -0,0 +1,5 @@
+---
+
+- name: use ceph-volume to create filestore OSDs with dedicated journals
+  command: "ceph-volume lvm create --filestore --data {{ item.key }} --journal {{ item.value }}"
+  with_dict: "{{ lvm_volumes }}"

From fd4a0210512dd9dec0834fdc9c576896813acb2a Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Wed, 19 Jul 2017 14:15:44 -0500
Subject: [PATCH 02/14] docs: add docs for the lvm_osds OSD scenario

Signed-off-by: Andrew Schoen
---
 docs/source/index.rst          |  6 ++++++
 docs/source/osds/scenarios.rst | 27 +++++++++++++++++++++++++++
 2 files changed, 33 insertions(+)
 create mode 100644 docs/source/osds/scenarios.rst

diff --git a/docs/source/index.rst b/docs/source/index.rst
index 4239ead35..b4d53b9f0 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -18,6 +18,12 @@ Testing
 OSDs
 ====
 
+.. toctree::
+   :maxdepth: 1
+
+   osds/scenarios
+
+
 MONs
 ====
diff --git a/docs/source/osds/scenarios.rst b/docs/source/osds/scenarios.rst
new file mode 100644
index 000000000..03c46a2d1
--- /dev/null
+++ b/docs/source/osds/scenarios.rst
@@ -0,0 +1,27 @@
+OSD Scenarios
+=============
+
+lvm_osds
+--------
+This OSD scenario uses ``ceph-volume`` to create OSDs from logical volumes and
+is only available when the ceph release is Luminous or greater.
+
+.. note::
+    The creation of the logical volumes is not supported by ceph-ansible, ceph-volume
+    only creates OSDs from existing logical volumes.
+
+Use ``lvm_osds:true`` to enable this scenario. Currently we only support dedicated journals
+when using lvm, not collocated journals.
+
+To configure this scenario use the ``lvm_volumes`` config option. ``lvm_volumes`` is a dictionary whose
+key/value pairs represent a data lv and its journal. Journals can be either a lv, device or partition.
+You cannot use the same journal for multiple data lvs.
+
+For example, a configuration to use ``lvm_osds`` would look like::
+
+    lvm_osds: true
+
+    lvm_volumes:
+      data-lv1: journal-lv1
+      data-lv2: /dev/sda
+      data-lv3: /dev/sdb1
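Given the example mapping above, the ``with_dict`` loop in
``roles/ceph-osd/tasks/scenarios/lvm.yml`` renders one ``ceph-volume`` call per
entry, roughly as follows (a sketch; dictionary iteration order is not guaranteed)::

    ceph-volume lvm create --filestore --data data-lv1 --journal journal-lv1
    ceph-volume lvm create --filestore --data data-lv2 --journal /dev/sda
    ceph-volume lvm create --filestore --data data-lv3 --journal /dev/sdb1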
From 661de0f3b0e54acc84abeae8323f63be5ca3daa1 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Tue, 25 Jul 2017 13:46:22 -0500
Subject: [PATCH 03/14] tests: adds an lvm_osds testing scenario

Signed-off-by: Andrew Schoen
---
 .../functional/centos/7/lvm-osds/Vagrantfile  |  1 +
 .../centos/7/lvm-osds/group_vars/all          | 21 ++++++
 tests/functional/centos/7/lvm-osds/hosts     |  5 ++
 .../centos/7/lvm-osds/vagrant_variables.yml   | 74 +++++++++++++++++++
 tests/functional/lvm_setup.yml                | 18 +++++
 tox.ini                                       |  6 +-
 6 files changed, 124 insertions(+), 1 deletion(-)
 create mode 120000 tests/functional/centos/7/lvm-osds/Vagrantfile
 create mode 100644 tests/functional/centos/7/lvm-osds/group_vars/all
 create mode 100644 tests/functional/centos/7/lvm-osds/hosts
 create mode 100644 tests/functional/centos/7/lvm-osds/vagrant_variables.yml
 create mode 100644 tests/functional/lvm_setup.yml

diff --git a/tests/functional/centos/7/lvm-osds/Vagrantfile b/tests/functional/centos/7/lvm-osds/Vagrantfile
new file mode 120000
index 000000000..dfd7436c9
--- /dev/null
+++ b/tests/functional/centos/7/lvm-osds/Vagrantfile
@@ -0,0 +1 @@
+../../../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/centos/7/lvm-osds/group_vars/all b/tests/functional/centos/7/lvm-osds/group_vars/all
new file mode 100644
index 000000000..d3fe977ee
--- /dev/null
+++ b/tests/functional/centos/7/lvm-osds/group_vars/all
@@ -0,0 +1,21 @@
+---
+
+ceph_stable: True
+cluster: ceph
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+lvm_osds: true
+copy_admin_key: true
+# test_volume is created by tests/functional/lvm_setup.yml from /dev/sda
+lvm_volumes:
+  test_volume: /dev/sdb
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
diff --git a/tests/functional/centos/7/lvm-osds/hosts b/tests/functional/centos/7/lvm-osds/hosts
new file mode 100644
index 000000000..f6a265ab3
--- /dev/null
+++ b/tests/functional/centos/7/lvm-osds/hosts
@@ -0,0 +1,5 @@
+[mons]
+mon0
+
+[osds]
+osd0
diff --git a/tests/functional/centos/7/lvm-osds/vagrant_variables.yml b/tests/functional/centos/7/lvm-osds/vagrant_variables.yml
new file mode 100644
index 000000000..cbb343842
--- /dev/null
+++ b/tests/functional/centos/7/lvm-osds/vagrant_variables.yml
@@ -0,0 +1,74 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. 
They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/lvm_setup.yml b/tests/functional/lvm_setup.yml new file mode 100644 index 000000000..42e36d920 --- /dev/null +++ b/tests/functional/lvm_setup.yml @@ -0,0 +1,18 @@ +--- + +- hosts: osds + gather_facts: false + become: yes + tasks: + + - name: create physical volume + command: pvcreate /dev/sda + failed_when: false + + - name: create volume group + command: vgcreate test_group /dev/sda + failed_when: false + + - name: create logical volume + command: lvcreate --yes -l 100%FREE -n test_volume test_group + failed_when: false diff --git a/tox.ini b/tox.ini index 15758c546..c22f14736 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] envlist = {dev,jewel,luminous,rhcs}-{ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster} - {dev,luminous}-{ansible2.2}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation} + {dev,luminous}-{ansible2.2}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds} skipsdist = True @@ -82,6 +82,7 @@ setenv= luminous: CEPH_DOCKER_IMAGE_TAG = tag-build-master-luminous-ubuntu-16.04 luminous: UPDATE_CEPH_STABLE_RELEASE = 
luminous
 luminous: UPDATE_CEPH_DOCKER_IMAGE_TAG = tag-build-master-luminous-ubuntu-16.04
+  lvm_osds: CEPH_STABLE_RELEASE = luminous
 deps=
   ansible1.9: ansible==1.9.4
   ansible2.1: ansible==2.1
@@ -117,6 +118,7 @@ changedir=
   bluestore_docker_cluster: {toxinidir}/tests/functional/centos/7/bs-docker
   bluestore_docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/bs-dock-ded-jrn
   bluestore_docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-dock-crypt-jrn-col
+  lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
 commands=
   rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
@@ -125,6 +127,8 @@
   vagrant up --no-provision {posargs:--provider=virtualbox}
   bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
 
+  lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+
   rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
 
   ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
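With the scenario wired into ``tox.ini``, it should be runnable like the
existing scenarios; an invocation would look roughly like this (the env name is
derived from the envlist factors above, not spelled out in the patch)::

    tox -e luminous-ansible2.2-lvm_osds -- --provider=virtualbox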
From d8aea71e312343a7f7679c30fa5691f074260898 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Tue, 25 Jul 2017 14:37:25 -0500
Subject: [PATCH 04/14] tests: lvm_osds does not have devices defined

This means that our tests need to use the lvm_volumes dictionary
instead of devices.

Signed-off-by: Andrew Schoen
---
 tests/conftest.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/tests/conftest.py b/tests/conftest.py
index 0bcc99238..cc3b851a7 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -43,7 +43,9 @@ def node(Ansible, Interface, Command, request):
     address = Interface("eth1").addresses[0]
     subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
     num_mons = len(ansible_vars["groups"]["mons"])
-    num_devices = len(ansible_vars["devices"])
+    num_devices = len(ansible_vars.get("devices", []))
+    if not num_devices:
+        num_devices = len(ansible_vars.get("lvm_volumes", []))
     num_osd_hosts = len(ansible_vars["groups"]["osds"])
     total_osds = num_devices * num_osd_hosts
     cluster_name = ansible_vars.get("cluster", "ceph")
@@ -58,7 +60,7 @@ def node(Ansible, Interface, Command, request):
     osd_ids = cmd.stdout.rstrip("\n").split("\n")
     osds = osd_ids
     if docker:
-        osds = [device.split("/")[-1] for device in ansible_vars["devices"]]
+        osds = [device.split("/")[-1] for device in ansible_vars.get("devices", [])]
 
     data = dict(
         address=address,

From 63b7e3d36ca14348b806c16d98c64892c93226df Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Tue, 25 Jul 2017 16:48:13 -0500
Subject: [PATCH 05/14] lvm_osds: ensure osd daemons are started

Signed-off-by: Andrew Schoen
---
 roles/ceph-osd/tasks/activate_osds.yml | 18 ------------------
 roles/ceph-osd/tasks/main.yml          |  7 +++++++
 roles/ceph-osd/tasks/start_osds.yml    | 19 +++++++++++++++++++
 3 files changed, 26 insertions(+), 18 deletions(-)
 create mode 100644 roles/ceph-osd/tasks/start_osds.yml

diff --git a/roles/ceph-osd/tasks/activate_osds.yml b/roles/ceph-osd/tasks/activate_osds.yml
index 79b99c139..f420dfdaf 100644
--- a/roles/ceph-osd/tasks/activate_osds.yml
+++ b/roles/ceph-osd/tasks/activate_osds.yml
@@ -81,21 +81,3 @@
     - not item.0.get("skipped")
     - item.0.get("rc", 0) == 0
     - not osd_auto_discovery
-
-- include: osd_fragment.yml
-  when: crush_location
-
-- name: get osd id
-  shell: |
-    ls /var/lib/ceph/osd/ | sed 's/.*-//'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: osd_id
-
-- name: start and add that the osd service(s) to the init sequence
-  service:
-    name: ceph-osd@{{ item }}
-    state: started
-  with_items: "{{ (osd_id|default({})).stdout_lines|default([]) }}"
-  changed_when: false
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index 299d7f933..56d4c7d20 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -28,6 +28,13 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
+- name: ensure OSD daemons are started
+  include: start_osds.yml
+  when:
+    - not containerized_deployment
+  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
+  static: False
+
 - include: ./docker/main.yml
   when: containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
diff --git a/roles/ceph-osd/tasks/start_osds.yml b/roles/ceph-osd/tasks/start_osds.yml
new file mode 100644
index 000000000..207902d8f
--- /dev/null
+++ b/roles/ceph-osd/tasks/start_osds.yml
@@ -0,0 +1,19 @@
+---
+
+- include: osd_fragment.yml
+  when: crush_location
+
+- name: get osd id
+  shell: |
+    ls /var/lib/ceph/osd/ | sed 's/.*-//'
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: osd_id
+
+- name: start and add that the osd service(s) to the init sequence
+  service:
+    name: ceph-osd@{{ item }}
+    state: started
+  with_items: "{{ (osd_id|default({})).stdout_lines|default([]) }}"
+  changed_when: false
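As a worked example of the ``get osd id`` pipeline above (assuming the default
cluster name ``ceph``): if ``/var/lib/ceph/osd/`` contains ``ceph-0`` and
``ceph-3``, the ``sed 's/.*-//'`` filter registers ``0`` and ``3``, and the
service task then starts::

    ceph-osd@0
    ceph-osd@3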
From 66df80d600d96032b8f0d0d243fcba516a071f73 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Wed, 26 Jul 2017 09:29:50 -0500
Subject: [PATCH 06/14] tests: do not use sudo with dev_setup.yml

This causes problems when the tests are run locally and not in the CI

Signed-off-by: Andrew Schoen
---
 tests/functional/dev_setup.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/functional/dev_setup.yml b/tests/functional/dev_setup.yml
index 72bdf6368..bf07351c6 100644
--- a/tests/functional/dev_setup.yml
+++ b/tests/functional/dev_setup.yml
@@ -1,7 +1,7 @@
 ---
 - hosts: localhost
   gather_facts: false
-  become: yes
+  become: no
   tags:
     - vagrant_setup
   tasks:

From 249598ff4b0a88795c1b6f0b7b8d586061e7e678 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Wed, 26 Jul 2017 10:28:53 -0500
Subject: [PATCH 07/14] lvm-osds: update group_vars/osds.yml.sample

Signed-off-by: Andrew Schoen
---
 group_vars/osds.yml.sample | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index ae06510ff..67bff173a 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -195,6 +195,19 @@ dummy:
 # /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
 #bluestore_wal_devices: "{{ dedicated_devices }}"
 
+# VII. Use ceph-volume to create OSDs from logical volumes.
+# Use 'lvm_osds:true' to enable this scenario. Currently we only support dedicated journals
+# when using lvm, not collocated journals.
+# lvm_volumes is a dictionary whose key/value pairs represent a data lv and its journal.
+# Journals can be either a lv, device or partition. You cannot use the same journal for multiple data lvs.
+# For example:
+# lvm_volumes:
+#   data-lv1: journal-lv1
+#   data-lv2: /dev/sda
+#   data-lv3: /dev/sdb1
+#lvm_osds: false
+#lvm_volumes: {}
+
 
 ##########
 # DOCKER #

From 61d63f84681af97c093923854d8d73a48177dedf Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Wed, 26 Jul 2017 11:23:32 -0500
Subject: [PATCH 08/14] lvm-osds: make task name and files consistent

Removes capitalization and newlines to keep these files consistent in
style with the existing tasks.

Signed-off-by: Andrew Schoen
---
 roles/ceph-osd/tasks/main.yml          | 4 ++--
 roles/ceph-osd/tasks/scenarios/lvm.yml | 3 +--
 roles/ceph-osd/tasks/start_osds.yml    | 1 -
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index 56d4c7d20..2dc9d5e84 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -20,7 +20,7 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
-- name: create lvm OSDs with ceph-volume
+- name: create lvm osds with ceph-volume
   include: ./scenarios/lvm.yml
   when:
     - lvm_osds
@@ -28,7 +28,7 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
-- name: ensure OSD daemons are started
+- name: ensure osd daemons are started
   include: start_osds.yml
   when:
     - not containerized_deployment
diff --git a/roles/ceph-osd/tasks/scenarios/lvm.yml b/roles/ceph-osd/tasks/scenarios/lvm.yml
index 7d439aa33..daa457c7c 100644
--- a/roles/ceph-osd/tasks/scenarios/lvm.yml
+++ b/roles/ceph-osd/tasks/scenarios/lvm.yml
@@ -1,5 +1,4 @@
 ---
-
-- name: use ceph-volume to create filestore OSDs with dedicated journals
+- name: use ceph-volume to create filestore osds with dedicated journals
   command: "ceph-volume lvm create --filestore --data {{ item.key }} --journal {{ item.value }}"
   with_dict: "{{ lvm_volumes }}"
diff --git a/roles/ceph-osd/tasks/start_osds.yml b/roles/ceph-osd/tasks/start_osds.yml
index 207902d8f..4b2e1fcad 100644
--- a/roles/ceph-osd/tasks/start_osds.yml
+++ b/roles/ceph-osd/tasks/start_osds.yml
@@ -1,5 +1,4 @@
 ---
-
 - include: osd_fragment.yml
   when: crush_location

From 96c92a154ee578ed225714a3310ab98198528c20 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Wed, 26 Jul 2017 11:26:57 -0500
Subject: [PATCH 09/14] lvm-osds: check for osd_objectstore == 'filestore'

ceph-volume currently only has support for filestore, not bluestore

Signed-off-by: Andrew Schoen
---
 roles/ceph-osd/tasks/check_mandatory_vars.yml | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/roles/ceph-osd/tasks/check_mandatory_vars.yml b/roles/ceph-osd/tasks/check_mandatory_vars.yml
index 353be99ee..b62ae0633 100644
--- a/roles/ceph-osd/tasks/check_mandatory_vars.yml
+++ b/roles/ceph-osd/tasks/check_mandatory_vars.yml
@@ -56,6 +56,16 @@
     - not osd_auto_discovery
     - lvm_volumes|length == 0
 
+- name: verify osd_objectstore is 'filestore' when using lvm_osds
+  fail:
+    msg: "the lvm_osds scenario currently only works for filestore, not bluestore"
+  when:
+    - osd_group_name is defined
+    - osd_group_name in group_names
+    - lvm_osds
+    - not osd_auto_discovery
+    - osd_objectstore != 'filestore'
+
 - name: make sure the lvm_volumes variable is a dictionary
   fail:
     msg: "lvm_volumes: must be a dictionary"

From 3b5a06bb3c6e89ddc332f5ba9925c8db52f9a58b Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Thu, 27 Jul 2017 10:04:53 -0500
Subject: [PATCH 10/14] lvm-osds: reorder mandatory vars checks

Signed-off-by: Andrew Schoen
---
roles/ceph-osd/tasks/check_mandatory_vars.yml | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/roles/ceph-osd/tasks/check_mandatory_vars.yml b/roles/ceph-osd/tasks/check_mandatory_vars.yml index b62ae0633..0cd1ddbf3 100644 --- a/roles/ceph-osd/tasks/check_mandatory_vars.yml +++ b/roles/ceph-osd/tasks/check_mandatory_vars.yml @@ -46,15 +46,15 @@ - not osd_auto_discovery - devices|length == 0 -- name: verify lvm_volumes have been provided +- name: check if lvm_osds is supported by the selected ceph version fail: - msg: "please provide lvm_volumes to your osd scenario" + msg: "lvm_osds is not supported by the selected Ceph version, use Luminous or above." when: - osd_group_name is defined - osd_group_name in group_names + - not containerized_deployment - lvm_osds - - not osd_auto_discovery - - lvm_volumes|length == 0 + - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous - name: verify osd_objectstore is 'filestore' when using lvm_osds fail: @@ -66,6 +66,16 @@ - not osd_auto_discovery - osd_objectstore != 'filestore' +- name: verify lvm_volumes have been provided + fail: + msg: "please provide lvm_volumes to your osd scenario" + when: + - osd_group_name is defined + - osd_group_name in group_names + - lvm_osds + - not osd_auto_discovery + - lvm_volumes|length == 0 + - name: make sure the lvm_volumes variable is a dictionary fail: msg: "lvm_volumes: must be a dictionary" @@ -117,13 +127,3 @@ - not containerized_deployment - osd_objectstore == 'bluestore' - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous - -- name: check if lvm_osds is supported by the selected ceph version - fail: - msg: "lvm_osds is not supported by the selected Ceph version, use Luminous or above." - when: - - osd_group_name is defined - - osd_group_name in group_names - - not containerized_deployment - - lvm_osds - - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous From e597628be9edbc6ba058c38448ac0171fb4bb324 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Thu, 3 Aug 2017 10:08:34 -0500 Subject: [PATCH 11/14] lvm: update scenario for new osd_scenario variable Signed-off-by: Andrew Schoen --- docs/source/osds/scenarios.rst | 18 ++++++++++-------- group_vars/osds.yml.sample | 6 +++--- roles/ceph-osd/defaults/main.yml | 7 ++++--- roles/ceph-osd/tasks/check_mandatory_vars.yml | 16 ++++++++-------- roles/ceph-osd/tasks/main.yml | 2 +- roles/ceph-osd/tasks/start_osds.yml | 2 +- .../centos/7/lvm-osds/group_vars/all | 2 +- 7 files changed, 28 insertions(+), 25 deletions(-) diff --git a/docs/source/osds/scenarios.rst b/docs/source/osds/scenarios.rst index 03c46a2d1..6ba834c1d 100644 --- a/docs/source/osds/scenarios.rst +++ b/docs/source/osds/scenarios.rst @@ -1,26 +1,28 @@ OSD Scenarios ============= -lvm_osds --------- +lvm +--- This OSD scenario uses ``ceph-volume`` to create OSDs from logical volumes and -is only available when the ceph release is Luminous or greater. +is only available when the ceph release is Luminous or newer. .. note:: - The creation of the logical volumes is not supported by ceph-ansible, ceph-volume + The creation of the logical volumes is not supported by ``ceph-ansible``, ``ceph-volume`` only creates OSDs from existing logical volumes. -Use ``lvm_osds:true`` to enable this scenario. Currently we only support dedicated journals +Use ``osd_scenario: lvm`` to enable this scenario. Currently we only support dedicated journals when using lvm, not collocated journals. 
 To configure this scenario use the ``lvm_volumes`` config option. ``lvm_volumes`` is a dictionary whose
 key/value pairs represent a data lv and its journal. Journals can be either a lv, device or partition.
 You cannot use the same journal for multiple data lvs.
 
-For example, a configuration to use ``lvm_osds`` would look like::
-
-    lvm_osds: true
+.. note::
+    Any logical volume or volume group used in ``lvm_volumes`` must be a name and not a path.
 
+For example, a configuration to use the ``lvm`` osd scenario would look like::
+
+    osd_scenario: lvm
     lvm_volumes:
       data-lv1: journal-lv1
       data-lv2: /dev/sda
       data-lv3: /dev/sdb1
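To make the new note concrete: entries in ``lvm_volumes`` are LVM names, not
paths. A hypothetical contrast::

    lvm_volumes:
      data-lv1: journal-lv1                  # correct: the lv name
      # not: /dev/vg0/data-lv1: journal-lv1  # wrong: a path instead of a name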
diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index 67bff173a..2cf8e1292 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -114,6 +114,7 @@ dummy:
 #valid_osd_scenarios:
 #  - collocated
 #  - non-collocated
+#  - lvm
 
 
 # II. Second scenario: non-collocated
@@ -195,8 +196,8 @@ dummy:
 # /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
 #bluestore_wal_devices: "{{ dedicated_devices }}"
 
-# VII. Use ceph-volume to create OSDs from logical volumes.
-# Use 'lvm_osds:true' to enable this scenario. Currently we only support dedicated journals
+# III. Use ceph-volume to create OSDs from logical volumes.
+# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
 # when using lvm, not collocated journals.
 # lvm_volumes is a dictionary whose key/value pairs represent a data lv and its journal.
 # Journals can be either a lv, device or partition. You cannot use the same journal for multiple data lvs.
 # For example:
 # lvm_volumes:
 #   data-lv1: journal-lv1
 #   data-lv2: /dev/sda
 #   data-lv3: /dev/sdb1
-#lvm_osds: false
 #lvm_volumes: {}
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index fe4283450..30a087cf6 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -106,6 +106,7 @@ osd_scenario: dummy
 valid_osd_scenarios:
   - collocated
   - non-collocated
+  - lvm
 
 
 # II. Second scenario: non-collocated
@@ -187,17 +188,17 @@ dedicated_devices: []
 # /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
 bluestore_wal_devices: "{{ dedicated_devices }}"
 
-# VII. Use ceph-volume to create OSDs from logical volumes.
-# Use 'lvm_osds:true' to enable this scenario. Currently we only support dedicated journals
+# III. Use ceph-volume to create OSDs from logical volumes.
+# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
 # when using lvm, not collocated journals.
 # lvm_volumes is a dictionary whose key/value pairs represent a data lv and its journal.
+# Any logical volume or volume group used must be a name and not a path.
 # Journals can be either a lv, device or partition. You cannot use the same journal for multiple data lvs.
 # For example:
 # lvm_volumes:
 #   data-lv1: journal-lv1
 #   data-lv2: /dev/sda
 #   data-lv3: /dev/sdb1
-lvm_osds: false
 lvm_volumes: {}
diff --git a/roles/ceph-osd/tasks/check_mandatory_vars.yml b/roles/ceph-osd/tasks/check_mandatory_vars.yml
index 0cd1ddbf3..516542c0a 100644
--- a/roles/ceph-osd/tasks/check_mandatory_vars.yml
+++ b/roles/ceph-osd/tasks/check_mandatory_vars.yml
@@ -46,23 +46,23 @@
     - not osd_auto_discovery
     - devices|length == 0
 
-- name: check if lvm_osds is supported by the selected ceph version
+- name: check if osd_scenario lvm is supported by the selected ceph version
   fail:
-    msg: "lvm_osds is not supported by the selected Ceph version, use Luminous or above."
+    msg: "osd_scenario lvm is not supported by the selected Ceph version, use Luminous or newer."
   when:
     - osd_group_name is defined
     - osd_group_name in group_names
     - not containerized_deployment
-    - lvm_osds
+    - osd_scenario == "lvm"
     - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
 
-- name: verify osd_objectstore is 'filestore' when using lvm_osds
+- name: verify osd_objectstore is 'filestore' when using the lvm osd_scenario
   fail:
-    msg: "the lvm_osds scenario currently only works for filestore, not bluestore"
+    msg: "the lvm osd_scenario currently only works for filestore, not bluestore"
   when:
     - osd_group_name is defined
     - osd_group_name in group_names
-    - lvm_osds
+    - osd_scenario == "lvm"
     - not osd_auto_discovery
     - osd_objectstore != 'filestore'
 
@@ -72,7 +72,7 @@
   when:
     - osd_group_name is defined
     - osd_group_name in group_names
-    - lvm_osds
+    - osd_scenario == "lvm"
     - not osd_auto_discovery
     - lvm_volumes|length == 0
 
@@ -83,7 +83,7 @@
     - osd_group_name is defined
     - osd_group_name in group_names
     - not osd_auto_discovery
-    - lvm_osds
+    - osd_scenario == "lvm"
     - lvm_volumes is not mapping
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index 2dc9d5e84..1784e27fc 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -23,7 +23,7 @@
 - name: create lvm osds with ceph-volume
   include: ./scenarios/lvm.yml
   when:
-    - lvm_osds
+    - osd_scenario == 'lvm'
     - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
diff --git a/roles/ceph-osd/tasks/start_osds.yml b/roles/ceph-osd/tasks/start_osds.yml
index 4b2e1fcad..b0978fc81 100644
--- a/roles/ceph-osd/tasks/start_osds.yml
+++ b/roles/ceph-osd/tasks/start_osds.yml
@@ -10,7 +10,7 @@
   always_run: true
   register: osd_id
 
-- name: start and add that the osd service(s) to the init sequence
+- name: ensure osd daemons are started
   service:
     name: ceph-osd@{{ item }}
     state: started
diff --git a/tests/functional/centos/7/lvm-osds/group_vars/all b/tests/functional/centos/7/lvm-osds/group_vars/all
index d3fe977ee..36d6bbf7b 100644
--- a/tests/functional/centos/7/lvm-osds/group_vars/all
+++ b/tests/functional/centos/7/lvm-osds/group_vars/all
@@ -7,7 +7,7 @@ cluster_network: "192.168.4.0/24"
 monitor_interface: eth1
 journal_size: 100
 osd_objectstore: "filestore"
-lvm_osds: true
+osd_scenario: lvm
 copy_admin_key: true
 # test_volume is created by tests/functional/lvm_setup.yml from /dev/sda
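For configurations written against the earlier patches in this series, the net
effect of the rename is (illustrative before/after)::

    # before this patch
    lvm_osds: true

    # after this patch
    osd_scenario: lvm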
From 1d5f8767291fbe73ab618b4bb9f4eb54216b3b99 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Thu, 3 Aug 2017 17:03:04 -0500
Subject: [PATCH 12/14] ceph-osd: devices is not required when osd_scenario ==
 lvm

Signed-off-by: Andrew Schoen
---
 roles/ceph-osd/tasks/check_mandatory_vars.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/roles/ceph-osd/tasks/check_mandatory_vars.yml b/roles/ceph-osd/tasks/check_mandatory_vars.yml
index 516542c0a..384738d8d 100644
--- a/roles/ceph-osd/tasks/check_mandatory_vars.yml
+++ b/roles/ceph-osd/tasks/check_mandatory_vars.yml
@@ -44,6 +44,7 @@
     - osd_group_name is defined
     - osd_group_name in group_names
     - not osd_auto_discovery
+    - not osd_scenario == "lvm"
     - devices|length == 0
 
 - name: check if osd_scenario lvm is supported by the selected ceph version

From d1c7ec81c1ad625ef259f2ae17ad01dc087dcf1c Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Thu, 3 Aug 2017 10:43:05 -0500
Subject: [PATCH 13/14] ceph-common: move release-rhcs.yml after ceph_version
 is set

These tasks need to be run after we set ceph_version or they fail
because it's undefined.

Signed-off-by: Andrew Schoen
---
 roles/ceph-common/tasks/main.yml | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml
index 5ecc9a6a2..c19ee0c77 100644
--- a/roles/ceph-common/tasks/main.yml
+++ b/roles/ceph-common/tasks/main.yml
@@ -84,12 +84,6 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
-# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
-- include: ./release-rhcs.yml
-  when: (ceph_rhcs or ceph_dev)
-  tags:
-    - always
-
 - include: ./misc/ntp_redhat.yml
   when:
     - ansible_os_family == 'RedHat'
@@ -113,6 +107,12 @@
 - set_fact:
     ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
 
+# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
+- include: ./release-rhcs.yml
+  when: (ceph_rhcs or ceph_dev)
+  tags:
+    - always
+
 - include: facts_mon_fsid.yml
   run_once: true
   when:

From be78bc1a9069e1be98edaf536eaa483720b8a552 Mon Sep 17 00:00:00 2001
From: Andrew Schoen
Date: Thu, 3 Aug 2017 11:49:18 -0500
Subject: [PATCH 14/14] ceph-defaults: fix containerized osd restarts

This needs to check `containerized_deployment` because
socket_osd_container is undefined otherwise.

Signed-off-by: Andrew Schoen
---
 roles/ceph-defaults/handlers/main.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/roles/ceph-defaults/handlers/main.yml b/roles/ceph-defaults/handlers/main.yml
index e8659f904..f661e3b1c 100644
--- a/roles/ceph-defaults/handlers/main.yml
+++ b/roles/ceph-defaults/handlers/main.yml
@@ -44,6 +44,7 @@
   when:
     # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
     # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
+    - containerized_deployment
    - ((crush_location is defined and crush_location) or item.get('rc') == 0)
     - handler_health_osd_check
     # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
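Taken together, the series leaves a minimal non-containerized host
configuration for the new scenario looking like this (a recap of the docs
above; the lv names are illustrative)::

    osd_objectstore: filestore
    osd_scenario: lvm
    lvm_volumes:
      data-lv1: journal-lv1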