mirror of https://github.com/ceph/ceph-ansible.git
Merge pull request #1841 from ceph/lvm-partitions
lvm-osds: test with a partition and an lv as journals
pull/1844/head
commit 29df79e54e
@@ -2,3 +2,11 @@
- name: use ceph-volume to create filestore osds with dedicated journals
  command: "ceph-volume lvm create --filestore --data {{ item.data_vg }}/{{ item.data }} --journal {{ item.journal }}"
  with_items: "{{ lvm_volumes }}"
  when:
    - item.journal_vg is not defined

- name: use ceph-volume to create filestore osds with dedicated lv journals
  command: "ceph-volume lvm create --filestore --data {{ item.data_vg }}/{{ item.data }} --journal {{ item.journal_vg }}/{{ item.journal }}"
  with_items: "{{ lvm_volumes }}"
  when:
    - item.journal_vg is defined
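As a quick illustration (a sketch, not output from a run), which of the two tasks an lvm_volumes entry hits depends only on whether journal_vg is defined; the entry shapes below reuse the names from the test group_vars further down:

    # no journal_vg: matched by the first task, the journal value is passed
    # through as a raw device or partition path
    - data: data-lv1
      data_vg: test_group
      journal: /dev/sdc1

    # journal_vg defined: matched by the second task, the journal is resolved
    # as journal_vg/journal
    - data: data-lv2
      data_vg: test_group
      journal: journal1
      journal_vg: journals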
@@ -23,6 +23,9 @@ def node(host, request):
    if not request.node.get_marker(node_type) and not request.node.get_marker('all'):
        pytest.skip("Not a valid test for node type: %s" % node_type)

    if request.node.get_marker("no_lvm_scenario") and lvm_scenario:
        pytest.skip("Not a valid test for lvm scenarios")

    if not lvm_scenario and request.node.get_marker("lvm_scenario"):
        pytest.skip("Not a valid test for non-lvm scenarios")
@@ -1,6 +1,7 @@
---
ceph_stable: True
ceph_origin: repository
ceph_repository: community
cluster: ceph
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
@@ -12,9 +13,13 @@ osd_scenario: lvm
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
lvm_volumes:
  - data: test_volume
    journal: /dev/sdc
  - data: data-lv1
    journal: /dev/sdc1
    data_vg: test_group
  - data: data-lv2
    journal: journal1
    data_vg: test_group
    journal_vg: journals
os_tuning_params:
  - { name: kernel.pid_max, value: 4194303 }
  - { name: fs.file-max, value: 26234859 }
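With the command templates from the first hunk, the two lv-backed entries above should render to roughly these calls (assumed rendering, not captured from CI):

    # data-lv1 carries no journal_vg, so its journal stays a partition path:
    #   ceph-volume lvm create --filestore --data test_group/data-lv1 --journal /dev/sdc1
    # data-lv2 defines journal_vg, so its journal becomes an lv reference:
    #   ceph-volume lvm create --filestore --data test_group/data-lv2 --journal journals/journal1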
@@ -4,7 +4,7 @@
  gather_facts: false
  become: yes
  tasks:

    - name: create physical volume
      command: pvcreate /dev/sdb
      failed_when: false
@@ -13,6 +13,45 @@
      command: vgcreate test_group /dev/sdb
      failed_when: false

    - name: create logical volume
      command: lvcreate --yes -l 100%FREE -n test_volume test_group
    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_group
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_group
      failed_when: false

    - name: partition /dev/sdc for journals
      parted:
        device: /dev/sdc
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        # this is a brand-new, unlabeled disk, so add the label
        # only for the first partition
        label: gpt
        state: present

    - name: partition /dev/sdc for journals
      parted:
        device: /dev/sdc
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        state: present

    - name: create filesystem on /dev/sdc1
      filesystem:
        fstype: ext4
        dev: /dev/sdc1

    - name: create journals vg from /dev/sdc2
      lvg:
        vg: journals
        pvs: /dev/sdc2

    - name: create journal1 lv
      command: lvcreate --yes -l 100%FREE -n journal1 journals
      failed_when: false
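Taken together, a rough sketch of the layout this setup playbook is expected to leave on each OSD node (assuming /dev/sdb and /dev/sdc start out empty):

    # /dev/sdb   -> pv for the test_group vg, holding the data-lv1 and data-lv2
    #               lvs (each created with -l 50%FREE)
    # /dev/sdc1  -> gpt partition over the first half of the disk, pre-formatted
    #               ext4 and consumed directly as a journal
    # /dev/sdc2  -> second half of the disk, pv for the journals vg holding the
    #               journal1 lv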
@@ -22,6 +22,7 @@ class TestOSDs(object):
        for osd in node["osds"]:
            assert host.service("ceph-osd@%s" % osd).is_running

    @pytest.mark.no_lvm_scenario
    def test_osd_services_are_enabled(self, node, host):
        # TODO: figure out way to paramaterize node['osds'] for this test
        for osd in node["osds"]: