ceph-ansible/tests/functional/lvm_setup.yml

---
- hosts: osds
  gather_facts: false
  become: yes
  tasks:
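    # /dev/sdb and /dev/sdc are assumed to be blank scratch disks attached to
    # the test OSD VMs. The raw pvcreate/vgcreate/lvcreate commands use
    # failed_when: false so re-running this playbook does not fail once the
    # PV, VG, and LVs already exist.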
    - name: create physical volume
      command: pvcreate /dev/sdb
      failed_when: false
    - name: create volume group
      command: vgcreate test_group /dev/sdb
      failed_when: false
    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_group
      failed_when: false
    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_group
      failed_when: false
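    # Note that -l 50%FREE allocates half of the *currently free* extents, so
    # data-lv1 gets roughly half of test_group and data-lv2 gets half of the
    # remainder (about a quarter of the VG), leaving the rest unallocated.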
    # purge-cluster.yml does not properly destroy partitions
    # used for lvm osd journals, this ensures they are removed
    # for that testing scenario
    - name: remove /dev/sdc1 if it exists
      parted:
        device: /dev/sdc
        number: 1
        state: absent
    - name: remove /dev/sdc2 if it exists
      parted:
        device: /dev/sdc
        number: 2
        state: absent
    - name: partition /dev/sdc for journals
      parted:
        device: /dev/sdc
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        # this is a brand-new, unlabeled disk, so add the label
        # only for the first partition
        label: gpt
        state: present
    - name: partition /dev/sdc for journals
      parted:
        device: /dev/sdc
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        state: present
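    # Assumption: /dev/sdc1 (the first half of the disk) is left as a raw
    # partition for the test scenario that uses partition-backed journals,
    # while /dev/sdc2 backs the LVM journals volume group created below.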
    - name: create journals vg from /dev/sdc2
      lvg:
        vg: journals
        pvs: /dev/sdc2
    - name: create journal1 lv
      command: lvcreate --yes -l 100%FREE -n journal1 journals
      failed_when: false
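    # journal1 consumes all of the free extents in the journals VG. As an
    # example (inventory path is hypothetical), this playbook can be applied
    # on its own with:
    #   ansible-playbook -i <inventory> lvm_setup.yml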