mirror of https://github.com/ceph/ceph-ansible.git
Merge pull request #1716 from ceph/lvm-osds

adds a new OSD scenario to create OSDs with ceph-volume

commit 9da848abac
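A quick sketch of how the new scenario is enabled, assembled from the docs and test fixtures added
below (the logical volumes themselves must already exist; the names are only examples):

    osd_objectstore: filestore
    osd_scenario: lvm
    lvm_volumes:
      data-lv1: journal-lv1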
@ -18,6 +18,12 @@ Testing
OSDs
====

.. toctree::
   :maxdepth: 1

   osds/scenarios


MONs
====

@ -0,0 +1,29 @@
OSD Scenarios
=============

lvm
---
This OSD scenario uses ``ceph-volume`` to create OSDs from logical volumes and
is only available when the Ceph release is Luminous or newer.

.. note::
   The creation of the logical volumes is not supported by ``ceph-ansible``; ``ceph-volume``
   only creates OSDs from existing logical volumes.

Use ``osd_scenario: lvm`` to enable this scenario. Currently we only support dedicated journals
when using lvm, not collocated journals.

To configure this scenario use the ``lvm_volumes`` config option. ``lvm_volumes`` is a dictionary whose
key/value pairs each represent a data logical volume and its journal. Journals can be a logical volume,
a device, or a partition. You cannot use the same journal for several data logical volumes.

.. note::
   Any logical volume or volume group used in ``lvm_volumes`` must be given by name, not by path.

For example, a configuration that uses the ``lvm`` OSD scenario would look like::

    osd_scenario: lvm
    lvm_volumes:
      data-lv1: journal-lv1
      data-lv2: /dev/sda
      data-lv3: /dev/sdb1
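
Under the hood the role iterates over ``lvm_volumes`` with ``with_dict`` and calls ``ceph-volume``
once per pair; the task added later in this change looks like::

    - name: use ceph-volume to create filestore osds with dedicated journals
      command: "ceph-volume lvm create --filestore --data {{ item.key }} --journal {{ item.value }}"
      with_dict: "{{ lvm_volumes }}"

So the ``data-lv2: /dev/sda`` entry above results in ``--data data-lv2 --journal /dev/sda``.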
@ -114,6 +114,7 @@ dummy:
#valid_osd_scenarios:
# - collocated
# - non-collocated
# - lvm


# II. Second scenario: non-collocated
@ -195,6 +196,18 @@ dummy:
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
#bluestore_wal_devices: "{{ dedicated_devices }}"

# III. Use ceph-volume to create OSDs from logical volumes.
# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
# when using lvm, not collocated journals.
# lvm_volumes is a dictionary whose key/value pairs each represent a data lv and its journal.
# Journals can be a lv, a device or a partition. You cannot use the same journal for several data lvs.
# For example:
# lvm_volumes:
#   data-lv1: journal-lv1
#   data-lv2: /dev/sda
#   data-lv3: /dev/sdb1
#lvm_volumes: {}


##########
# DOCKER #
@ -84,12 +84,6 @@
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False

# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
- include: ./release-rhcs.yml
  when: (ceph_rhcs or ceph_dev)
  tags:
    - always

- include: ./misc/ntp_redhat.yml
  when:
    - ansible_os_family == 'RedHat'
@ -113,6 +107,12 @@
- set_fact:
    ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"

# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
- include: ./release-rhcs.yml
  when: (ceph_rhcs or ceph_dev)
  tags:
    - always

- include: facts_mon_fsid.yml
  run_once: true
  when:
@ -44,6 +44,7 @@
  when:
    # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
    # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
    - containerized_deployment
    - ((crush_location is defined and crush_location) or item.get('rc') == 0)
    - handler_health_osd_check
    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
@ -106,6 +106,7 @@ osd_scenario: dummy
valid_osd_scenarios:
  - collocated
  - non-collocated
  - lvm


# II. Second scenario: non-collocated
@ -187,6 +188,19 @@ dedicated_devices: []
# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
bluestore_wal_devices: "{{ dedicated_devices }}"

# III. Use ceph-volume to create OSDs from logical volumes.
# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
# when using lvm, not collocated journals.
# lvm_volumes is a dictionary whose key/value pairs each represent a data lv and its journal.
# Any logical volume or volume group used must be given by name, not by path.
# Journals can be a lv, a device or a partition. You cannot use the same journal for several data lvs.
# For example:
# lvm_volumes:
#   data-lv1: journal-lv1
#   data-lv2: /dev/sda
#   data-lv3: /dev/sdb1
lvm_volumes: {}


##########
# DOCKER #
@ -81,21 +81,3 @@
    - not item.0.get("skipped")
    - item.0.get("rc", 0) == 0
    - not osd_auto_discovery

- include: osd_fragment.yml
  when: crush_location

- name: get osd id
  shell: |
    ls /var/lib/ceph/osd/ | sed 's/.*-//'
  changed_when: false
  failed_when: false
  always_run: true
  register: osd_id

- name: start and add that the osd service(s) to the init sequence
  service:
    name: ceph-osd@{{ item }}
    state: started
  with_items: "{{ (osd_id|default({})).stdout_lines|default([]) }}"
  changed_when: false
@ -44,8 +44,49 @@
    - osd_group_name is defined
    - osd_group_name in group_names
    - not osd_auto_discovery
    - not osd_scenario == "lvm"
    - devices|length == 0

- name: check if osd_scenario lvm is supported by the selected ceph version
  fail:
    msg: "osd_scenario lvm is not supported by the selected Ceph version, use Luminous or newer."
  when:
    - osd_group_name is defined
    - osd_group_name in group_names
    - not containerized_deployment
    - osd_scenario == "lvm"
    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous

- name: verify osd_objectstore is 'filestore' when using the lvm osd_scenario
  fail:
    msg: "the lvm osd_scenario currently only works for filestore, not bluestore"
  when:
    - osd_group_name is defined
    - osd_group_name in group_names
    - osd_scenario == "lvm"
    - not osd_auto_discovery
    - osd_objectstore != 'filestore'

- name: verify lvm_volumes have been provided
  fail:
    msg: "please provide lvm_volumes to your osd scenario"
  when:
    - osd_group_name is defined
    - osd_group_name in group_names
    - osd_scenario == "lvm"
    - not osd_auto_discovery
    - lvm_volumes|length == 0

- name: make sure the lvm_volumes variable is a dictionary
  fail:
    msg: "lvm_volumes: must be a dictionary"
  when:
    - osd_group_name is defined
    - osd_group_name in group_names
    - not osd_auto_discovery
    - osd_scenario == "lvm"
    - lvm_volumes is not mapping

- name: make sure the devices variable is a list
  fail:
    msg: "devices: must be a list, not a string, i.e. [ \"/dev/sda\" ]"
@ -20,6 +20,21 @@
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False

- name: create lvm osds with ceph-volume
  include: ./scenarios/lvm.yml
  when:
    - osd_scenario == 'lvm'
    - not containerized_deployment
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False

- name: ensure osd daemons are started
  include: start_osds.yml
  when:
    - not containerized_deployment
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False

- include: ./docker/main.yml
  when: containerized_deployment
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
@ -0,0 +1,4 @@
---
- name: use ceph-volume to create filestore osds with dedicated journals
  command: "ceph-volume lvm create --filestore --data {{ item.key }} --journal {{ item.value }}"
  with_dict: "{{ lvm_volumes }}"
@ -0,0 +1,18 @@
---
- include: osd_fragment.yml
  when: crush_location

- name: get osd id
  shell: |
    ls /var/lib/ceph/osd/ | sed 's/.*-//'
  changed_when: false
  failed_when: false
  always_run: true
  register: osd_id

- name: ensure osd daemons are started
  service:
    name: ceph-osd@{{ item }}
    state: started
  with_items: "{{ (osd_id|default({})).stdout_lines|default([]) }}"
  changed_when: false
@ -43,7 +43,9 @@ def node(Ansible, Interface, Command, request):
    address = Interface("eth1").addresses[0]
    subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
    num_mons = len(ansible_vars["groups"]["mons"])
    num_devices = len(ansible_vars["devices"])
    num_devices = len(ansible_vars.get("devices", []))
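    # the lvm scenario defines lvm_volumes instead of devices, so fall back to counting those entries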
    if not num_devices:
        num_devices = len(ansible_vars.get("lvm_volumes", []))
    num_osd_hosts = len(ansible_vars["groups"]["osds"])
    total_osds = num_devices * num_osd_hosts
    cluster_name = ansible_vars.get("cluster", "ceph")
@ -58,7 +60,7 @@ def node(Ansible, Interface, Command, request):
    osd_ids = cmd.stdout.rstrip("\n").split("\n")
    osds = osd_ids
    if docker:
        osds = [device.split("/")[-1] for device in ansible_vars["devices"]]
        osds = [device.split("/")[-1] for device in ansible_vars.get("devices", [])]

    data = dict(
        address=address,
@ -0,0 +1 @@
../../../../../Vagrantfile
@ -0,0 +1,21 @@
---

ceph_stable: True
cluster: ceph
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
monitor_interface: eth1
journal_size: 100
osd_objectstore: "filestore"
osd_scenario: lvm
copy_admin_key: true
# test_volume is created by tests/functional/lvm_setup.yml from /dev/sda
lvm_volumes:
  test_volume: /dev/sdb
os_tuning_params:
  - { name: kernel.pid_max, value: 4194303 }
  - { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
  global:
    osd_pool_default_pg_num: 8
    osd_pool_default_size: 1
@ -0,0 +1,5 @@
[mons]
mon0

[osds]
osd0
@ -0,0 +1,74 @@
---

# DEPLOY CONTAINERIZED DAEMONS
docker: false

# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0

# Deploy RESTAPI on each of the Monitors
restapi: true

# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable

# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.3
cluster_subnet: 192.168.4

# MEMORY
# set 1024 for CentOS
memory: 512

# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'

# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"

# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box

os_tuning_params:
  - { name: kernel.pid_max, value: 4194303 }
  - { name: fs.file-max, value: 26234859 }
@ -1,7 +1,7 @@
---
- hosts: localhost
  gather_facts: false
  become: yes
  become: no
  tags:
    - vagrant_setup
  tasks:
@ -0,0 +1,18 @@
---

- hosts: osds
  gather_facts: false
  become: yes
  tasks:
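    # Pre-create the PV, VG and LV that the lvm_osds test consumes via lvm_volumes
    # (test_volume in test_group, carved out of /dev/sda). failed_when: false keeps
    # reruns from aborting when these already exist.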

    - name: create physical volume
      command: pvcreate /dev/sda
      failed_when: false

    - name: create volume group
      command: vgcreate test_group /dev/sda
      failed_when: false

    - name: create logical volume
      command: lvcreate --yes -l 100%FREE -n test_volume test_group
      failed_when: false
tox.ini
@ -1,6 +1,6 @@
[tox]
envlist = {dev,jewel,luminous,rhcs}-{ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster}
  {dev,luminous}-{ansible2.2}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation}
  {dev,luminous}-{ansible2.2}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds}

skipsdist = True
@ -82,6 +82,7 @@ setenv=
  luminous: CEPH_DOCKER_IMAGE_TAG = tag-build-master-luminous-ubuntu-16.04
  luminous: UPDATE_CEPH_STABLE_RELEASE = luminous
  luminous: UPDATE_CEPH_DOCKER_IMAGE_TAG = tag-build-master-luminous-ubuntu-16.04
  lvm_osds: CEPH_STABLE_RELEASE = luminous
deps=
  ansible1.9: ansible==1.9.4
  ansible2.1: ansible==2.1
@ -117,6 +118,7 @@ changedir=
  bluestore_docker_cluster: {toxinidir}/tests/functional/centos/7/bs-docker
  bluestore_docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/bs-dock-ded-jrn
  bluestore_docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-dock-crypt-jrn-col
  lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds

commands=
  rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
@ -125,6 +127,8 @@ commands=
  vagrant up --no-provision {posargs:--provider=virtualbox}
  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}

  lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml

  rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"

  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\