Merge pull request #1803 from ceph/disk-auto

ceph-osd: fix autodetection activation
pull/1868/head
Sébastien Han 2017-09-07 22:51:40 +02:00 committed by GitHub
commit e3386b771d
18 changed files with 226 additions and 143 deletions

View File

@@ -2,18 +2,6 @@
# NOTE (leseb) : this task is for disk devices only because of the explicit use of the first
# partition.
- name: automatically activate osd disk(s) without partitions
command: ceph-disk activate "/dev/{{ item.key | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1"
ignore_errors: true
with_dict: "{{ ansible_devices }}"
when:
- ansible_devices is defined
- item.value.removable == "0"
- item.value.partitions|count == 0
- item.value.holders|count == 0
- osd_scenario == 'collocated'
- osd_auto_discovery
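The regex_replace in the removed task above encodes the device-naming quirk the NOTE alludes to: for most disks the first partition is the device path plus `1` (/dev/sda becomes /dev/sda1), but HP Smart Array (cciss) devices insert a `p` before the partition number (/dev/cciss/c0d0 becomes /dev/cciss/c0d0p1). A minimal Python sketch of the same substitution (the helper name is ours, not part of the playbook):

```python
import re

def first_partition(dev: str) -> str:
    """Return the first-partition path for a disk device (hypothetical helper).

    HP Smart Array (cciss) devices insert a 'p' before the partition
    number; every other device just gets the number appended directly.
    """
    # Same pattern as the Jinja2 regex_replace in the task above.
    return re.sub(r'^(/dev/cciss/c[0-9]d[0-9])$', r'\1p', dev) + '1'

assert first_partition('/dev/sda') == '/dev/sda1'
assert first_partition('/dev/cciss/c0d0') == '/dev/cciss/c0d0p1'
```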
- name: activate osd(s) when device is a disk
command: ceph-disk activate {{ item.1 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
with_together:
@@ -24,22 +12,8 @@
register: activate_osd_disk
when:
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0
- item.0.get("rc", 0) != "0"
- not osd_auto_discovery
- osd_scenario == 'non-collocated'
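One subtlety in this hunk is the guard changing from `item.0.get("rc", 0) != 0` to `item.0.get("rc", 0) != "0"`. Jinja2 comparisons follow Python semantics, where an integer return code never equals its string form, so the two guards diverge whenever `rc` is stored as an int. The sketch below only illustrates that semantic; it makes no claim about which form this play's registered data actually carries:

```python
rc = 0            # a return code kept as an int
print(rc != 0)    # False -> the int guard treats this as success
print(rc != "0")  # True  -> the string guard never matches an int rc
```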
- name: automatically activate osd disk(s) without partitions (dmcrypt)
command: ceph-disk activate --dmcrypt "/dev/{{ item.key }}"
ignore_errors: true
with_dict: "{{ ansible_devices }}"
when:
- ansible_devices is defined
- item.value.removable == "0"
- item.value.partitions|count == 0
- item.value.holders|count == 0
- osd_auto_discovery
- dmcrypt
- osd_scenario == 'collocated'
- name: activate osd(s) when device is a disk (dmcrypt)
command: ceph-disk activate --dmcrypt {{ item.1 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
@@ -51,10 +25,9 @@
register: activate_osd_disk_dmcrypt
when:
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0
- item.0.get("rc", 0) != "0"
- not osd_auto_discovery
- dmcrypt
- osd_scenario == 'non-collocated'
# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297
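Whatever the specifics of the linked issue, the guards throughout this file are written defensively: every condition goes through `item.0.get("skipped")` and `item.0.get("rc", 0)` rather than indexing the keys directly, which tolerates result entries that lack `rc` entirely — items skipped by a `when:` clause still appear in a registered loop's results as stubs carrying a `skipped` flag. A sketch of the pattern, with the stub shape assumed for illustration:

```python
# A registered loop result mixes real outcomes with skipped stubs
# (shape assumed for illustration; skipped items carry no 'rc' key).
results = [
    {"rc": 0, "stdout": "/dev/sda1"},  # the command ran
    {"skipped": True},                 # the 'when:' skipped this item
]

for item in results:
    if item.get("skipped"):
        continue                       # guard first: item["rc"] would raise KeyError
    if item.get("rc", 0) != 0:
        print("command failed for this item")
```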
@@ -79,5 +52,5 @@
failed_when: false
when:
- not item.0.get("skipped")
- item.0.get("rc", 0) == 0
- item.0.get("rc", 0) == "0"
- not osd_auto_discovery

View File

@@ -11,14 +11,6 @@
# for SSD journals.
- include: ./check_devices_static.yml
when:
- not osd_auto_discovery
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- include: ./check_devices_auto.yml
when:
- osd_auto_discovery
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

View File

@@ -1,88 +0,0 @@
---
- name: check if the device is a partition (autodiscover disks)
shell: "readlink -f /dev/{{ item.key }} | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}|fio[a-z]{1,2}[0-9]{1,2}$'"
with_dict: "{{ ansible_devices }}"
changed_when: false
failed_when: false
always_run: true
register: ispartition_results
when:
- ansible_devices is defined
- item.value.removable == 0
- name: check if any of the raw partitions are mounted
shell: "mount |grep -sq '^/dev/{{ item.key }} '"
args:
warn: false
ignore_errors: yes
with_dict: "{{ ansible_devices }}"
register: mount_cmd
changed_when: false
always_run: true
when:
- ansible_devices is defined
- item.value.removable == 0
- item.value.partitions|count == 0
- item.value.holders|count == 0
- name: fail if any of the raw partitions are mounted
fail:
msg: "OSD device autodetection failed because one or more raw partitions are mounted on the host."
with_items: "{{ mount_cmd.results }}"
when:
- not item.get("skipped")
- item.rc == 0
- name: check the partition status of the osd disks (autodiscover disks)
shell: "parted --script /dev/{{ item.key }} print > /dev/null 2>&1"
with_dict: "{{ ansible_devices }}"
changed_when: false
failed_when: false
always_run: true
register: osd_partition_status_results
when:
- ansible_devices is defined
- item.value.removable == 0
- item.value.partitions|count == 0
- item.value.holders|count == 0
- name: fix partitions gpt header or labels of the osd disks (autodiscover disks)
shell: "sgdisk --zap-all --clear --mbrtogpt -- '/dev/{{ item.0.item.key }}' || sgdisk --zap-all --clear --mbrtogpt -- '/dev/{{ item.0.item.key }}'"
with_together:
- "{{ osd_partition_status_results.results }}"
- "{{ ansible_devices }}"
changed_when: false
when:
- ansible_devices is defined
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0
- item.1.value.removable == 0
- item.1.value.partitions|count == 0
- item.1.value.holders|count == 0
- not containerized_deployment
- name: create gpt disk label
command: parted --script {{ item.1 }} mklabel gpt
with_together:
- "{{ osd_partition_status_results.results }}"
- "{{ ansible_devices }}"
changed_when: false
when:
- ansible_devices is defined
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0
- item.1.value.removable == 0
- item.1.value.partitions|count == 0
- item.1.value.holders|count == 0
- containerized_deployment
- name: check if a partition named 'ceph' exists (autodiscover disks)
shell: "parted --script /dev/{{ item.key }} print | egrep -sq '^ 1.*ceph'"
with_dict: "{{ ansible_devices }}"
changed_when: false
failed_when: false
always_run: true
register: parted_results
when:
- ansible_devices is defined
- item.value.removable == 0

View File

@@ -8,6 +8,17 @@
- include: ceph_disk_cli_options_facts.yml
- name: generate device list when osd_auto_discovery
set_fact:
devices: "{{ devices | default([]) + [ item.key | regex_replace('^', '/dev/') ] }}"
with_dict: "{{ ansible_devices }}"
when:
- ansible_devices is defined
- item.value.removable == "0"
- item.value.partitions|count == 0
- item.value.holders|count == 0
- osd_auto_discovery
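This new task is the core of the fix: rather than maintaining separate auto-discovery code paths, the play now folds every eligible bare disk from the `ansible_devices` fact into the same `devices` list a static configuration would supply, so the regular prepare/activate tasks run unchanged. The filter chain, approximated in Python (the dict literal is a trimmed stand-in for the real fact):

```python
# Trimmed stand-in for the ansible_devices fact.
ansible_devices = {
    "sda": {"removable": "0", "partitions": {}, "holders": []},            # eligible
    "sdb": {"removable": "0", "partitions": {"sdb1": {}}, "holders": []},  # partitioned
    "sr0": {"removable": "1", "partitions": {}, "holders": []},            # removable
}

devices = [
    "/dev/" + name                      # the regex_replace('^', '/dev/') step
    for name, info in ansible_devices.items()
    if info["removable"] == "0"         # skip removable media
    and len(info["partitions"]) == 0    # skip disks that already have partitions
    and len(info["holders"]) == 0       # skip disks held by md/LVM/dm-crypt
]
print(devices)  # ['/dev/sda']
```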
- include: check_devices.yml
- include: ./scenarios/collocated.yml

View File

@@ -55,20 +55,6 @@
- osd_auto_discovery
- containerized_deployment
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed; this is why we check if the device is a partition too.
- name: automatic prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) without partitions with collocated osd data and journal
command: "ceph-disk prepare {{ ceph_disk_cli_options }} /dev/{{ item.key }}"
register: prepared_osds
with_dict: "{{ ansible_devices }}"
when:
- ansible_devices is defined
- item.value.removable == "0"
- item.value.partitions|count == 0
- item.value.holders|count == 0
- osd_auto_discovery
- not containerized_deployment
- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with collocated osd data and journal
command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }}"
with_together:
@@ -80,7 +66,6 @@
- not item.1.get("skipped")
- item.0.get("rc", 0) != 0
- item.1.get("rc", 0) != 0
- not osd_auto_discovery
- not containerized_deployment
- include: ../activate_osds.yml

View File

@@ -19,6 +19,7 @@ def node(host, request):
ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "kraken")
node_type = ansible_vars["group_names"][0]
docker = ansible_vars.get("docker")
osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
lvm_scenario = ansible_vars.get("osd_scenario") == 'lvm'
if not request.node.get_marker(node_type) and not request.node.get_marker('all'):
pytest.skip("Not a valid test for node type: %s" % node_type)
@@ -50,7 +51,10 @@ def node(host, request):
address = host.interface("eth1").addresses[0]
subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
num_mons = len(ansible_vars["groups"]["mons"])
num_devices = len(ansible_vars.get("devices", []))
if osd_auto_discovery:
num_devices = 3
else:
num_devices = len(ansible_vars.get("devices", []))
if not num_devices:
num_devices = len(ansible_vars.get("lvm_volumes", []))
num_osd_hosts = len(ansible_vars["groups"]["osds"])

View File

@@ -0,0 +1 @@
../../../../../Vagrantfile

View File

@@ -0,0 +1,21 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: filestore
dmcrypt: true
osd_auto_discovery: true
osd_scenario: collocated
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -0,0 +1,4 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

View File

@@ -0,0 +1,5 @@
[mons]
mon0
[osds]
osd0

View File

@@ -0,0 +1,69 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.3
cluster_subnet: 192.168.4
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

View File

@@ -0,0 +1 @@
../../../../../Vagrantfile

View File

@@ -0,0 +1,20 @@
---
ceph_origin: repository
ceph_repository: community
cluster: test
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
monitor_interface: eth1
radosgw_interface: eth1
journal_size: 100
osd_objectstore: filestore
osd_auto_discovery: true
osd_scenario: collocated
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

View File

@@ -0,0 +1,4 @@
---
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

View File

@@ -0,0 +1,5 @@
[mons]
mon0
[osds]
osd0

View File

@@ -0,0 +1,69 @@
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# Deploy RESTAPI on each of the Monitors
restapi: true
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.3
cluster_subnet: 192.168.4
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

View File

@@ -3,5 +3,8 @@ class TestOSD(object):
def test_osds_are_all_collocated(self, node, host):
# TODO: figure out a way to parameterize node['vars']['devices'] for this test
osd_auto_discovery = node["vars"].get('osd_auto_discovery', False)
if osd_auto_discovery:
node["vars"]["devices"] = ["/dev/sda", "/dev/sdb", "/dev/sdc"] # Hardcoded since we can't retrieve the devices list generated during playbook run
for device in node["vars"]["devices"]:
assert host.check_output("sudo blkid -s PARTLABEL -o value %s2" % device) in ["ceph journal", "ceph block"]
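The assertion reads the GPT partition label of each device's second partition, which ceph-disk sets to "ceph journal" on filestore and "ceph block" on bluestore. A sketch of the same check as a standalone script (runnable only on a host with prepared OSD disks; the helper name is ours):

```python
import subprocess

def partlabel(device: str, partnum: int = 2) -> str:
    """Read a GPT partition label the same way the assertion above does."""
    return subprocess.check_output(
        ["blkid", "-s", "PARTLABEL", "-o", "value", f"{device}{partnum}"],
        text=True,
    ).strip()

# Expected: 'ceph journal' on a filestore OSD disk, 'ceph block' on bluestore.
print(partlabel("/dev/sda"))
```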

View File

@@ -1,6 +1,6 @@
[tox]
envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster,switch_to_containers}
{dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd}
{dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,journal_collocation_auto,journal_collocation_auto_dmcrypt}
skipsdist = True
@@ -152,6 +152,10 @@ changedir=
xenial_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
# tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario
journal_collocation: {toxinidir}/tests/functional/centos/7/jrn-col
# tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario with disk autodiscovery
journal_collocation_auto: {toxinidir}/tests/functional/centos/7/jrn-col-auto
# tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario with encrypted disk autodiscovery
journal_collocation_auto_dmcrypt: {toxinidir}/tests/functional/centos/7/jrn-col-auto-dm
# tests a 1 mon 1 osd centos7 cluster using dmcrypt non-collocated OSD scenario
dmcrypt_journal: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
# tests a 1 mon 1 osd centos7 cluster using dmcrypt collocated OSD scenario