mirror of https://github.com/ceph/ceph-ansible.git
Merge branch 'master' into wip-rm-calamari
commit 0e63f0f3c9

.mergify.yml
@@ -1,17 +1,86 @@
rules:
  default:
    protection:
      required_status_checks:
        strict: true
        contexts:
          - "Testing: ceph-ansible PR Pipeline"
      required_pull_request_reviews:
        required_approving_review_count: 1
    merge_strategy:
      method: rebase
    automated_backport_labels:
      backport-stable-3.0: stable-3.0
      backport-stable-3.1: stable-3.1
    disabling_label: DNM
    disabling_files:
      - .mergify.yml
pull_request_rules:
  - name: automatic merge
    conditions:
      - label!=DNM
      - '#approved-reviews-by>=1'
      - 'status-success=Testing: ceph-ansible PR Pipeline'
    actions:
      merge:
        method: rebase
        rebase_fallback: merge
        strict: smart
      dismiss_reviews: {}
      delete_head_branch: {}
  - name: automatic merge on skip ci
    conditions:
      - label!=DNM
      - title~=\[skip ci\]
      - '#approved-reviews-by>=1'
    actions:
      merge:
        method: rebase
        rebase_fallback: merge
        strict: smart
      dismiss_reviews: {}
      delete_head_branch: {}
  - name: automerge backport 3.0
    conditions:
      - author=mergify[bot]
      - base=stable-3.0
      - label!=DNM
      - 'status-success=Testing: ceph-ansible PR Pipeline'
    actions:
      merge:
        method: rebase
        rebase_fallback: merge
        strict: smart
      dismiss_reviews: {}
      delete_head_branch: {}
  - name: automerge backport 3.1
    conditions:
      - author=mergify[bot]
      - base=stable-3.1
      - label!=DNM
      - 'status-success=Testing: ceph-ansible PR Pipeline'
    actions:
      merge:
        method: rebase
        rebase_fallback: merge
        strict: smart
      dismiss_reviews: {}
      delete_head_branch: {}
  - name: automerge backport 3.2
    conditions:
      - author=mergify[bot]
      - base=stable-3.2
      - label!=DNM
      - 'status-success=Testing: ceph-ansible PR Pipeline'
    actions:
      merge:
        method: rebase
        rebase_fallback: merge
        strict: smart
      dismiss_reviews: {}
      delete_head_branch: {}
  # Backports
  - actions:
      backport:
        branches:
          - stable-3.0
    conditions:
      - label=backport-stable-3.0
    name: backport stable-3.0
  - actions:
      backport:
        branches:
          - stable-3.1
    conditions:
      - label=backport-stable-3.1
    name: backport stable-3.1
  - actions:
      backport:
        branches:
          - stable-3.2
    conditions:
      - label=backport-stable-3.2
    name: backport stable-3.2

@@ -2,7 +2,6 @@
# vi: set ft=ruby :

require 'yaml'
require 'time'
VAGRANTFILE_API_VERSION = '2'

config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))

@@ -33,7 +32,6 @@ DEBUG = settings['debug']

ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode')
DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false)
DISK_UUID = Time.now.utc.to_i


ansible_provision = proc do |ansible|

@@ -516,7 +514,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
      # always make /dev/sd{a/b/c} so that CI can ensure that
      # virtualbox and libvirt will have the same devices to use for OSDs
      (0..2).each do |d|
        lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '50G', :bus => "ide"
        lv.storage :file, :device => "hd#{driverletters[d]}", :size => '50G', :bus => "ide"
      end
      lv.memory = MEMORY
      lv.random_hostname = true

@@ -0,0 +1,73 @@
#!/bin/bash

create_snapshots() {
  local pattern=$1
  for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do
    sudo virsh shutdown "${vm}"
    wait_for_shutoff "${vm}"
    sudo virsh snapshot-create "${vm}"
    sudo virsh start "${vm}"
  done
}

delete_snapshots() {
  local pattern=$1
  for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do
    for snapshot in $(sudo virsh snapshot-list "${vm}" --name); do
      echo "deleting snapshot ${snapshot} (vm: ${vm})"
      sudo virsh snapshot-delete "${vm}" "${snapshot}"
    done
  done
}

revert_snapshots() {
  local pattern=$1
  for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do
    echo "restoring last snapshot for ${vm}"
    sudo virsh snapshot-revert "${vm}" --current
    sudo virsh start "${vm}"
  done
}

wait_for_shutoff() {
  local vm=$1
  local retries=60
  local delay=2

  until test "${retries}" -eq 0
  do
    echo "waiting for ${vm} to be shut off... #${retries}"
    sleep "${delay}"
    let "retries=$retries-1"
    local current_state=$(sudo virsh domstate "${vm}")
    test "${current_state}" == "shut off" && return
  done
  echo "couldn't shut off ${vm}"
  exit 1
}

while :; do
  case $1 in
    -d|--delete)
      delete_snapshots "$2"
      exit
      ;;
    -i|--interactive)
      INTERACTIVE=TRUE
      ;;
    -s|--snapshot)
      create_snapshots "$2"
      ;;
    -r|--revert)
      revert_snapshots "$2"
      ;;
    --)
      shift
      break
      ;;
    *)
      break
  esac

  shift
done

@@ -152,6 +152,26 @@ An example of a validation failure might look like:
       "changed": false
   }

Supported Validation
^^^^^^^^^^^^^^^^^^^^

The ``ceph-validate`` role currently supports validation of the proper config for the following
osd scenarios:

- ``collocated``
- ``non-collocated``
- ``lvm``

The following install options are also validated by the ``ceph-validate`` role:

- ``ceph_origin`` set to ``distro``
- ``ceph_origin`` set to ``repository``
- ``ceph_origin`` set to ``local``
- ``ceph_repository`` set to ``rhcs``
- ``ceph_repository`` set to ``dev``
- ``ceph_repository`` set to ``community``
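
For instance, a minimal sketch of install options that the role would validate
(the values shown here are illustrative, not recommendations):

.. code-block:: yaml

   ceph_origin: repository
   ceph_repository: community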

Installation methods
--------------------

@@ -202,7 +222,9 @@ selection or other aspects of your cluster.

- ``public_network``
- ``osd_scenario``
- ``monitor_interface`` or ``monitor_address``
- ``radosgw_interface`` or ``radosgw_address``


When deploying RGW instances you are required to set either the ``radosgw_interface`` or the ``radosgw_address`` config option.
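
A hedged sketch of those required options (the interface names and subnet below are placeholders):

.. code-block:: yaml

   public_network: 192.168.0.0/24
   osd_scenario: lvm
   monitor_interface: eth0
   radosgw_interface: eth0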

``ceph.conf`` Configuration File
--------------------------------

@@ -41,7 +41,7 @@ UCA repository

If ``ceph_repository`` is set to ``uca``, packages will by default be installed from
http://ubuntu-cloud.archive.canonical.com/ubuntu; this can be changed by tweaking ``ceph_stable_repo_uca``.
You can also decide which OpenStack version the Ceph packages should come from by tweaking ``ceph_stable_openstack_release_uca``.
For example, ``ceph_stable_openstack_release_uca: liberty``.
For example, ``ceph_stable_openstack_release_uca: queens``.
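
Put together, a hedged sketch of a UCA-based install (values are illustrative only):

.. code-block:: yaml

   ceph_origin: repository
   ceph_repository: uca
   ceph_stable_openstack_release_uca: queens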

Dev repository
~~~~~~~~~~~~~~

@@ -1,12 +1,244 @@
OSD Scenarios
=============

The following are all of the available options for the ``osd_scenario`` config
setting. Defining an ``osd_scenario`` is mandatory for using ``ceph-ansible``.
There are a few *scenarios* that are supported and the differences are mainly
based on the Ceph tooling required to provision OSDs, but can also affect how
devices are being configured to create an OSD.

Supported values for the required ``osd_scenario`` variable are:

* :ref:`collocated <osd_scenario_collocated>`
* :ref:`non-collocated <osd_scenario_non_collocated>`
* :ref:`lvm <osd_scenario_lvm>`

Since the Ceph mimic release, it is preferred to use the :ref:`lvm scenario
<osd_scenario_lvm>` that uses the ``ceph-volume`` provisioning tool. Any other
scenario will cause deprecation warnings.

All the scenarios mentioned above support both containerized and non-containerized clusters.
As a reminder, deploying a containerized cluster can be done by setting ``containerized_deployment``
to ``True``.
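
A minimal sketch of that toggle (purely illustrative):

.. code-block:: yaml

   osd_scenario: lvm
   containerized_deployment: True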

.. _osd_scenario_lvm:

lvm
---

This OSD scenario uses ``ceph-volume`` to create OSDs, primarily using LVM, and
is only available when the Ceph release is luminous or newer.

**It is the preferred method of provisioning OSDs.**

It is enabled with the following setting::


    osd_scenario: lvm

Other (optional) supported settings:

- ``osd_objectstore``: Set the Ceph *objectstore* for the OSD. Available options
  are ``filestore`` or ``bluestore``. You can only select ``bluestore`` if
  the Ceph release is luminous or greater. Defaults to ``bluestore`` if unset.

- ``dmcrypt``: Enable Ceph's encryption on OSDs using ``dmcrypt``.
  Defaults to ``false`` if unset.

- ``osds_per_device``: Provision more than 1 OSD (the default if unset) per device.
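
Taken together, a hedged sketch that combines these optional settings with ``devices``
(the device path is a placeholder):

.. code-block:: yaml

   osd_scenario: lvm
   osd_objectstore: bluestore
   dmcrypt: true
   osds_per_device: 2
   devices:
     - /dev/sda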


Simple configuration
^^^^^^^^^^^^^^^^^^^^

With this approach, most of the decisions on how devices are configured to
provision an OSD are made by the Ceph tooling (``ceph-volume lvm batch`` in
this case). There is almost no room to modify how the OSD is composed given an
input of devices.

To use this configuration, the ``devices`` option must be populated with the
raw device paths that will be used to provision the OSDs.


.. note:: Raw devices must be "clean", without a gpt partition table, or
          logical volumes present.


For example, for a node that has ``/dev/sda`` and ``/dev/sdb`` intended for
Ceph usage, the configuration would be:


.. code-block:: yaml

   osd_scenario: lvm
   devices:
     - /dev/sda
     - /dev/sdb

In the above case, if both devices are spinning drives, 2 OSDs would be
created, each with its own collocated journal.

Other provisioning strategies are possible, by mixing spinning and solid state
devices, for example:

.. code-block:: yaml

   osd_scenario: lvm
   devices:
     - /dev/sda
     - /dev/sdb
     - /dev/nvme0n1

Similar to the initial example, this would end up producing 2 OSDs, but data
would be placed on the slower spinning drives (``/dev/sda``, and ``/dev/sdb``)
and journals would be placed on the faster solid state device ``/dev/nvme0n1``.
The ``ceph-volume`` tool describes this in detail in
`the "batch" subcommand section <http://docs.ceph.com/docs/master/ceph-volume/lvm/batch/>`_


Other (optional) supported settings:

- ``crush_device_class``: Sets the CRUSH device class for all OSDs created with this
  method (it is not possible to have a per-OSD CRUSH device class using the *simple*
  configuration approach). Values *must be* a string, like
  ``crush_device_class: "ssd"``


Advanced configuration
^^^^^^^^^^^^^^^^^^^^^^

This configuration is useful when more granular control is wanted when setting
up devices and how they should be arranged to provision an OSD. It requires an
existing setup of volume groups and logical volumes (``ceph-volume`` will **not**
create these).

To use this configuration, the ``lvm_volumes`` option must be populated with
logical volumes and volume groups. Additionally, absolute paths to partitions
*can* be used for ``journal``, ``block.db``, and ``block.wal``.

.. note:: This configuration uses ``ceph-volume lvm create`` to provision OSDs

Supported ``lvm_volumes`` configuration settings:

- ``data``: The logical volume name or full path to a raw device (an LV will be
  created using 100% of the raw device)

- ``data_vg``: The volume group name, **required** if ``data`` is a logical volume.

- ``crush_device_class``: CRUSH device class name for the resulting OSD, allows
  setting the device class for each OSD, unlike the global ``crush_device_class``
  that sets them for all OSDs.

.. note:: If you wish to set the ``crush_device_class`` for the OSDs
          when using ``devices`` you must set it using the global ``crush_device_class``
          option as shown above. There is no way to define a specific CRUSH device class
          per OSD when using ``devices`` like there is for ``lvm_volumes``.


``filestore`` objectstore variables:

- ``journal``: The logical volume name or full path to a partition.

- ``journal_vg``: The volume group name, **required** if ``journal`` is a logical volume.

.. warning:: Each entry must be unique, duplicate values are not allowed


``bluestore`` objectstore variables:

- ``db``: The logical volume name or full path to a partition.

- ``db_vg``: The volume group name, **required** if ``db`` is a logical volume.

- ``wal``: The logical volume name or full path to a partition.

- ``wal_vg``: The volume group name, **required** if ``wal`` is a logical volume.


.. note:: These ``bluestore`` variables are optional optimizations. Bluestore's
          ``db`` and ``wal`` will only benefit from faster devices. It is possible to
          create a bluestore OSD with a single raw device.

.. warning:: Each entry must be unique, duplicate values are not allowed


``bluestore`` example using raw devices:

.. code-block:: yaml

   osd_objectstore: bluestore
   osd_scenario: lvm
   lvm_volumes:
     - data: /dev/sda
     - data: /dev/sdb

.. note:: Volume groups and logical volumes will be created in this case,
          utilizing 100% of the devices.

``bluestore`` example with logical volumes:

.. code-block:: yaml

   osd_objectstore: bluestore
   osd_scenario: lvm
   lvm_volumes:
     - data: data-lv1
       data_vg: data-vg1
     - data: data-lv2
       data_vg: data-vg2

.. note:: Volume groups and logical volumes must exist.


``bluestore`` example defining ``wal`` and ``db`` logical volumes:

.. code-block:: yaml

   osd_objectstore: bluestore
   osd_scenario: lvm
   lvm_volumes:
     - data: data-lv1
       data_vg: data-vg1
       db: db-lv1
       db_vg: db-vg1
       wal: wal-lv1
       wal_vg: wal-vg1
     - data: data-lv2
       data_vg: data-vg2
       db: db-lv2
       db_vg: db-vg2
       wal: wal-lv2
       wal_vg: wal-vg2

.. note:: Volume groups and logical volumes must exist.


``filestore`` example with logical volumes:

.. code-block:: yaml

   osd_objectstore: filestore
   osd_scenario: lvm
   lvm_volumes:
     - data: data-lv1
       data_vg: data-vg1
       journal: journal-lv1
       journal_vg: journal-vg1
     - data: data-lv2
       data_vg: data-vg2
       journal: journal-lv2
       journal_vg: journal-vg2

.. note:: Volume groups and logical volumes must exist.


.. _osd_scenario_collocated:

collocated
----------

.. warning:: This scenario is deprecated in the Ceph mimic release, and fully
             removed in newer releases. It is recommended to use the
             :ref:`lvm scenario <osd_scenario_lvm>` instead

This OSD scenario uses ``ceph-disk`` to create OSDs with collocated journals
from raw devices.
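
As a hedged illustration only (device paths are placeholders), a ``collocated``
setup typically looks like the following; the required options are listed in the
next section:

.. code-block:: yaml

   osd_scenario: collocated
   osd_objectstore: filestore
   devices:
     - /dev/sda
     - /dev/sdb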
@@ -18,7 +250,7 @@ has the following required configuration options:

This scenario has the following optional configuration options:

- ``osd_objectstore``: defaults to ``filestore`` if not set. Available options are ``filestore`` or ``bluestore``.
  You can only select ``bluestore`` if the Ceph release is Luminous or greater.
  You can only select ``bluestore`` if the Ceph release is luminous or greater.

- ``dmcrypt``: defaults to ``false`` if not set.

@@ -53,9 +285,16 @@ An example of using the ``collocated`` OSD scenario with encryption would look like:

      - /dev/sda
      - /dev/sdb


.. _osd_scenario_non_collocated:

non-collocated
--------------

.. warning:: This scenario is deprecated in the Ceph mimic release, and fully
             removed in newer releases. It is recommended to use the
             :ref:`lvm scenario <osd_scenario_lvm>` instead

This OSD scenario uses ``ceph-disk`` to create OSDs from raw devices with journals that
exist on a dedicated device.

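
A hedged sketch of a ``non-collocated`` layout (device paths are placeholders), with
journals kept on a separate device; the supported options follow below:

.. code-block:: yaml

   osd_scenario: non-collocated
   devices:
     - /dev/sda
     - /dev/sdb
   dedicated_devices:
     - /dev/sdc
     - /dev/sdc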
@@ -69,7 +308,7 @@ This scenario has the following optional configuration options:

- ``dedicated_devices``: defaults to ``devices`` if not set

- ``osd_objectstore``: defaults to ``filestore`` if not set. Available options are ``filestore`` or ``bluestore``.
  You can only select ``bluestore`` if the Ceph release is Luminous or greater.
  You can only select ``bluestore`` if the Ceph release is luminous or greater.

- ``dmcrypt``: defaults to ``false`` if not set.

@@ -170,186 +409,3 @@ An example of using the ``non-collocated`` OSD scenario with encryption, bluesto

   bluestore_wal_devices:
     - /dev/sdd
     - /dev/sdd

lvm
---

This OSD scenario uses ``ceph-volume`` to create OSDs from logical volumes and
is only available when the Ceph release is Luminous or newer.


Configurations
^^^^^^^^^^^^^^

``lvm_volumes`` or ``devices`` are the config options that need to be defined to deploy OSDs
with the ``lvm`` osd scenario.

- ``lvm_volumes`` is a list of dictionaries which expects a volume name and a volume group for
  logical volumes, but can also accept a partition in the case of ``filestore`` for the ``journal``.
  If ``lvm_volumes`` is defined then the ``ceph-volume lvm create`` command is used to create each OSD
  defined in ``lvm_volumes``.

- ``devices`` is a list of raw device names as strings. If ``devices`` is defined then the ``ceph-volume lvm batch``
  command will be used to deploy OSDs. You can also use the ``osds_per_device`` variable to inform ``ceph-volume`` how
  many OSDs it should create from each device it finds suitable.

Both ``lvm_volumes`` and ``devices`` can be defined and both methods would be used in the deployment, or you
can pick just one method.

This scenario supports encrypting your OSDs by setting ``dmcrypt: True``. If set,
all OSDs defined in ``lvm_volumes`` will be encrypted.

The ``data`` key represents the logical volume name, raw device or partition that is to be used for your
OSD data. The ``data_vg`` key represents the volume group name that your
``data`` logical volume resides on. This key is required for purging of OSDs
created by this scenario.

.. note::

   Any logical volume or logical group used in ``lvm_volumes`` must be a name and not a path.

.. note::

   You can not use the same journal for many OSDs.


``filestore``
^^^^^^^^^^^^^

There is filestore support which can be enabled with:

.. code-block:: yaml

   osd_objectstore: filestore

To configure this scenario use the ``lvm_volumes`` config option.
``lvm_volumes`` is a list of dictionaries which expects a volume name and
a volume group for logical volumes, but can also accept a partition in the case of
``filestore`` for the ``journal``.

The following keys are accepted for a ``filestore`` deployment:

* ``data``
* ``data_vg`` (not required if ``data`` is a raw device or partition)
* ``journal``
* ``journal_vg`` (not required if ``journal`` is a partition and not a logical volume)
* ``crush_device_class`` (optional, sets the crush device class for the OSD)

The ``journal`` key represents the logical volume name or partition that will be used for your OSD journal.

For example, a configuration to use the ``lvm`` osd scenario would look like:

.. code-block:: yaml

   osd_objectstore: filestore
   osd_scenario: lvm
   lvm_volumes:
     - data: data-lv1
       data_vg: vg1
       journal: journal-lv1
       journal_vg: vg2
       crush_device_class: foo
     - data: data-lv2
       journal: /dev/sda
       data_vg: vg1
     - data: data-lv3
       journal: /dev/sdb1
       data_vg: vg2
     - data: /dev/sda
       journal: /dev/sdb1
     - data: /dev/sda1
       journal: journal-lv1
       journal_vg: vg2

For example, a configuration to use the ``lvm`` osd scenario with encryption would look like:

.. code-block:: yaml

   osd_objectstore: filestore
   osd_scenario: lvm
   dmcrypt: True
   lvm_volumes:
     - data: data-lv1
       data_vg: vg1
       journal: journal-lv1
       journal_vg: vg2
       crush_device_class: foo

If you wished to use ``devices`` instead of ``lvm_volumes`` your configuration would look like:

.. code-block:: yaml

   osd_objectstore: filestore
   osd_scenario: lvm
   crush_device_class: foo
   devices:
     - /dev/sda
     - /dev/sdc

.. note::

   If you wish to set the ``crush_device_class`` for the OSDs when using ``devices`` you must set it
   using the global ``crush_device_class`` option as shown above. There is no way to define a specific crush device
   class per OSD when using ``devices`` like there is for ``lvm_volumes``.

``bluestore``
^^^^^^^^^^^^^

This scenario allows a combination of devices to be used in an OSD.
``bluestore`` can work just with a single "block" device (specified by the
``data`` and optionally ``data_vg``) or additionally with a ``block.wal`` and ``block.db``
(interchangeably)

The following keys are accepted for a ``bluestore`` deployment:

* ``data`` (required)
* ``data_vg`` (not required if ``data`` is a raw device or partition)
* ``db`` (optional for ``block.db``)
* ``db_vg`` (optional for ``block.db``)
* ``wal`` (optional for ``block.wal``)
* ``wal_vg`` (optional for ``block.wal``)
* ``crush_device_class`` (optional, sets the crush device class for the OSD)

A ``bluestore`` lvm deployment, for all four different combinations supported,
could look like:

.. code-block:: yaml

   osd_objectstore: bluestore
   osd_scenario: lvm
   lvm_volumes:
     - data: data-lv1
       data_vg: vg1
       crush_device_class: foo
     - data: data-lv2
       data_vg: vg1
       wal: wal-lv1
       wal_vg: vg2
     - data: data-lv3
       data_vg: vg2
       db: db-lv1
       db_vg: vg2
     - data: data-lv4
       data_vg: vg4
       db: db-lv4
       db_vg: vg4
       wal: wal-lv4
       wal_vg: vg4
     - data: /dev/sda

If you wished to use ``devices`` instead of ``lvm_volumes`` your configuration would look like:

.. code-block:: yaml

   osd_objectstore: bluestore
   osd_scenario: lvm
   crush_device_class: foo
   devices:
     - /dev/sda
     - /dev/sdc

.. note::

   If you wish to set the ``crush_device_class`` for the OSDs when using ``devices`` you must set it
   using the global ``crush_device_class`` option as shown above. There is no way to define a specific crush device
   class per OSD when using ``devices`` like there is for ``lvm_volumes``.

@ -56,16 +56,10 @@ dummy:
|
|||
#iscsi_gw_group_name: iscsigws
|
||||
#mgr_group_name: mgrs
|
||||
|
||||
# If check_firewall is true, then ansible will try to determine if the
|
||||
# Ceph ports are blocked by a firewall. If the machine running ansible
|
||||
# cannot reach the Ceph ports for some other reason, you may need or
|
||||
# want to set this to False to skip those checks.
|
||||
#check_firewall: False
|
||||
|
||||
# If configure_firewall is true, then ansible will try to configure the
|
||||
# appropriate firewalling rules so that Ceph daemons can communicate
|
||||
# with each others.
|
||||
#configure_firewall: False
|
||||
#configure_firewall: True
|
||||
|
||||
# Open ports on corresponding nodes if firewall is installed on it
|
||||
#ceph_mon_firewall_zone: public
|
||||
|
@ -105,10 +99,15 @@ dummy:
|
|||
# Whether or not to install the ceph-test package.
|
||||
#ceph_test: false
|
||||
|
||||
# Enable the ntp service by default to avoid clock skew on
|
||||
# ceph nodes
|
||||
# Enable the ntp service by default to avoid clock skew on ceph nodes
|
||||
# Disable if an appropriate NTP client is already installed and configured
|
||||
#ntp_service_enabled: true
|
||||
|
||||
# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd
|
||||
# Note that this selection is currently ignored on containerized deployments
|
||||
#ntp_daemon_type: timesyncd
|
||||
|
||||
|
||||
# Set uid/gid to default '64045' for bootstrap directories.
|
||||
# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
|
||||
# These values have to be set according to the base OS used by the container image, NOT the host.
|
||||
|
@ -214,7 +213,7 @@ dummy:
|
|||
#
|
||||
#
|
||||
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
|
||||
#ceph_stable_openstack_release_uca: liberty
|
||||
#ceph_stable_openstack_release_uca: queens
|
||||
#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}"
|
||||
|
||||
# REPOSITORY: openSUSE OBS
|
||||
|
@ -355,8 +354,8 @@ dummy:
|
|||
#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
|
||||
|
||||
#cephfs_pools:
|
||||
# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
|
||||
# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
|
||||
# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
|
||||
# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
|
||||
|
||||
## OSD options
|
||||
#
|
||||
|
@ -365,12 +364,13 @@ dummy:
|
|||
#non_hci_safety_factor: 0.7
|
||||
#osd_memory_target: 4000000000
|
||||
#journal_size: 5120 # OSD journal size in MB
|
||||
#block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
|
||||
#public_network: 0.0.0.0/0
|
||||
#cluster_network: "{{ public_network | regex_replace(' ', '') }}"
|
||||
#osd_mkfs_type: xfs
|
||||
#osd_mkfs_options_xfs: -f -i size=2048
|
||||
#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
|
||||
#osd_objectstore: filestore
|
||||
#osd_objectstore: bluestore
|
||||
|
||||
# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
|
||||
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
|
||||
|
@ -495,7 +495,7 @@ dummy:
|
|||
# OS TUNING #
|
||||
#############
|
||||
|
||||
#disable_transparent_hugepage: true
|
||||
#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"
|
||||
#os_tuning_params:
|
||||
# - { name: fs.file-max, value: 26234859 }
|
||||
# - { name: vm.zone_reclaim_mode, value: 0 }
|
||||
|
@ -516,6 +516,10 @@ dummy:
|
|||
#ceph_docker_image: "ceph/daemon"
|
||||
#ceph_docker_image_tag: latest
|
||||
#ceph_docker_registry: docker.io
|
||||
## Client only docker image - defaults to {{ ceph_docker_image }}
|
||||
#ceph_client_docker_image: "{{ ceph_docker_image }}"
|
||||
#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
|
||||
#ceph_client_docker_registry: "{{ ceph_docker_registry }}"
|
||||
#ceph_docker_enable_centos_extra_repo: false
|
||||
#ceph_docker_on_openstack: false
|
||||
#containerized_deployment: False
|
||||
|
@ -554,6 +558,7 @@ dummy:
|
|||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_cinder_pool:
|
||||
# name: "volumes"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -563,6 +568,7 @@ dummy:
|
|||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_nova_pool:
|
||||
# name: "vms"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -572,6 +578,7 @@ dummy:
|
|||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_cinder_backup_pool:
|
||||
# name: "backups"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -581,6 +588,7 @@ dummy:
|
|||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_gnocchi_pool:
|
||||
# name: "metrics"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -590,6 +598,27 @@ dummy:
|
|||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_cephfs_data_pool:
|
||||
# name: "manila_data"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
# rule_name: "replicated_rule"
|
||||
# type: 1
|
||||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_cephfs_metadata_pool:
|
||||
# name: "manila_metadata"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
# rule_name: "replicated_rule"
|
||||
# type: 1
|
||||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
|
||||
#openstack_pools:
|
||||
# - "{{ openstack_glance_pool }}"
|
||||
|
@ -597,6 +626,8 @@ dummy:
|
|||
# - "{{ openstack_nova_pool }}"
|
||||
# - "{{ openstack_cinder_backup_pool }}"
|
||||
# - "{{ openstack_gnocchi_pool }}"
|
||||
# - "{{ openstack_cephfs_data_pool }}"
|
||||
# - "{{ openstack_cephfs_metadata_pool }}"
|
||||
|
||||
|
||||
# The value for 'key' can be a pre-generated key,
|
||||
|
|
|
@ -26,6 +26,7 @@ dummy:
|
|||
# type: 1
|
||||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# size: ""
|
||||
#test2:
|
||||
# name: "test2"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -34,6 +35,7 @@ dummy:
|
|||
# type: 1
|
||||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# size: ""
|
||||
#pools:
|
||||
# - "{{ test }}"
|
||||
# - "{{ test2 }}"
|
||||
|
|
|
@ -64,6 +64,7 @@ dummy:
|
|||
# Whether or not to generate secure certificate to iSCSI gateway nodes
|
||||
#generate_crt: False
|
||||
|
||||
#rbd_pool_size: ""
|
||||
|
||||
##################
|
||||
# RBD-TARGET-API #
|
||||
|
|
|
@ -45,10 +45,13 @@ dummy:
|
|||
#rgw_create_pools:
|
||||
# defaults.rgw.buckets.data:
|
||||
# pg_num: 16
|
||||
# size: ""
|
||||
# defaults.rgw.buckets.index:
|
||||
# pg_num: 32
|
||||
# size: ""
|
||||
# foo:
|
||||
# pg_num: 4
|
||||
# size: ""
|
||||
|
||||
|
||||
##########
|
||||
|
|
|
@ -56,16 +56,10 @@ fetch_directory: ~/ceph-ansible-keys
|
|||
#iscsi_gw_group_name: iscsigws
|
||||
#mgr_group_name: mgrs
|
||||
|
||||
# If check_firewall is true, then ansible will try to determine if the
|
||||
# Ceph ports are blocked by a firewall. If the machine running ansible
|
||||
# cannot reach the Ceph ports for some other reason, you may need or
|
||||
# want to set this to False to skip those checks.
|
||||
#check_firewall: False
|
||||
|
||||
# If configure_firewall is true, then ansible will try to configure the
|
||||
# appropriate firewalling rules so that Ceph daemons can communicate
|
||||
# with each others.
|
||||
#configure_firewall: False
|
||||
#configure_firewall: True
|
||||
|
||||
# Open ports on corresponding nodes if firewall is installed on it
|
||||
#ceph_mon_firewall_zone: public
|
||||
|
@ -105,10 +99,15 @@ fetch_directory: ~/ceph-ansible-keys
|
|||
# Whether or not to install the ceph-test package.
|
||||
#ceph_test: false
|
||||
|
||||
# Enable the ntp service by default to avoid clock skew on
|
||||
# ceph nodes
|
||||
# Enable the ntp service by default to avoid clock skew on ceph nodes
|
||||
# Disable if an appropriate NTP client is already installed and configured
|
||||
#ntp_service_enabled: true
|
||||
|
||||
# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd
|
||||
# Note that this selection is currently ignored on containerized deployments
|
||||
#ntp_daemon_type: timesyncd
|
||||
|
||||
|
||||
# Set uid/gid to default '64045' for bootstrap directories.
|
||||
# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
|
||||
# These values have to be set according to the base OS used by the container image, NOT the host.
|
||||
|
@ -214,7 +213,7 @@ ceph_rhcs_version: 3
|
|||
#
|
||||
#
|
||||
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
|
||||
#ceph_stable_openstack_release_uca: liberty
|
||||
#ceph_stable_openstack_release_uca: queens
|
||||
#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}"
|
||||
|
||||
# REPOSITORY: openSUSE OBS
|
||||
|
@ -355,8 +354,8 @@ ceph_rhcs_version: 3
|
|||
#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
|
||||
|
||||
#cephfs_pools:
|
||||
# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
|
||||
# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
|
||||
# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
|
||||
# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
|
||||
|
||||
## OSD options
|
||||
#
|
||||
|
@ -365,12 +364,13 @@ ceph_rhcs_version: 3
|
|||
#non_hci_safety_factor: 0.7
|
||||
#osd_memory_target: 4000000000
|
||||
#journal_size: 5120 # OSD journal size in MB
|
||||
#block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
|
||||
#public_network: 0.0.0.0/0
|
||||
#cluster_network: "{{ public_network | regex_replace(' ', '') }}"
|
||||
#osd_mkfs_type: xfs
|
||||
#osd_mkfs_options_xfs: -f -i size=2048
|
||||
#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
|
||||
#osd_objectstore: filestore
|
||||
#osd_objectstore: bluestore
|
||||
|
||||
# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
|
||||
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
|
||||
|
@ -495,7 +495,7 @@ ceph_rhcs_version: 3
|
|||
# OS TUNING #
|
||||
#############
|
||||
|
||||
#disable_transparent_hugepage: true
|
||||
#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"
|
||||
#os_tuning_params:
|
||||
# - { name: fs.file-max, value: 26234859 }
|
||||
# - { name: vm.zone_reclaim_mode, value: 0 }
|
||||
|
@ -513,9 +513,13 @@ ceph_rhcs_version: 3
|
|||
##########
|
||||
#docker_exec_cmd:
|
||||
#docker: false
|
||||
#ceph_docker_image: "ceph/daemon"
|
||||
#ceph_docker_image_tag: latest
|
||||
#ceph_docker_registry: docker.io
|
||||
ceph_docker_image: "rhceph-3-rhel7"
|
||||
ceph_docker_image_tag: "latest"
|
||||
ceph_docker_registry: "registry.access.redhat.com/rhceph/"
|
||||
## Client only docker image - defaults to {{ ceph_docker_image }}
|
||||
#ceph_client_docker_image: "{{ ceph_docker_image }}"
|
||||
#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
|
||||
#ceph_client_docker_registry: "{{ ceph_docker_registry }}"
|
||||
#ceph_docker_enable_centos_extra_repo: false
|
||||
#ceph_docker_on_openstack: false
|
||||
#containerized_deployment: False
|
||||
|
@ -554,6 +558,7 @@ ceph_rhcs_version: 3
|
|||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_cinder_pool:
|
||||
# name: "volumes"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -563,6 +568,7 @@ ceph_rhcs_version: 3
|
|||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_nova_pool:
|
||||
# name: "vms"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -572,6 +578,7 @@ ceph_rhcs_version: 3
|
|||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_cinder_backup_pool:
|
||||
# name: "backups"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -581,6 +588,7 @@ ceph_rhcs_version: 3
|
|||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_gnocchi_pool:
|
||||
# name: "metrics"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -590,6 +598,27 @@ ceph_rhcs_version: 3
|
|||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_cephfs_data_pool:
|
||||
# name: "manila_data"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
# rule_name: "replicated_rule"
|
||||
# type: 1
|
||||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
#openstack_cephfs_metadata_pool:
|
||||
# name: "manila_metadata"
|
||||
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
# rule_name: "replicated_rule"
|
||||
# type: 1
|
||||
# erasure_profile: ""
|
||||
# expected_num_objects: ""
|
||||
# application: "rbd"
|
||||
# size: ""
|
||||
|
||||
#openstack_pools:
|
||||
# - "{{ openstack_glance_pool }}"
|
||||
|
@ -597,6 +626,8 @@ ceph_rhcs_version: 3
|
|||
# - "{{ openstack_nova_pool }}"
|
||||
# - "{{ openstack_cinder_backup_pool }}"
|
||||
# - "{{ openstack_gnocchi_pool }}"
|
||||
# - "{{ openstack_cephfs_data_pool }}"
|
||||
# - "{{ openstack_cephfs_metadata_pool }}"
|
||||
|
||||
|
||||
# The value for 'key' can be a pre-generated key,
|
||||
|
|
|
@ -0,0 +1,80 @@
|
|||
---
|
||||
# This playbook is used to add a new OSD to
|
||||
# an existing cluster without the need for running
|
||||
# the ceph-docker-common or ceph-common and ceph-mon role again against all
|
||||
# of the existing monitors.
|
||||
#
|
||||
# It can run from any machine. Even if the fetch directory is not present
|
||||
# it will be created.
|
||||
#
|
||||
# Ensure that all monitors are present in the mons
|
||||
# group in your inventory so that the ceph configuration file
|
||||
# is created correctly for the new OSD(s).
|
||||
#
|
||||
# It is expected that you edit your inventory file so it points only at the OSD hosts
# you want to run this playbook against: comment out the already-deployed OSD hosts
# and leave the new OSD hosts uncommented.
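# A hypothetical inventory sketch for this workflow (host names are placeholders):
#   [mons]
#   mon0
#   [osds]
#   #osd0    <- already deployed, commented out
#   osd1     <- the new OSD host being added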
|
||||
#
|
||||
- hosts:
|
||||
- mons
|
||||
- osds
|
||||
|
||||
gather_facts: False
|
||||
|
||||
vars:
|
||||
delegate_facts_host: True
|
||||
|
||||
pre_tasks:
|
||||
- name: gather facts
|
||||
setup:
|
||||
when:
|
||||
- not delegate_facts_host | bool
|
||||
|
||||
- name: gather and delegate facts
|
||||
setup:
|
||||
delegate_to: "{{ item }}"
|
||||
delegate_facts: True
|
||||
with_items:
|
||||
- "{{ groups['mons'] }}"
|
||||
- "{{ groups['osds'] }}"
|
||||
run_once: True
|
||||
when:
|
||||
- delegate_facts_host | bool
|
||||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-validate
|
||||
|
||||
- hosts: osds
|
||||
gather_facts: False
|
||||
become: True
|
||||
|
||||
pre_tasks:
|
||||
# this task is needed so we can skip the openstack_config.yml include in roles/ceph-osd
|
||||
- name: set_fact add_osd
|
||||
set_fact:
|
||||
add_osd: True
|
||||
|
||||
- name: set noup flag
|
||||
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set noup"
|
||||
delegate_to: "{{ groups['mons'][0] }}"
|
||||
run_once: True
|
||||
changed_when: False
|
||||
|
||||
roles:
|
||||
- role: ceph-defaults
|
||||
- role: ceph-handler
|
||||
- role: ceph-infra
|
||||
- role: ceph-docker-common
|
||||
when: containerized_deployment | bool
|
||||
- role: ceph-common
|
||||
when: not containerized_deployment | bool
|
||||
- role: ceph-config
|
||||
- role: ceph-osd
|
||||
|
||||
post_tasks:
|
||||
- name: unset noup flag
|
||||
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup"
|
||||
delegate_to: "{{ groups['mons'][0] }}"
|
||||
run_once: True
|
||||
changed_when: False
|
|
@ -0,0 +1,42 @@
|
|||
- hosts:
|
||||
- mons
|
||||
- agents
|
||||
- osds
|
||||
- mdss
|
||||
- rgws
|
||||
- nfss
|
||||
- restapis
|
||||
- rbdmirrors
|
||||
- clients
|
||||
- mgrs
|
||||
- iscsi-gws
|
||||
- iscsigws
|
||||
|
||||
gather_facts: false
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: create a temp directory
|
||||
local_action:
|
||||
module: tempfile
|
||||
state: directory
|
||||
prefix: ceph_ansible
|
||||
run_once: true
|
||||
register: localtempfile
|
||||
|
||||
- name: set_fact lookup_ceph_config - lookup keys, conf and logs
|
||||
shell: ls -1 {{ item }}
|
||||
register: ceph_collect
|
||||
changed_when: false
|
||||
with_items:
|
||||
- /etc/ceph/*
|
||||
- /var/log/ceph/*
|
||||
|
||||
- name: collect ceph logs, config and keys in "{{ localtempfile.path }}" on the machine running ansible
|
||||
fetch:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ localtempfile.path }}"
|
||||
fail_on_missing: no
|
||||
flat: no
|
||||
with_items:
|
||||
- "{{ ceph_collect.stdout_lines }}"
|
|
@ -27,6 +27,12 @@
|
|||
file: lv_vars.yaml
|
||||
failed_when: false
|
||||
|
||||
# ensure nvme_device is set
|
||||
- name: fail if nvme_device is not defined
|
||||
fail:
|
||||
msg: "nvme_device has not been set by the user"
|
||||
when: nvme_device is undefined or nvme_device == 'dummy'
|
||||
|
||||
# need to check if lvm2 is installed
|
||||
- name: install lvm2
|
||||
package:
|
||||
|
|
|
@ -1,20 +0,0 @@
|
|||
---
|
||||
# This playbook is used to add a new OSD to
|
||||
# an existing cluster without the need for running
|
||||
# the ceph-common or ceph-mon role again against all
|
||||
# of the existing monitors.
|
||||
#
|
||||
# Ensure that all monitors are present in the mons
|
||||
# group in your inventory so that the ceph.conf is
|
||||
# created correctly for the new OSD.
|
||||
- hosts: mons
|
||||
become: True
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-fetch-keys
|
||||
|
||||
- hosts: osds
|
||||
become: True
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-osd
|
|
@ -222,7 +222,7 @@
|
|||
timeout: 500
|
||||
|
||||
- name: remove data
|
||||
command: rm -rf /var/lib/ceph/*
|
||||
shell: rm -rf /var/lib/ceph/*
|
||||
|
||||
tasks:
|
||||
|
||||
|
@ -350,7 +350,7 @@
|
|||
failed_when: false
|
||||
register: ceph_lockbox_partition_to_erase_path
|
||||
|
||||
- name: zap and destroy OSDs created by ceph-volume
|
||||
- name: zap and destroy osds created by ceph-volume with lvm_volumes
|
||||
ceph_volume:
|
||||
data: "{{ item.data }}"
|
||||
data_vg: "{{ item.data_vg|default(omit) }}"
|
||||
|
@ -367,6 +367,16 @@
|
|||
when:
|
||||
- osd_scenario == "lvm"
|
||||
|
||||
- name: zap and destroy osds created by ceph-volume with devices
|
||||
ceph_volume:
|
||||
data: "{{ item }}"
|
||||
action: "zap"
|
||||
environment:
|
||||
CEPH_VOLUME_DEBUG: 1
|
||||
with_items: "{{ devices | default([]) }}"
|
||||
when:
|
||||
- osd_scenario == "lvm"
|
||||
|
||||
- name: get ceph block partitions
|
||||
shell: |
|
||||
blkid -o device -t PARTLABEL="ceph block"
|
||||
|
@ -534,7 +544,7 @@
|
|||
listen: "remove data"
|
||||
|
||||
- name: remove data
|
||||
command: rm -rf /var/lib/ceph/*
|
||||
shell: rm -rf /var/lib/ceph/*
|
||||
listen: "remove data"
|
||||
|
||||
tasks:
|
||||
|
@ -633,6 +643,8 @@
|
|||
find:
|
||||
paths: "/etc/systemd/system"
|
||||
pattern: "ceph*"
|
||||
recurse: true
|
||||
file_type: any
|
||||
register: systemd_files
|
||||
|
||||
- name: remove ceph systemd unit files
|
||||
|
|
|
@ -654,7 +654,7 @@
|
|||
- /var/log/ceph
|
||||
|
||||
- name: remove data
|
||||
command: rm -rf /var/lib/ceph/*
|
||||
shell: rm -rf /var/lib/ceph/*
|
||||
|
||||
|
||||
- name: purge fetch directory
|
||||
|
|
|
@ -35,5 +35,20 @@
|
|||
igw_purge: mode="disks"
|
||||
when: igw_purge_type == 'all'
|
||||
|
||||
- name: stop and disable rbd-target-api daemon
|
||||
service:
|
||||
name: rbd-target-api
|
||||
state: stopped
|
||||
enabled: no
|
||||
when: igw_purge_type == 'all'
|
||||
|
||||
- name: stop and disable rbd-target-gw daemon
|
||||
service:
|
||||
name: rbd-target-gw
|
||||
state: stopped
|
||||
enabled: no
|
||||
when: igw_purge_type == 'all'
|
||||
|
||||
- name: restart rbd-target-gw daemons
|
||||
service: name=rbd-target-gw state=restarted
|
||||
when: igw_purge_type == 'lio'
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
become: false
|
||||
vars:
|
||||
- mgr_group_name: mgrs
|
||||
- jewel_minor_update: False
|
||||
|
||||
vars_prompt:
|
||||
- name: ireallymeanit
|
||||
|
@ -41,7 +40,6 @@
|
|||
fail:
|
||||
msg: "Please add a mgr host to your inventory."
|
||||
when:
|
||||
- not jewel_minor_update
|
||||
- groups.get(mgr_group_name, []) | length == 0
|
||||
|
||||
|
||||
|
@ -107,16 +105,27 @@
|
|||
- containerized_deployment
|
||||
- mon_host_count | int == 1
|
||||
|
||||
- name: stop ceph mon
|
||||
- name: stop ceph mon - shortname
|
||||
systemd:
|
||||
name: ceph-mon@{{ ansible_hostname }}
|
||||
state: stopped
|
||||
enabled: yes
|
||||
ignore_errors: True
|
||||
when:
|
||||
- not containerized_deployment
|
||||
|
||||
- name: stop ceph mon - fqdn
|
||||
systemd:
|
||||
name: ceph-mon@{{ ansible_fqdn }}
|
||||
state: stopped
|
||||
enabled: yes
|
||||
ignore_errors: True
|
||||
when:
|
||||
- not containerized_deployment
|
||||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- { role: ceph-common, when: not containerized_deployment }
|
||||
- { role: ceph-docker-common, when: containerized_deployment }
|
||||
- ceph-config
|
||||
|
@ -125,7 +134,7 @@
|
|||
post_tasks:
|
||||
- name: start ceph mon
|
||||
systemd:
|
||||
name: ceph-mon@{{ ansible_hostname }}
|
||||
name: ceph-mon@{{ monitor_name }}
|
||||
state: started
|
||||
enabled: yes
|
||||
when:
|
||||
|
@ -133,7 +142,7 @@
|
|||
|
||||
- name: restart containerized ceph mon
|
||||
systemd:
|
||||
name: ceph-mon@{{ ansible_hostname }}
|
||||
name: ceph-mon@{{ monitor_name }}
|
||||
state: restarted
|
||||
enabled: yes
|
||||
daemon_reload: yes
|
||||
|
@ -185,8 +194,6 @@
|
|||
with_items:
|
||||
- noout
|
||||
- norebalance
|
||||
- noscrub
|
||||
- nodeep-scrub
|
||||
delegate_to: "{{ mon_host }}"
|
||||
when: not containerized_deployment
|
||||
|
||||
|
@ -196,8 +203,6 @@
|
|||
with_items:
|
||||
- noout
|
||||
- norebalance
|
||||
- noscrub
|
||||
- nodeep-scrub
|
||||
delegate_to: "{{ mon_host }}"
|
||||
when: containerized_deployment
|
||||
|
||||
|
@ -246,7 +251,6 @@
|
|||
- not containerized_deployment
|
||||
- cephx
|
||||
- groups.get(mgr_group_name, []) | length > 0
|
||||
- ceph_release_num[ceph_release] >= ceph_release_num.luminous
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
with_items: "{{ groups.get(mgr_group_name, []) }}"
|
||||
|
||||
|
@ -265,7 +269,6 @@
|
|||
- cephx
|
||||
- groups.get(mgr_group_name, []) | length > 0
|
||||
- inventory_hostname == groups[mon_group_name]|last
|
||||
- ceph_release_num[ceph_release] >= ceph_release_num.luminous
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
with_items: "{{ groups.get(mgr_group_name, []) }}"
|
||||
|
||||
|
@ -293,12 +296,11 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- { role: ceph-common, when: not containerized_deployment }
|
||||
- { role: ceph-docker-common, when: containerized_deployment }
|
||||
- ceph-config
|
||||
- { role: ceph-mgr,
|
||||
when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
|
||||
(ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
|
||||
- ceph-mgr
|
||||
|
||||
post_tasks:
|
||||
- name: start ceph mgr
|
||||
|
@ -325,7 +327,6 @@
|
|||
health_osd_check_retries: 40
|
||||
health_osd_check_delay: 30
|
||||
upgrade_ceph_packages: True
|
||||
jewel_minor_update: False
|
||||
|
||||
hosts:
|
||||
- "{{ osd_group_name|default('osds') }}"
|
||||
|
@ -357,6 +358,7 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- { role: ceph-common, when: not containerized_deployment }
|
||||
- { role: ceph-docker-common, when: containerized_deployment }
|
||||
- ceph-config
|
||||
|
@ -398,15 +400,11 @@
|
|||
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
|
||||
register: ceph_versions
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when:
|
||||
- not jewel_minor_update
|
||||
|
||||
- name: set_fact ceph_versions_osd
|
||||
set_fact:
|
||||
ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when:
|
||||
- not jewel_minor_update
|
||||
|
||||
# length == 1 means there is a single osds versions entry
|
||||
# thus all the osds are running the same version
|
||||
|
@ -415,8 +413,7 @@
|
|||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when:
|
||||
- (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
|
||||
- ceph_versions_osd | string | search("ceph version 10")
|
||||
- not jewel_minor_update
|
||||
- ceph_versions_osd | string is search("ceph version 10")
|
||||
|
||||
- name: get num_pgs - non container
|
||||
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
|
||||
|
@ -427,9 +424,9 @@
|
|||
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
|
||||
register: ceph_health_post
|
||||
until: >
|
||||
((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1
|
||||
(((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) > 0)
|
||||
and
|
||||
(ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean"
|
||||
(((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | selectattr('state_name', 'search', '^active\\+clean') | map(attribute='count') | list | sum) == (ceph_pgs.stdout | from_json).pgmap.num_pgs)
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
retries: "{{ health_osd_check_retries }}"
|
||||
delay: "{{ health_osd_check_delay }}"
|
||||
|
@ -438,8 +435,6 @@
|
|||
|
||||
|
||||
- name: unset osd flags
|
||||
vars:
|
||||
- jewel_minor_update: False
|
||||
|
||||
hosts:
|
||||
- "{{ mon_group_name|default('mons') }}"
|
||||
|
@ -461,23 +456,17 @@
|
|||
with_items:
|
||||
- noout
|
||||
- norebalance
|
||||
- noscrub
|
||||
- nodeep-scrub
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: get osd versions
|
||||
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
|
||||
register: ceph_versions
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when:
|
||||
- not jewel_minor_update
|
||||
|
||||
- name: set_fact ceph_versions_osd
|
||||
set_fact:
|
||||
ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when:
|
||||
- not jewel_minor_update
|
||||
|
||||
# length == 1 means there is a single osds versions entry
|
||||
# thus all the osds are running the same version
|
||||
|
@ -486,9 +475,7 @@
|
|||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when:
|
||||
- (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
|
||||
- ceph_versions_osd | string | search("ceph version 12")
|
||||
- not jewel_minor_update
|
||||
|
||||
- ceph_versions_osd | string is search("ceph version 12")
|
||||
|
||||
- name: upgrade ceph mdss cluster
|
||||
|
||||
|
@ -512,6 +499,7 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- { role: ceph-common, when: not containerized_deployment }
|
||||
- { role: ceph-docker-common, when: containerized_deployment }
|
||||
- ceph-config
|
||||
|
@ -558,6 +546,7 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- { role: ceph-common, when: not containerized_deployment }
|
||||
- { role: ceph-docker-common, when: containerized_deployment }
|
||||
- ceph-config
|
||||
|
@ -612,6 +601,7 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- { role: ceph-common, when: not containerized_deployment }
|
||||
- { role: ceph-docker-common, when: containerized_deployment }
|
||||
- ceph-config
|
||||
|
@ -662,12 +652,11 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- { role: ceph-common, when: not containerized_deployment }
|
||||
- { role: ceph-docker-common, when: containerized_deployment }
|
||||
- ceph-config
|
||||
- { role: ceph-nfs,
|
||||
when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
|
||||
(ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
|
||||
- ceph-nfs
|
||||
|
||||
post_tasks:
|
||||
- name: start nfs gateway
|
||||
|
@ -717,12 +706,11 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- { role: ceph-common, when: not containerized_deployment }
|
||||
- { role: ceph-docker-common, when: containerized_deployment }
|
||||
- ceph-config
|
||||
- { role: ceph-iscsi-gw,
|
||||
when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
|
||||
(ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
|
||||
- ceph-iscsi-gw
|
||||
|
||||
post_tasks:
|
||||
- name: start rbd-target-gw
|
||||
|
@ -747,6 +735,7 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- { role: ceph-common, when: not containerized_deployment }
|
||||
- { role: ceph-docker-common, when: containerized_deployment }
|
||||
- ceph-config
|
||||
|
|
|
@ -76,17 +76,17 @@
|
|||
- name: set_fact ceph_uid for ubuntu
|
||||
set_fact:
|
||||
ceph_uid: 64045
|
||||
when: ceph_docker_image_tag | search("ubuntu")
|
||||
when: ceph_docker_image_tag | string is search("ubuntu")
|
||||
|
||||
- name: set_fact ceph_uid for red hat
|
||||
set_fact:
|
||||
ceph_uid: 167
|
||||
when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("centos") or ceph_docker_image_tag | search("fedora")
|
||||
when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
|
||||
|
||||
- name: set_fact ceph_uid for rhel
|
||||
set_fact:
|
||||
ceph_uid: 167
|
||||
when: ceph_docker_image | search("rhceph")
|
||||
when: ceph_docker_image is search("rhceph")
|
||||
|
||||
- name: set proper ownership on ceph directories
|
||||
file:
|
||||
|
@ -109,8 +109,16 @@
|
|||
failed_when: false
|
||||
when: ldb_files.rc == 0
|
||||
|
||||
- name: copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-docker-common
|
||||
command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_hostname }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
|
||||
args:
|
||||
creates: /etc/ceph/{{ cluster }}.mon.keyring
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- ceph-docker-common
|
||||
- ceph-mon
|
||||
|
||||
|
@ -151,11 +159,11 @@
|
|||
|
||||
- set_fact:
|
||||
ceph_uid: 64045
|
||||
when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu")
|
||||
when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu")
|
||||
|
||||
- set_fact:
|
||||
ceph_uid: 167
|
||||
when: ceph_docker_image_tag | search("centos") or ceph_docker_image | search("rhceph") or ceph_docker_image_tag | search("fedora")
|
||||
when: ceph_docker_image_tag | string is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag | string is search("fedora")
|
||||
|
||||
- name: set proper ownership on ceph directories
|
||||
file:
|
||||
|
@ -168,6 +176,7 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- ceph-docker-common
|
||||
- ceph-mgr
|
||||
|
||||
|
@ -190,7 +199,7 @@
|
|||
pre_tasks:
|
||||
- name: collect running osds and ceph-disk unit(s)
|
||||
shell: |
|
||||
systemctl list-units | grep "loaded active" | grep -Eo 'ceph-osd@[0-9]{1,2}.service|ceph-disk@dev-[a-z]{3,4}[0-9]{1}.service'
|
||||
systemctl list-units | grep "loaded active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-disk@dev-[a-z]{3,4}[0-9]{1}.service'
|
||||
register: running_osds
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
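The regex tweak above is what lets this shell task pick up OSD units whose id has three or more digits; a small standalone Python check (the unit names are hypothetical, invented only for this illustration) shows the difference:

import re

# Hypothetical systemd unit names used only for this illustration.
units = ["ceph-osd@7.service", "ceph-osd@123.service"]
old = re.compile(r'ceph-osd@[0-9]{1,2}\.service')
new = re.compile(r'ceph-osd@[0-9]+\.service')
print([u for u in units if old.fullmatch(u)])  # ['ceph-osd@7.service']
print([u for u in units if new.fullmatch(u)])  # both units match
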
@ -222,17 +231,17 @@
|
|||
|
||||
- set_fact:
|
||||
ceph_uid: 64045
|
||||
when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu")
|
||||
when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu")
|
||||
|
||||
- name: set_fact ceph_uid for red hat
|
||||
set_fact:
|
||||
ceph_uid: 167
|
||||
when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("centos") or ceph_docker_image_tag | search("fedora")
|
||||
when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
|
||||
|
||||
- name: set_fact ceph_uid for rhel
|
||||
set_fact:
|
||||
ceph_uid: 167
|
||||
when: ceph_docker_image | search("rhceph")
|
||||
when: ceph_docker_image is search("rhceph")
|
||||
|
||||
- name: set proper ownership on ceph directories
|
||||
file:
|
||||
|
@ -282,6 +291,7 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- ceph-docker-common
|
||||
- ceph-osd
|
||||
|
||||
|
@ -295,9 +305,9 @@
|
|||
command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s --format json"
|
||||
register: ceph_health_post
|
||||
until: >
|
||||
((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1
|
||||
(((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) > 0)
|
||||
and
|
||||
(ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean"
|
||||
(((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | selectattr('state_name', 'search', '^active\\+clean') | map(attribute='count') | list | sum) == (ceph_pgs.stdout | from_json).pgmap.num_pgs)
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
retries: "{{ health_osd_check_retries }}"
|
||||
delay: "{{ health_osd_check_delay }}"
|
||||
|
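The reworked until condition above no longer assumes a single pgs_by_state entry: it sums every state whose name starts with active+clean and compares that total against the cluster's PG count. A rough Python equivalent of the Jinja expression, fed a made-up ceph -s --format json snippet:

import json

def all_pgs_active_clean(ceph_status, expected_num_pgs):
    # Sum the counts of every pgs_by_state entry matching ^active\+clean,
    # then compare against the expected total number of PGs.
    pgmap = json.loads(ceph_status)['pgmap']
    clean = sum(state['count'] for state in pgmap['pgs_by_state']
                if state['state_name'].startswith('active+clean'))
    return clean == expected_num_pgs

sample = '{"pgmap": {"num_pgs": 64, "pgs_by_state": [{"state_name": "active+clean", "count": 64}]}}'
print(all_pgs_active_clean(sample, 64))  # True
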
@ -326,11 +336,11 @@
|
|||
|
||||
- set_fact:
|
||||
ceph_uid: 64045
|
||||
when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu")
|
||||
when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu")
|
||||
|
||||
- set_fact:
|
||||
ceph_uid: 167
|
||||
when: ceph_docker_image_tag | search("centos") or ceph_docker_image | search("rhceph") or ceph_docker_image_tag | search("fedora")
|
||||
when: ceph_docker_image_tag | string is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag | string is search("fedora")
|
||||
|
||||
- name: set proper ownership on ceph directories
|
||||
file:
|
||||
|
@ -343,6 +353,7 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- ceph-docker-common
|
||||
- ceph-mds
|
||||
|
||||
|
@ -368,11 +379,11 @@
|
|||
|
||||
- set_fact:
|
||||
ceph_uid: 64045
|
||||
when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu")
|
||||
when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu")
|
||||
|
||||
- set_fact:
|
||||
ceph_uid: 167
|
||||
when: ceph_docker_image_tag | search("centos") or ceph_docker_image | search("rhceph") or ceph_docker_image_tag | search("fedora")
|
||||
when: ceph_docker_image_tag | string is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag | string is search("fedora")
|
||||
|
||||
- name: set proper ownership on ceph directories
|
||||
file:
|
||||
|
@ -385,6 +396,7 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- ceph-docker-common
|
||||
- ceph-rgw
|
||||
|
||||
|
@ -410,11 +422,11 @@
|
|||
|
||||
- set_fact:
|
||||
ceph_uid: 64045
|
||||
when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu")
|
||||
when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu")
|
||||
|
||||
- set_fact:
|
||||
ceph_uid: 167
|
||||
when: ceph_docker_image_tag | search("centos") or ceph_docker_image | search("rhceph") or ceph_docker_image_tag | search("fedora")
|
||||
when: ceph_docker_image_tag | string is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag | string is search("fedora")
|
||||
|
||||
- name: set proper ownership on ceph directories
|
||||
file:
|
||||
|
@ -427,6 +439,7 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- ceph-docker-common
|
||||
- ceph-rbd-mirror
|
||||
|
||||
|
@ -456,11 +469,11 @@
|
|||
|
||||
- set_fact:
|
||||
ceph_uid: 64045
|
||||
when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu")
|
||||
when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu")
|
||||
|
||||
- set_fact:
|
||||
ceph_uid: 167
|
||||
when: ceph_docker_image_tag | search("centos") or ceph_docker_image | search("rhceph") or ceph_docker_image_tag | search("fedora")
|
||||
when: ceph_docker_image_tag | string is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag | string is search("fedora")
|
||||
|
||||
- name: set proper ownership on ceph directories
|
||||
file:
|
||||
|
@ -473,5 +486,6 @@
|
|||
|
||||
roles:
|
||||
- ceph-defaults
|
||||
- ceph-handler
|
||||
- ceph-docker-common
|
||||
- ceph-nfs
|
||||
|
|
|
@ -3,9 +3,9 @@
- hosts: rgws
become: True
tasks:
- include: roles/ceph-rgw/tasks/multisite/destroy.yml
- include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml

handlers:
- include: roles/ceph-rgw/handlers/main.yml
# Ansible 2.1.0 bug will ignore included handlers without this
static: True
- name: import_tasks roles/ceph-rgw/handlers/main.yml
import_tasks: roles/ceph-rgw/handlers/main.yml

@ -12,7 +12,7 @@
# This can be done by running `wipefs -a $device_name`.

# Path of nvme device primed for LV creation for journals and data. Only one NVMe device is allowed at a time. Providing a list will not work in this case.
nvme_device: /dev/nvme0n1
nvme_device: dummy

# Path of hdd devices designated for LV creation.
hdd_devices:

@ -205,7 +205,7 @@ def generate_caps(cmd, _type, caps):
Generate CephX capabilities list
'''

for k, v in caps.iteritems():
for k, v in caps.items():
# makes sure someone didn't pass an empty var,
# we don't want to add an empty cap
if len(k) == 0:

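The iteritems() to items() change above is a Python 3 compatibility fix: dict.iteritems() was removed in Python 3, while items() works on both interpreters. A tiny standalone illustration (the caps dict is made up for this sketch):

caps = {'mon': 'allow r', 'osd': 'allow rwx'}

# iteritems() raises AttributeError on Python 3; items() behaves the same way
# on both Python 2 and 3 for a small capabilities dict like this one.
for entity, cap in caps.items():
    print(entity, cap)
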
@ -1,6 +1,8 @@
|
|||
#!/usr/bin/python
|
||||
import datetime
|
||||
import copy
|
||||
import json
|
||||
import os
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.0',
|
||||
|
@ -36,7 +38,7 @@ options:
|
|||
description:
|
||||
- The action to take. Either creating OSDs or zapping devices.
|
||||
required: true
|
||||
choices: ['create', 'zap', 'batch']
|
||||
choices: ['create', 'zap', 'batch', 'prepare', 'activate', 'list']
|
||||
default: create
|
||||
data:
|
||||
description:
|
||||
|
@ -63,7 +65,7 @@ options:
|
|||
required: false
|
||||
db_vg:
|
||||
description:
|
||||
- If db is a lv, this must be the name of the volume group it belongs to.
|
||||
- If db is a lv, this must be the name of the volume group it belongs to. # noqa E501
|
||||
- Only applicable if objectstore is 'bluestore'.
|
||||
required: false
|
||||
wal:
|
||||
|
@ -73,7 +75,7 @@ options:
|
|||
required: false
|
||||
wal_vg:
|
||||
description:
|
||||
- If wal is a lv, this must be the name of the volume group it belongs to.
|
||||
- If wal is a lv, this must be the name of the volume group it belongs to. # noqa E501
|
||||
- Only applicable if objectstore is 'bluestore'.
|
||||
required: false
|
||||
crush_device_class:
|
||||
|
@ -95,10 +97,40 @@ options:
|
|||
- Only applicable if action is 'batch'.
|
||||
required: false
|
||||
default: 1
|
||||
|
||||
journal_size:
|
||||
description:
|
||||
- The size in MB of filestore journals.
|
||||
- Only applicable if action is 'batch'.
|
||||
required: false
|
||||
default: 5120
|
||||
block_db_size:
|
||||
description:
|
||||
- The size in bytes of bluestore block db lvs.
|
||||
- The default of -1 means to create them as big as possible.
|
||||
- Only applicable if action is 'batch'.
|
||||
required: false
|
||||
default: -1
|
||||
report:
|
||||
description:
|
||||
- If provided the --report flag will be passed to 'ceph-volume lvm batch'.
|
||||
- No OSDs will be created.
|
||||
- Results will be returned in json format.
|
||||
- Only applicable if action is 'batch'.
|
||||
required: false
|
||||
containerized:
|
||||
description:
|
||||
- Whether or not this is a containerized cluster. The value is
|
||||
assigned or not depending on how the playbook runs.
|
||||
required: false
|
||||
default: None
|
||||
list:
|
||||
description:
|
||||
- List potential Ceph LVM metadata on a device
|
||||
required: false
|
||||
|
||||
author:
|
||||
- Andrew Schoen (@andrewschoen)
|
||||
- Sebastien Han <seb@redhat.com>
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
@ -108,124 +140,207 @@ EXAMPLES = '''
|
|||
data: data-lv
|
||||
data_vg: data-vg
|
||||
journal: /dev/sdc1
|
||||
action: create
|
||||
|
||||
- name: set up a bluestore osd with a raw device for data
|
||||
ceph_volume:
|
||||
objectstore: bluestore
|
||||
data: /dev/sdc
|
||||
action: create
|
||||
|
||||
- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db
|
||||
|
||||
- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa e501
|
||||
ceph_volume:
|
||||
objectstore: bluestore
|
||||
data: data-lv
|
||||
data_vg: data-vg
|
||||
db: /dev/sdc1
|
||||
wal: /dev/sdc2
|
||||
action: create
|
||||
'''
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import AnsibleModule # noqa 4502


def fatal(message, module):
'''
Report a fatal error and exit
'''

if module:
module.fail_json(msg=message, changed=False, rc=1)
else:
raise(Exception(message))


def container_exec(binary, container_image):
'''
Build the docker CLI to run a command inside a container
'''

command_exec = ['docker', 'run', '--rm', '--privileged', '--net=host',
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
os.path.join('--entrypoint=' + binary),
container_image]
return command_exec


def build_ceph_volume_cmd(action, container_image, cluster=None):
'''
Build the ceph-volume command
'''

if container_image:
binary = 'ceph-volume'
cmd = container_exec(
binary, container_image)
else:
binary = ['ceph-volume']
cmd = binary

if cluster:
cmd.extend(['--cluster', cluster])

cmd.append('lvm')
cmd.append(action)

return cmd


def exec_command(module, cmd):
'''
Execute command
'''

rc, out, err = module.run_command(cmd)
return rc, cmd, out, err


def is_containerized():
'''
Check if we are running on a containerized cluster
'''

if 'CEPH_CONTAINER_IMAGE' in os.environ:
container_image = os.getenv('CEPH_CONTAINER_IMAGE')
else:
container_image = None

return container_image
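For orientation, a rough, self-contained sketch of how the helpers above assemble a command list; the bind mounts from container_exec are left out and the function name is invented for this sketch, so treat it as an approximation rather than the module code:

import os

def sketch_build_cmd(action, container_image=None, cluster='ceph'):
    # Mirrors build_ceph_volume_cmd(): wrap in 'docker run' when an image is
    # given, then add the cluster name and the 'lvm <action>' subcommand.
    if container_image:
        cmd = ['docker', 'run', '--rm', '--privileged', '--net=host',
               '--entrypoint=ceph-volume', container_image]
    else:
        cmd = ['ceph-volume']
    cmd.extend(['--cluster', cluster, 'lvm', action])
    return cmd

# is_containerized() keys off the CEPH_CONTAINER_IMAGE environment variable,
# which the playbook exports on containerized deployments.
print(sketch_build_cmd('list', container_image=os.getenv('CEPH_CONTAINER_IMAGE')))
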
|
||||
|
||||
def get_data(data, data_vg):
|
||||
if data_vg:
|
||||
data = "{0}/{1}".format(data_vg, data)
|
||||
data = '{0}/{1}'.format(data_vg, data)
|
||||
return data
|
||||
|
||||
|
||||
def get_journal(journal, journal_vg):
|
||||
if journal_vg:
|
||||
journal = "{0}/{1}".format(journal_vg, journal)
|
||||
journal = '{0}/{1}'.format(journal_vg, journal)
|
||||
return journal
|
||||
|
||||
|
||||
def get_db(db, db_vg):
|
||||
if db_vg:
|
||||
db = "{0}/{1}".format(db_vg, db)
|
||||
db = '{0}/{1}'.format(db_vg, db)
|
||||
return db
|
||||
|
||||
|
||||
def get_wal(wal, wal_vg):
|
||||
if wal_vg:
|
||||
wal = "{0}/{1}".format(wal_vg, wal)
|
||||
wal = '{0}/{1}'.format(wal_vg, wal)
|
||||
return wal
|
||||
|
||||
|
||||
def batch(module):
|
||||
def batch(module, container_image):
|
||||
'''
|
||||
Batch prepare OSD devices
|
||||
'''
|
||||
|
||||
# get module variables
|
||||
cluster = module.params['cluster']
|
||||
objectstore = module.params['objectstore']
|
||||
batch_devices = module.params['batch_devices']
|
||||
batch_devices = module.params.get('batch_devices', None)
|
||||
crush_device_class = module.params.get('crush_device_class', None)
|
||||
dmcrypt = module.params['dmcrypt']
|
||||
osds_per_device = module.params['osds_per_device']
|
||||
journal_size = module.params.get('journal_size', None)
|
||||
block_db_size = module.params.get('block_db_size', None)
|
||||
dmcrypt = module.params.get('dmcrypt', None)
|
||||
osds_per_device = module.params.get('osds_per_device', None)
|
||||
|
||||
if not osds_per_device:
|
||||
fatal('osds_per_device must be provided if action is "batch"', module)
|
||||
|
||||
if osds_per_device < 1:
|
||||
fatal('osds_per_device must be greater than 0 if action is "batch"', module) # noqa E501
|
||||
|
||||
if not batch_devices:
|
||||
module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1)
|
||||
fatal('batch_devices must be provided if action is "batch"', module)
|
||||
|
||||
cmd = [
|
||||
'ceph-volume',
|
||||
'--cluster',
|
||||
cluster,
|
||||
'lvm',
|
||||
'batch',
|
||||
'--%s' % objectstore,
|
||||
'--yes',
|
||||
]
|
||||
# Build the CLI
|
||||
action = 'batch'
|
||||
cmd = build_ceph_volume_cmd(action, container_image, cluster)
|
||||
cmd.extend(['--%s' % objectstore])
|
||||
cmd.append('--yes')
|
||||
|
||||
if crush_device_class:
|
||||
cmd.extend(["--crush-device-class", crush_device_class])
|
||||
cmd.extend(['--crush-device-class', crush_device_class])
|
||||
|
||||
if dmcrypt:
|
||||
cmd.append("--dmcrypt")
|
||||
cmd.append('--dmcrypt')
|
||||
|
||||
if osds_per_device > 1:
|
||||
cmd.extend(["--osds-per-device", osds_per_device])
|
||||
cmd.extend(['--osds-per-device', osds_per_device])
|
||||
|
||||
if objectstore == 'filestore':
|
||||
cmd.extend(['--journal-size', journal_size])
|
||||
|
||||
if objectstore == 'bluestore' and block_db_size != '-1':
|
||||
cmd.extend(['--block-db-size', block_db_size])
|
||||
|
||||
cmd.extend(batch_devices)
|
||||
|
||||
result = dict(
|
||||
changed=False,
|
||||
cmd=cmd,
|
||||
stdout='',
|
||||
stderr='',
|
||||
rc='',
|
||||
start='',
|
||||
end='',
|
||||
delta='',
|
||||
)
|
||||
|
||||
if module.check_mode:
|
||||
return result
|
||||
|
||||
startd = datetime.datetime.now()
|
||||
|
||||
rc, out, err = module.run_command(cmd, encoding=None)
|
||||
|
||||
endd = datetime.datetime.now()
|
||||
delta = endd - startd
|
||||
|
||||
result = dict(
|
||||
cmd=cmd,
|
||||
stdout=out.rstrip(b"\r\n"),
|
||||
stderr=err.rstrip(b"\r\n"),
|
||||
rc=rc,
|
||||
start=str(startd),
|
||||
end=str(endd),
|
||||
delta=str(delta),
|
||||
changed=True,
|
||||
)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg='non-zero return code', **result)
|
||||
|
||||
module.exit_json(**result)
|
||||
return cmd
|
||||
|
||||
|
||||
def create_osd(module):
|
||||
def ceph_volume_cmd(subcommand, container_image, cluster=None):
|
||||
'''
|
||||
Build ceph-volume initial command
|
||||
'''
|
||||
|
||||
if container_image:
|
||||
binary = 'ceph-volume'
|
||||
cmd = container_exec(
|
||||
binary, container_image)
|
||||
else:
|
||||
binary = ['ceph-volume']
|
||||
cmd = binary
|
||||
|
||||
if cluster:
|
||||
cmd.extend(['--cluster', cluster])
|
||||
|
||||
cmd.append('lvm')
|
||||
cmd.append(subcommand)
|
||||
|
||||
return cmd
|
||||
|
||||
|
||||
def prepare_or_create_osd(module, action, container_image):
|
||||
'''
|
||||
Prepare or create OSD devices
|
||||
'''
|
||||
|
||||
# get module variables
|
||||
cluster = module.params['cluster']
|
||||
objectstore = module.params['objectstore']
|
||||
data = module.params['data']
|
||||
data_vg = module.params.get('data_vg', None)
|
||||
data = get_data(data, data_vg)
|
||||
journal = module.params.get('journal', None)
|
||||
journal_vg = module.params.get('journal_vg', None)
|
||||
db = module.params.get('db', None)
|
||||
|
@ -233,94 +348,79 @@ def create_osd(module):
|
|||
wal = module.params.get('wal', None)
|
||||
wal_vg = module.params.get('wal_vg', None)
|
||||
crush_device_class = module.params.get('crush_device_class', None)
|
||||
dmcrypt = module.params['dmcrypt']
|
||||
dmcrypt = module.params.get('dmcrypt', None)
|
||||
|
||||
cmd = [
|
||||
'ceph-volume',
|
||||
'--cluster',
|
||||
cluster,
|
||||
'lvm',
|
||||
'create',
|
||||
'--%s' % objectstore,
|
||||
'--data',
|
||||
]
|
||||
|
||||
data = get_data(data, data_vg)
|
||||
# Build the CLI
|
||||
cmd = build_ceph_volume_cmd(action, container_image, cluster)
|
||||
cmd.extend(['--%s' % objectstore])
|
||||
cmd.append('--data')
|
||||
cmd.append(data)
|
||||
|
||||
if journal:
|
||||
journal = get_journal(journal, journal_vg)
|
||||
cmd.extend(["--journal", journal])
|
||||
cmd.extend(['--journal', journal])
|
||||
|
||||
if db:
|
||||
db = get_db(db, db_vg)
|
||||
cmd.extend(["--block.db", db])
|
||||
cmd.extend(['--block.db', db])
|
||||
|
||||
if wal:
|
||||
wal = get_wal(wal, wal_vg)
|
||||
cmd.extend(["--block.wal", wal])
|
||||
cmd.extend(['--block.wal', wal])
|
||||
|
||||
if crush_device_class:
|
||||
cmd.extend(["--crush-device-class", crush_device_class])
|
||||
cmd.extend(['--crush-device-class', crush_device_class])
|
||||
|
||||
if dmcrypt:
|
||||
cmd.append("--dmcrypt")
|
||||
cmd.append('--dmcrypt')
|
||||
|
||||
result = dict(
|
||||
changed=False,
|
||||
cmd=cmd,
|
||||
stdout='',
|
||||
stderr='',
|
||||
rc='',
|
||||
start='',
|
||||
end='',
|
||||
delta='',
|
||||
)
|
||||
|
||||
if module.check_mode:
|
||||
return result
|
||||
|
||||
# check to see if osd already exists
|
||||
# FIXME: this does not work when data is a raw device
|
||||
# support for 'lvm list' and raw devices was added with https://github.com/ceph/ceph/pull/20620 but
|
||||
# has not made it to a luminous release as of 12.2.4
|
||||
rc, out, err = module.run_command(["ceph-volume", "lvm", "list", data], encoding=None)
|
||||
if rc == 0:
|
||||
result["stdout"] = "skipped, since {0} is already used for an osd".format(data)
|
||||
result['rc'] = 0
|
||||
module.exit_json(**result)
|
||||
|
||||
startd = datetime.datetime.now()
|
||||
|
||||
rc, out, err = module.run_command(cmd, encoding=None)
|
||||
|
||||
endd = datetime.datetime.now()
|
||||
delta = endd - startd
|
||||
|
||||
result = dict(
|
||||
cmd=cmd,
|
||||
stdout=out.rstrip(b"\r\n"),
|
||||
stderr=err.rstrip(b"\r\n"),
|
||||
rc=rc,
|
||||
start=str(startd),
|
||||
end=str(endd),
|
||||
delta=str(delta),
|
||||
changed=True,
|
||||
)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg='non-zero return code', **result)
|
||||
|
||||
module.exit_json(**result)
|
||||
return cmd
|
||||
|
||||
|
||||
def zap_devices(module):
|
||||
"""
|
||||
def list_osd(module, container_image):
|
||||
'''
|
||||
List will detect whether or not a device has Ceph LVM Metadata
|
||||
'''
|
||||
|
||||
# get module variables
|
||||
cluster = module.params['cluster']
|
||||
data = module.params.get('data', None)
|
||||
data_vg = module.params.get('data_vg', None)
|
||||
data = get_data(data, data_vg)
|
||||
|
||||
# Build the CLI
|
||||
action = 'list'
|
||||
cmd = build_ceph_volume_cmd(action, container_image, cluster)
|
||||
if data:
|
||||
cmd.append(data)
|
||||
cmd.append('--format=json')
|
||||
|
||||
return cmd
|
||||
|
||||
|
||||
def activate_osd():
|
||||
'''
|
||||
Activate all the OSDs on a machine
|
||||
'''
|
||||
|
||||
# build the CLI
|
||||
action = 'activate'
|
||||
container_image = None
|
||||
cmd = build_ceph_volume_cmd(action, container_image)
|
||||
cmd.append('--all')
|
||||
|
||||
return cmd
|
||||
|
||||
|
||||
def zap_devices(module, container_image):
|
||||
'''
|
||||
Will run 'ceph-volume lvm zap' on all devices, lvs and partitions
|
||||
used to create the OSD. The --destroy flag is always passed so that
|
||||
if an OSD was originally created with a raw device or partition for
|
||||
'data' then any lvs that were created by ceph-volume are removed.
|
||||
"""
|
||||
'''
|
||||
|
||||
# get module variables
|
||||
data = module.params['data']
|
||||
data_vg = module.params.get('data_vg', None)
|
||||
journal = module.params.get('journal', None)
|
||||
|
@ -329,72 +429,36 @@ def zap_devices(module):
|
|||
db_vg = module.params.get('db_vg', None)
|
||||
wal = module.params.get('wal', None)
|
||||
wal_vg = module.params.get('wal_vg', None)
|
||||
|
||||
base_zap_cmd = [
|
||||
'ceph-volume',
|
||||
'lvm',
|
||||
'zap',
|
||||
# for simplicity always --destroy. It will be needed
|
||||
# for raw devices and will noop for lvs.
|
||||
'--destroy',
|
||||
]
|
||||
|
||||
commands = []
|
||||
|
||||
data = get_data(data, data_vg)
|
||||
|
||||
commands.append(base_zap_cmd + [data])
|
||||
# build the CLI
|
||||
action = 'zap'
|
||||
cmd = build_ceph_volume_cmd(action, container_image)
|
||||
cmd.append('--destroy')
|
||||
cmd.append(data)
|
||||
|
||||
if journal:
|
||||
journal = get_journal(journal, journal_vg)
|
||||
commands.append(base_zap_cmd + [journal])
|
||||
cmd.extend([journal])
|
||||
|
||||
if db:
|
||||
db = get_db(db, db_vg)
|
||||
commands.append(base_zap_cmd + [db])
|
||||
cmd.extend([db])
|
||||
|
||||
if wal:
|
||||
wal = get_wal(wal, wal_vg)
|
||||
commands.append(base_zap_cmd + [wal])
|
||||
cmd.extend([wal])
|
||||
|
||||
result = dict(
|
||||
changed=True,
|
||||
rc=0,
|
||||
)
|
||||
command_results = []
|
||||
for cmd in commands:
|
||||
startd = datetime.datetime.now()
|
||||
|
||||
rc, out, err = module.run_command(cmd, encoding=None)
|
||||
|
||||
endd = datetime.datetime.now()
|
||||
delta = endd - startd
|
||||
|
||||
cmd_result = dict(
|
||||
cmd=cmd,
|
||||
stdout_lines=out.split("\n"),
|
||||
stderr_lines=err.split("\n"),
|
||||
rc=rc,
|
||||
start=str(startd),
|
||||
end=str(endd),
|
||||
delta=str(delta),
|
||||
)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg='non-zero return code', **cmd_result)
|
||||
|
||||
command_results.append(cmd_result)
|
||||
|
||||
result["commands"] = command_results
|
||||
|
||||
module.exit_json(**result)
|
||||
return cmd
|
||||
|
||||
|
||||
def run_module():
|
||||
module_args = dict(
|
||||
cluster=dict(type='str', required=False, default='ceph'),
|
||||
objectstore=dict(type='str', required=False, choices=['bluestore', 'filestore'], default='bluestore'),
|
||||
action=dict(type='str', required=False, choices=['create', 'zap', 'batch'], default='create'),
|
||||
objectstore=dict(type='str', required=False, choices=[
|
||||
'bluestore', 'filestore'], default='bluestore'),
|
||||
action=dict(type='str', required=False, choices=[
|
||||
'create', 'zap', 'batch', 'prepare', 'activate', 'list'], default='create'), # noqa 4502
|
||||
data=dict(type='str', required=False),
|
||||
data_vg=dict(type='str', required=False),
|
||||
journal=dict(type='str', required=False),
|
||||
|
@ -407,6 +471,10 @@ def run_module():
|
|||
dmcrypt=dict(type='bool', required=False, default=False),
|
||||
batch_devices=dict(type='list', required=False, default=[]),
|
||||
osds_per_device=dict(type='int', required=False, default=1),
|
||||
journal_size=dict(type='str', required=False, default='5120'),
|
||||
block_db_size=dict(type='str', required=False, default='-1'),
|
||||
report=dict(type='bool', required=False, default=False),
|
||||
containerized=dict(type='str', required=False, default=False),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
|
@ -414,16 +482,144 @@ def run_module():
|
|||
supports_check_mode=True
|
||||
)
|
||||
|
||||
result = dict(
|
||||
changed=False,
|
||||
stdout='',
|
||||
stderr='',
|
||||
rc='',
|
||||
start='',
|
||||
end='',
|
||||
delta='',
|
||||
)
|
||||
|
||||
if module.check_mode:
|
||||
return result
|
||||
|
||||
# start execution
|
||||
startd = datetime.datetime.now()
|
||||
|
||||
# get the desired action
|
||||
action = module.params['action']
|
||||
|
||||
if action == "create":
|
||||
create_osd(module)
|
||||
elif action == "zap":
|
||||
zap_devices(module)
|
||||
elif action == "batch":
|
||||
batch(module)
|
||||
# will return either the image name or None
|
||||
container_image = is_containerized()
|
||||
|
||||
module.fail_json(msg='State must either be "present" or "absent".', changed=False, rc=1)
|
||||
# Assume the task's status will be 'changed'
|
||||
changed = True
|
||||
|
||||
if action == 'create' or action == 'prepare':
|
||||
# First test if the device has Ceph LVM Metadata
|
||||
rc, cmd, out, err = exec_command(
|
||||
module, list_osd(module, container_image))
|
||||
|
||||
# list_osd returns a dict, if the dict is empty this means
|
||||
# we can not check the return code since it's not consistent
|
||||
# with the plain output
|
||||
# see: http://tracker.ceph.com/issues/36329
|
||||
# FIXME: it's probably less confusing to check for rc
|
||||
|
||||
# convert out to json, ansible returns a string...
|
||||
try:
|
||||
out_dict = json.loads(out)
|
||||
except ValueError:
|
||||
fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa E501
|
||||
|
||||
if out_dict:
|
||||
data = module.params['data']
|
||||
result['stdout'] = 'skipped, since {0} is already used for an osd'.format( # noqa E501
|
||||
data)
|
||||
result['rc'] = 0
|
||||
module.exit_json(**result)
|
||||
|
||||
# Prepare or create the OSD
|
||||
rc, cmd, out, err = exec_command(
|
||||
module, prepare_or_create_osd(module, action, container_image))
|
||||
|
||||
elif action == 'activate':
|
||||
if container_image:
|
||||
fatal(
|
||||
"This is not how container's activation happens, nothing to activate", module) # noqa E501
|
||||
|
||||
# Activate the OSD
|
||||
rc, cmd, out, err = exec_command(
|
||||
module, activate_osd())
|
||||
|
||||
elif action == 'zap':
|
||||
# Zap the OSD
|
||||
rc, cmd, out, err = exec_command(
|
||||
module, zap_devices(module, container_image))
|
||||
|
||||
elif action == 'list':
|
||||
# List Ceph LVM Metadata on a device
|
||||
rc, cmd, out, err = exec_command(
|
||||
module, list_osd(module, container_image))
|
||||
|
||||
elif action == 'batch':
# Batch prepare AND activate OSDs
if container_image:
fatal(
'Batch operation is currently not supported on containerized deployment (https://tracker.ceph.com/issues/36363)', module) # noqa E501

report = module.params.get('report', None)

# Add --report flag for the idempotency test
report_flags = [
'--report',
'--format=json',
]

cmd = batch(module, container_image)
batch_report_cmd = copy.copy(cmd)
batch_report_cmd.extend(report_flags)

# Run batch --report to see what's going to happen
# Do not run the batch command if there is nothing to do
rc, cmd, out, err = exec_command(
module, batch_report_cmd)
try:
report_result = json.loads(out)
except ValueError:
result = dict(
cmd=cmd,
stdout=out.rstrip(b"\r\n"),
stderr=err.rstrip(b"\r\n"),
rc=rc,
changed=changed,
)
module.fail_json(msg='non-zero return code', **result)

if not report:
# if not asking for a report, let's just run the batch command
changed = report_result['changed']
if changed:
# Batch prepare the OSD
rc, cmd, out, err = exec_command(
module, batch(module, container_image))
else:
cmd = batch_report_cmd

else:
module.fail_json(
msg='State must either be "create" or "prepare" or "activate" or "list" or "zap" or "batch".', changed=False, rc=1) # noqa E501

endd = datetime.datetime.now()
|
||||
delta = endd - startd
|
||||
|
||||
result = dict(
|
||||
cmd=cmd,
|
||||
start=str(startd),
|
||||
end=str(endd),
|
||||
delta=str(delta),
|
||||
rc=rc,
|
||||
stdout=out.rstrip(b'\r\n'),
|
||||
stderr=err.rstrip(b'\r\n'),
|
||||
changed=changed,
|
||||
)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg='non-zero return code', **result)
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
|
||||
|
|
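The batch branch in run_module() above runs 'ceph-volume lvm batch --report --format=json' first and only executes the real batch command when the report says something would change. A standalone sketch of that report-then-apply pattern, with a stubbed runner in place of module.run_command (the stub and its canned output are invented for this example):

import copy
import json

def run(cmd):
    # Stub standing in for module.run_command(); pretend ceph-volume reports
    # that nothing would change on this host.
    return 0, json.dumps({'changed': False}), ''

batch_cmd = ['ceph-volume', '--cluster', 'ceph', 'lvm', 'batch', '--bluestore', '--yes', '/dev/sdb']
report_cmd = copy.copy(batch_cmd) + ['--report', '--format=json']

rc, out, err = run(report_cmd)
if json.loads(out).get('changed'):
    rc, out, err = run(batch_cmd)  # only now touch the devices
else:
    print('report says nothing to do, skipping the batch run')
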
|
@ -1,4 +1,5 @@
|
|||
from . import ceph_volume
|
||||
from ansible.compat.tests.mock import MagicMock
|
||||
|
||||
|
||||
class TestCephVolumeModule(object):
|
||||
|
@ -34,3 +35,242 @@ class TestCephVolumeModule(object):
|
|||
def test_wal_with_vg(self):
|
||||
result = ceph_volume.get_wal("wal-lv", "wal-vg")
|
||||
assert result == "wal-vg/wal-lv"
|
||||
|
||||
def test_container_exec(self):
|
||||
fake_binary = "ceph-volume"
|
||||
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
|
||||
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501
|
||||
'-v', '/run/lock/lvm:/run/lock/lvm:z',
|
||||
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
|
||||
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
|
||||
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
|
||||
'-v', '/var/log/ceph/:/var/log/ceph/:z',
|
||||
'--entrypoint=ceph-volume',
|
||||
'docker.io/ceph/daemon:latest-luminous']
|
||||
result = ceph_volume.container_exec(fake_binary, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_zap_osd_container(self):
|
||||
fake_module = MagicMock()
|
||||
fake_module.params = {'data': '/dev/sda'}
|
||||
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
|
||||
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501
|
||||
'-v', '/run/lock/lvm:/run/lock/lvm:z',
|
||||
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
|
||||
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
|
||||
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
|
||||
'-v', '/var/log/ceph/:/var/log/ceph/:z',
|
||||
'--entrypoint=ceph-volume',
|
||||
'docker.io/ceph/daemon:latest-luminous',
|
||||
'lvm',
|
||||
'zap',
|
||||
'--destroy',
|
||||
'/dev/sda']
|
||||
result = ceph_volume.zap_devices(fake_module, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_zap_osd(self):
|
||||
fake_module = MagicMock()
|
||||
fake_module.params = {'data': '/dev/sda'}
|
||||
fake_container_image = None
|
||||
expected_command_list = ['ceph-volume',
|
||||
'lvm',
|
||||
'zap',
|
||||
'--destroy',
|
||||
'/dev/sda']
|
||||
result = ceph_volume.zap_devices(fake_module, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_activate_osd(self):
|
||||
expected_command_list = ['ceph-volume',
|
||||
'lvm',
|
||||
'activate',
|
||||
'--all']
|
||||
result = ceph_volume.activate_osd()
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_list_osd(self):
|
||||
fake_module = MagicMock()
|
||||
fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
|
||||
fake_container_image = None
|
||||
expected_command_list = ['ceph-volume',
|
||||
'--cluster',
|
||||
'ceph',
|
||||
'lvm',
|
||||
'list',
|
||||
'/dev/sda',
|
||||
'--format=json',
|
||||
]
|
||||
result = ceph_volume.list_osd(fake_module, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_list_osd_container(self):
|
||||
fake_module = MagicMock()
|
||||
fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
|
||||
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
|
||||
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501
|
||||
'-v', '/run/lock/lvm:/run/lock/lvm:z',
|
||||
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
|
||||
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
|
||||
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
|
||||
'-v', '/var/log/ceph/:/var/log/ceph/:z',
|
||||
'--entrypoint=ceph-volume',
|
||||
'docker.io/ceph/daemon:latest-luminous',
|
||||
'--cluster',
|
||||
'ceph',
|
||||
'lvm',
|
||||
'list',
|
||||
'/dev/sda',
|
||||
'--format=json',
|
||||
]
|
||||
result = ceph_volume.list_osd(fake_module, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_create_osd_container(self):
|
||||
fake_module = MagicMock()
|
||||
fake_module.params = {'data': '/dev/sda',
|
||||
'objectstore': 'filestore',
|
||||
'cluster': 'ceph', }
|
||||
|
||||
fake_action = "create"
|
||||
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
|
||||
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501
|
||||
'-v', '/run/lock/lvm:/run/lock/lvm:z',
|
||||
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
|
||||
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
|
||||
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
|
||||
'-v', '/var/log/ceph/:/var/log/ceph/:z',
|
||||
'--entrypoint=ceph-volume',
|
||||
'docker.io/ceph/daemon:latest-luminous',
|
||||
'--cluster',
|
||||
'ceph',
|
||||
'lvm',
|
||||
'create',
|
||||
'--filestore',
|
||||
'--data',
|
||||
'/dev/sda']
|
||||
result = ceph_volume.prepare_or_create_osd(
|
||||
fake_module, fake_action, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_create_osd(self):
|
||||
fake_module = MagicMock()
|
||||
fake_module.params = {'data': '/dev/sda',
|
||||
'objectstore': 'filestore',
|
||||
'cluster': 'ceph', }
|
||||
|
||||
fake_container_image = None
|
||||
fake_action = "create"
|
||||
expected_command_list = ['ceph-volume',
|
||||
'--cluster',
|
||||
'ceph',
|
||||
'lvm',
|
||||
'create',
|
||||
'--filestore',
|
||||
'--data',
|
||||
'/dev/sda']
|
||||
result = ceph_volume.prepare_or_create_osd(
|
||||
fake_module, fake_action, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_prepare_osd_container(self):
|
||||
fake_module = MagicMock()
|
||||
fake_module.params = {'data': '/dev/sda',
|
||||
'objectstore': 'filestore',
|
||||
'cluster': 'ceph', }
|
||||
|
||||
fake_action = "prepare"
|
||||
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
|
||||
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501
|
||||
'-v', '/run/lock/lvm:/run/lock/lvm:z',
|
||||
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
|
||||
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
|
||||
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
|
||||
'-v', '/var/log/ceph/:/var/log/ceph/:z',
|
||||
'--entrypoint=ceph-volume',
|
||||
'docker.io/ceph/daemon:latest-luminous',
|
||||
'--cluster',
|
||||
'ceph',
|
||||
'lvm',
|
||||
'prepare',
|
||||
'--filestore',
|
||||
'--data',
|
||||
'/dev/sda']
|
||||
result = ceph_volume.prepare_or_create_osd(
|
||||
fake_module, fake_action, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_prepare_osd(self):
|
||||
fake_module = MagicMock()
|
||||
fake_module.params = {'data': '/dev/sda',
|
||||
'objectstore': 'filestore',
|
||||
'cluster': 'ceph', }
|
||||
|
||||
fake_container_image = None
|
||||
fake_action = "prepare"
|
||||
expected_command_list = ['ceph-volume',
|
||||
'--cluster',
|
||||
'ceph',
|
||||
'lvm',
|
||||
'prepare',
|
||||
'--filestore',
|
||||
'--data',
|
||||
'/dev/sda']
|
||||
result = ceph_volume.prepare_or_create_osd(
|
||||
fake_module, fake_action, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_batch_osd_container(self):
|
||||
fake_module = MagicMock()
|
||||
fake_module.params = {'data': '/dev/sda',
|
||||
'objectstore': 'filestore',
|
||||
'journal_size': '100',
|
||||
'cluster': 'ceph',
|
||||
'batch_devices': ["/dev/sda", "/dev/sdb"]}
|
||||
|
||||
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
|
||||
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501
|
||||
'-v', '/run/lock/lvm:/run/lock/lvm:z',
|
||||
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
|
||||
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
|
||||
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
|
||||
'-v', '/var/log/ceph/:/var/log/ceph/:z',
|
||||
'--entrypoint=ceph-volume',
|
||||
'docker.io/ceph/daemon:latest-luminous',
|
||||
'--cluster',
|
||||
'ceph',
|
||||
'lvm',
|
||||
'batch',
|
||||
'--filestore',
|
||||
'--yes',
|
||||
'--journal-size',
|
||||
'100',
|
||||
'/dev/sda',
|
||||
'/dev/sdb']
|
||||
result = ceph_volume.batch(
|
||||
fake_module, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
||||
def test_batch_osd(self):
|
||||
fake_module = MagicMock()
|
||||
fake_module.params = {'data': '/dev/sda',
|
||||
'objectstore': 'filestore',
|
||||
'journal_size': '100',
|
||||
'cluster': 'ceph',
|
||||
'batch_devices': ["/dev/sda", "/dev/sdb"]}
|
||||
|
||||
fake_container_image = None
|
||||
expected_command_list = ['ceph-volume',
|
||||
'--cluster',
|
||||
'ceph',
|
||||
'lvm',
|
||||
'batch',
|
||||
'--filestore',
|
||||
'--yes',
|
||||
'--journal-size',
|
||||
'100',
|
||||
'/dev/sda',
|
||||
'/dev/sdb']
|
||||
result = ceph_volume.batch(
|
||||
fake_module, fake_container_image)
|
||||
assert result == expected_command_list
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||

from ansible.plugins.action import ActionBase
from distutils.version import LooseVersion

try:
from __main__ import display

@ -14,6 +15,11 @@ except ImportError:
display.error(msg)
raise SystemExit(msg)

if LooseVersion(notario.__version__) < LooseVersion("0.0.13"):
msg = "The python-notario library has an incompatible version. Version >= 0.0.13 is needed, current version: %s" % notario.__version__
display.error(msg)
raise SystemExit(msg)

from notario.exceptions import Invalid
from notario.validators import types, chainable, iterables
from notario.decorators import optional
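The version guard above leans on distutils' LooseVersion for the comparison; a minimal illustration of why a plain string comparison would not be enough:

from distutils.version import LooseVersion

# LooseVersion compares dotted versions component-wise, so 0.0.9 sorts below
# 0.0.13 as intended; a plain string comparison gets this wrong.
print(LooseVersion("0.0.9") < LooseVersion("0.0.13"))  # True
print("0.0.9" < "0.0.13")                              # False
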
@ -96,7 +102,12 @@ class ActionModule(ActionBase):
reason = "[{}] Reason: {}".format(host, error.reason)
try:
if "schema is missing" not in error.message:
given = "[{}] Given value for {}: {}".format(host, error.path[0], error.path[1])
for i in range(0, len(error.path)):
if i == 0:
given = "[{}] Given value for {}".format(
host, error.path[0])
else:
given = given + ": {}".format(error.path[i])
display.error(given)
else:
given = ""

@ -2,4 +2,7 @@ ceph_repository: rhcs
ceph_origin: repository
fetch_directory: ~/ceph-ansible-keys
ceph_rhcs_version: 3
ceph_docker_image: "rhceph-3-rhel7"
ceph_docker_image_tag: "latest"
ceph_docker_registry: "registry.access.redhat.com/rhceph/"
# END OF FILE, DO NOT TOUCH ME!

@ -1,6 +1,6 @@
---
- name: include pre_requisite.yml
include: pre_requisite.yml
include_tasks: pre_requisite.yml

- name: include start_agent.yml
include: start_agent.yml
include_tasks: start_agent.yml

@ -18,6 +18,7 @@ test:
|
|||
type: 1
|
||||
erasure_profile: ""
|
||||
expected_num_objects: ""
|
||||
size: ""
|
||||
test2:
|
||||
name: "test2"
|
||||
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -26,6 +27,7 @@ test2:
|
|||
type: 1
|
||||
erasure_profile: ""
|
||||
expected_num_objects: ""
|
||||
size: ""
|
||||
pools:
|
||||
- "{{ test }}"
|
||||
- "{{ test2 }}"
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
-v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \
|
||||
--name ceph-create-keys \
|
||||
--entrypoint=sleep \
|
||||
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
|
||||
{{ ceph_client_docker_registry}}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }} \
|
||||
300
|
||||
changed_when: false
|
||||
when:
|
||||
|
@ -84,40 +84,51 @@
|
|||
- keys | length > 0
|
||||
- inventory_hostname == groups.get('_filtered_clients') | first
|
||||
|
||||
- name: list existing pool(s)
|
||||
command: >
|
||||
{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
|
||||
osd pool get {{ item.name }} size
|
||||
with_items: "{{ pools }}"
|
||||
register: created_pools
|
||||
failed_when: false
|
||||
delegate_to: "{{ delegated_node }}"
|
||||
- name: pool related tasks
|
||||
when:
|
||||
- condition_copy_admin_key
|
||||
- inventory_hostname == groups.get('_filtered_clients', []) | first
|
||||
block:
|
||||
- name: list existing pool(s)
|
||||
command: >
|
||||
{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
|
||||
osd pool get {{ item.name }} size
|
||||
with_items: "{{ pools }}"
|
||||
register: created_pools
|
||||
failed_when: false
|
||||
delegate_to: "{{ delegated_node }}"
|
||||
|
||||
- name: create ceph pool(s)
|
||||
command: >
|
||||
{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
|
||||
osd pool create {{ item.0.name }}
|
||||
{{ item.0.pg_num }}
|
||||
{{ item.0.pgp_num }}
|
||||
{{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
|
||||
{{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
|
||||
{%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
|
||||
{{ item.0.erasure_profile }}
|
||||
{%- endif %}
|
||||
{{ item.0.expected_num_objects | default('') }}
|
||||
with_together:
|
||||
- "{{ pools }}"
|
||||
- "{{ created_pools.results }}"
|
||||
changed_when: false
|
||||
delegate_to: "{{ delegated_node }}"
|
||||
when:
|
||||
- pools | length > 0
|
||||
- condition_copy_admin_key
|
||||
- inventory_hostname in groups.get('_filtered_clients') | first
|
||||
- item.1.rc != 0
|
||||
- name: create ceph pool(s)
|
||||
command: >
|
||||
{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
|
||||
osd pool create {{ item.0.name }}
|
||||
{{ item.0.pg_num }}
|
||||
{{ item.0.pgp_num }}
|
||||
{{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
|
||||
{{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
|
||||
{%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
|
||||
{{ item.0.erasure_profile }}
|
||||
{%- endif %}
|
||||
{{ item.0.expected_num_objects | default('') }}
|
||||
with_together:
|
||||
- "{{ pools }}"
|
||||
- "{{ created_pools.results }}"
|
||||
changed_when: false
|
||||
delegate_to: "{{ delegated_node }}"
|
||||
when:
|
||||
- pools | length > 0
|
||||
- item.1.rc != 0
|
||||
|
||||
- name: customize pool size
|
||||
command: >
|
||||
{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
|
||||
osd pool set {{ item.name }} size {{ item.size | default('') }}
|
||||
with_items: "{{ pools | unique }}"
|
||||
delegate_to: "{{ delegate_node }}"
|
||||
changed_when: false
|
||||
when:
|
||||
- pools | length > 0
|
||||
- item.size | default ("") != ""
|
||||
|
||||
- name: get client cephx keys
|
||||
copy:
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
---
|
||||
- name: include pre_requisite.yml
|
||||
include: pre_requisite.yml
|
||||
include_tasks: pre_requisite.yml
|
||||
|
||||
- name: include create_users_keys.yml
|
||||
include: create_users_keys.yml
|
||||
include_tasks: create_users_keys.yml
|
||||
when:
|
||||
- user_config
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
ignore_errors: true
|
||||
check_mode: no
|
||||
|
||||
- include: install_pypy.yml
|
||||
- include_tasks: install_pypy.yml
|
||||
when: need_python | failed
|
||||
|
||||
- name: check if there is pip
|
||||
|
@ -14,5 +14,5 @@
|
|||
ignore_errors: true
|
||||
check_mode: no
|
||||
|
||||
- include: install_pip.yml
|
||||
- include_tasks: install_pip.yml
|
||||
when: need_pip | failed and need_python | failed
|
||||
|
|
|
@ -1,117 +0,0 @@
|
|||
---
|
||||
- name: check if nmap is installed
|
||||
local_action:
|
||||
module: command
|
||||
command -v nmap
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: nmapexist
|
||||
run_once: true
|
||||
check_mode: no
|
||||
|
||||
- name: inform that nmap is not present
|
||||
debug:
|
||||
msg: "nmap is not installed, can not test if ceph ports are allowed :("
|
||||
run_once: true
|
||||
when:
|
||||
- nmapexist.rc != 0
|
||||
|
||||
- name: check if monitor port is not filtered
|
||||
local_action:
|
||||
module: shell
|
||||
set -o pipefail && nmap -p 6789 {{ hostvars[inventory_hostname]['ansible_' + monitor_interface]['ipv4']['address'] if hostvars[inventory_hostname]['ansible_' + monitor_interface] is defined else hostvars[inventory_hostname]['monitor_address'] }} | grep -sqo -e filtered -e '0 hosts up'
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: monportstate
|
||||
check_mode: no
|
||||
when:
|
||||
- mon_group_name in group_names
|
||||
- nmapexist.rc == 0
|
||||
|
||||
- name: fail if monitor port is filtered
|
||||
fail:
|
||||
msg: "Please allow port 6789 on your firewall"
|
||||
when:
|
||||
- mon_group_name in group_names
|
||||
- nmapexist.rc == 0
|
||||
- monportstate.rc == 0
|
||||
|
||||
- name: check if osd and mds range is not filtered (osd hosts)
|
||||
local_action:
|
||||
module: shell
|
||||
set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: osdrangestate
|
||||
check_mode: no
|
||||
when:
|
||||
- osd_group_name in group_names
|
||||
- nmapexist.rc == 0
|
||||
|
||||
- name: fail if osd and mds range is filtered (osd hosts)
|
||||
fail:
|
||||
msg: "Please allow range from 6800 to 7300 on your firewall"
|
||||
when:
|
||||
- osd_group_name in group_names
|
||||
- nmapexist.rc == 0
|
||||
- osdrangestate.rc == 0
|
||||
|
||||
- name: check if osd and mds range is not filtered (mds hosts)
|
||||
local_action:
|
||||
module: shell
|
||||
set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: mdsrangestate
|
||||
check_mode: no
|
||||
when:
|
||||
- mds_group_name in group_names
|
||||
- nmapexist.rc == 0
|
||||
|
||||
- name: fail if osd and mds range is filtered (mds hosts)
|
||||
fail:
|
||||
msg: "Please allow range from 6800 to 7300 on your firewall"
|
||||
when:
|
||||
- mds_group_name in group_names
|
||||
- nmapexist.rc == 0
|
||||
- mdsrangestate.rc == 0
|
||||
|
||||
- name: check if rados gateway port is not filtered
|
||||
local_action:
|
||||
module: shell
|
||||
set -o pipefail && nmap -p {{ radosgw_frontend_port }} {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: rgwportstate
|
||||
check_mode: no
|
||||
when:
|
||||
- rgw_group_name in group_names
|
||||
- nmapexist.rc == 0
|
||||
|
||||
- name: fail if rados gateway port is filtered
|
||||
fail:
|
||||
msg: "Please allow port {{ radosgw_frontend_port }} on your firewall"
|
||||
when:
|
||||
- rgw_group_name in group_names
|
||||
- nmapexist.rc == 0
|
||||
- rgwportstate.rc == 0
|
||||
|
||||
- name: check if NFS ports are not filtered
|
||||
local_action:
|
||||
module: shell
|
||||
set -o pipefail && nmap -p 111,2049 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: nfsportstate
|
||||
check_mode: no
|
||||
when:
|
||||
- nfs_group_name in group_names
|
||||
- nmapexist.rc == 0
|
||||
|
||||
- name: fail if NFS ports are filtered
|
||||
fail:
|
||||
msg: "Please allow ports 111 and 2049 on your firewall"
|
||||
when:
|
||||
- nfs_group_name in group_names
|
||||
- nmapexist.rc == 0
|
||||
- nfsportstate.rc == 0
|
|
@ -1,26 +1,26 @@
---
- name: include debian_community_repository.yml
include: debian_community_repository.yml
include_tasks: debian_community_repository.yml
when:
- ceph_repository == 'community'

- name: include debian_rhcs_repository.yml
include: debian_rhcs_repository.yml
include_tasks: debian_rhcs_repository.yml
when:
- ceph_repository == 'rhcs'

- name: include debian_dev_repository.yml
include: debian_dev_repository.yml
include_tasks: debian_dev_repository.yml
when:
- ceph_repository == 'dev'

- name: include debian_custom_repository.yml
include: debian_custom_repository.yml
include_tasks: debian_custom_repository.yml
when:
- ceph_repository == 'custom'

- name: include debian_uca_repository.yml
include: debian_uca_repository.yml
include_tasks: debian_uca_repository.yml
when:
- ceph_repository == 'uca'

@ -1,21 +1,21 @@
|
|||
---
|
||||
- name: include redhat_community_repository.yml
|
||||
include: redhat_community_repository.yml
|
||||
include_tasks: redhat_community_repository.yml
|
||||
when:
|
||||
- ceph_repository == 'community'
|
||||
|
||||
- name: include redhat_rhcs_repository.yml
|
||||
include: redhat_rhcs_repository.yml
|
||||
include_tasks: redhat_rhcs_repository.yml
|
||||
when:
|
||||
- ceph_repository == 'rhcs'
|
||||
|
||||
- name: include redhat_dev_repository.yml
|
||||
include: redhat_dev_repository.yml
|
||||
include_tasks: redhat_dev_repository.yml
|
||||
when:
|
||||
- ceph_repository == 'dev'
|
||||
|
||||
- name: include redhat_custom_repository.yml
|
||||
include: redhat_custom_repository.yml
|
||||
include_tasks: redhat_custom_repository.yml
|
||||
when:
|
||||
- ceph_repository == 'custom'
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
- name: include suse_obs_repository.yml
|
||||
include: suse_obs_repository.yml
|
||||
include_tasks: suse_obs_repository.yml
|
||||
when:
|
||||
- ceph_repository == 'obs'
|
||||
|
|
|
@ -8,11 +8,11 @@
|
|||
mode: 0644
|
||||
|
||||
- name: include prerequisite_rhcs_iso_install_debian.yml
|
||||
include: prerequisite_rhcs_iso_install_debian.yml
|
||||
include_tasks: prerequisite_rhcs_iso_install_debian.yml
|
||||
when:
|
||||
- ceph_repository_type == 'iso'
|
||||
|
||||
- name: include prerequisite_rhcs_cdn_install_debian.yml
|
||||
include: prerequisite_rhcs_cdn_install_debian.yml
|
||||
include_tasks: prerequisite_rhcs_cdn_install_debian.yml
|
||||
when:
|
||||
- ceph_repository_type == 'cdn'
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
- name: include configure_debian_repository_installation.yml
|
||||
include: configure_debian_repository_installation.yml
|
||||
include_tasks: configure_debian_repository_installation.yml
|
||||
when:
|
||||
- ceph_origin == 'repository'
|
||||
|
||||
|
@ -28,13 +28,13 @@
|
|||
cache_valid_time: 3600
|
||||
|
||||
- name: include install_debian_packages.yml
|
||||
include: install_debian_packages.yml
|
||||
include_tasks: install_debian_packages.yml
|
||||
when:
|
||||
- (ceph_origin == 'repository' or ceph_origin == 'distro')
|
||||
- ceph_repository != 'rhcs'
|
||||
|
||||
- name: include install_debian_rhcs_packages.yml
|
||||
include: install_debian_rhcs_packages.yml
|
||||
include_tasks: install_debian_rhcs_packages.yml
|
||||
when:
|
||||
- (ceph_origin == 'repository' or ceph_origin == 'distro')
|
||||
- ceph_repository == 'rhcs'
|
||||
|
|
|
@ -1,15 +1,15 @@
|
|||
---
|
||||
- name: include configure_redhat_repository_installation.yml
|
||||
include: configure_redhat_repository_installation.yml
|
||||
include_tasks: configure_redhat_repository_installation.yml
|
||||
when:
|
||||
- ceph_origin == 'repository'
|
||||
|
||||
- name: include configure_redhat_local_installation.yml
|
||||
include: configure_redhat_local_installation.yml
|
||||
include_tasks: configure_redhat_local_installation.yml
|
||||
when:
|
||||
- ceph_origin == 'local'
|
||||
|
||||
- name: include install_redhat_packages.yml
|
||||
include: install_redhat_packages.yml
|
||||
include_tasks: install_redhat_packages.yml
|
||||
when:
|
||||
- (ceph_origin == 'repository' or ceph_origin == 'distro')
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
- ceph_origin != 'distro' or (ceph_origin == 'repository' and ceph_repository != 'obs')
|
||||
|
||||
- name: include configure_suse_repository_installation.yml
|
||||
include: configure_suse_repository_installation.yml
|
||||
include_tasks: configure_suse_repository_installation.yml
|
||||
when:
|
||||
- ceph_origin == 'repository'
|
||||
|
||||
|
@ -21,4 +21,4 @@
|
|||
with_items: "{{ suse_package_dependencies }}"
|
||||
|
||||
- name: include install_suse_packages.yml
|
||||
include: install_suse_packages.yml
|
||||
include_tasks: install_suse_packages.yml
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
---
|
||||
- name: include prerequisite_rhcs_iso_install.yml
|
||||
include: prerequisite_rhcs_iso_install.yml
|
||||
include_tasks: prerequisite_rhcs_iso_install.yml
|
||||
when:
|
||||
- ceph_repository_type == 'iso'
|
||||
|
||||
- name: include prerequisite_rhcs_cdn_install.yml
|
||||
include: prerequisite_rhcs_cdn_install.yml
|
||||
include_tasks: prerequisite_rhcs_cdn_install.yml
|
||||
when:
|
||||
- ceph_repository_type == 'cdn'
|
||||
|
|
|
@ -1,55 +1,27 @@
|
|||
---
|
||||
- name: include installs/install_on_redhat.yml
|
||||
include: installs/install_on_redhat.yml
|
||||
when:
|
||||
- ansible_os_family == 'RedHat'
|
||||
- name: include_tasks installs/install_on_redhat.yml
|
||||
include_tasks: installs/install_on_redhat.yml
|
||||
when: ansible_os_family == 'RedHat'
|
||||
tags:
|
||||
- package-install
|
||||
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
|
||||
static: False
|
||||
|
||||
- name: include installs/install_on_suse.yml
|
||||
include: installs/install_on_suse.yml
|
||||
when:
|
||||
- ansible_os_family == 'Suse'
|
||||
- name: include_tasks installs/install_on_suse.yml
|
||||
include_tasks: installs/install_on_suse.yml
|
||||
when: ansible_os_family == 'Suse'
|
||||
tags:
|
||||
- package-install
|
||||
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
|
||||
static: False
|
||||
|
||||
- name: include installs/install_on_debian.yml
|
||||
include: installs/install_on_debian.yml
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- name: include_tasks installs/install_on_debian.yml
|
||||
include_tasks: installs/install_on_debian.yml
|
||||
when: ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- package-install
|
||||
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
|
||||
static: False
|
||||
|
||||
- name: include installs/install_on_clear.yml
|
||||
include: installs/install_on_clear.yml
|
||||
when:
|
||||
- ansible_os_family == 'ClearLinux'
|
||||
- name: include_tasks installs/install_on_clear.yml
|
||||
include_tasks: installs/install_on_clear.yml
|
||||
when: ansible_os_family == 'ClearLinux'
|
||||
tags:
|
||||
- package-install
|
||||
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
|
||||
static: False
|
||||
|
||||
- name: include ntp debian setup tasks
|
||||
include: "misc/ntp_debian.yml"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- ntp_service_enabled
|
||||
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
|
||||
static: False
|
||||
|
||||
- name: include ntp rpm setup tasks
|
||||
include: "misc/ntp_rpm.yml"
|
||||
when:
|
||||
- ansible_os_family in ['RedHat', 'Suse']
|
||||
- ntp_service_enabled
|
||||
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
|
||||
static: False
|
||||
|
||||
- name: get ceph version
|
||||
command: ceph --version
|
||||
|
@ -63,29 +35,14 @@
|
|||
|
||||
# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
|
||||
- name: include release-rhcs.yml
|
||||
include: release-rhcs.yml
|
||||
include_tasks: release-rhcs.yml
|
||||
when:
|
||||
- ceph_repository in ['rhcs', 'dev']
|
||||
tags:
|
||||
- always
|
||||
|
||||
- name: include checks/check_firewall.yml
|
||||
include: checks/check_firewall.yml
|
||||
when:
|
||||
- check_firewall
|
||||
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
|
||||
static: False
|
||||
|
||||
- name: include misc/configure_firewall_rpm.yml
|
||||
include: misc/configure_firewall_rpm.yml
|
||||
when:
|
||||
- configure_firewall
|
||||
- ansible_os_family in ['RedHat', 'Suse']
|
||||
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
|
||||
static: False
|
||||
|
||||
- name: include facts_mon_fsid.yml
|
||||
include: facts_mon_fsid.yml
|
||||
include_tasks: facts_mon_fsid.yml
|
||||
run_once: true
|
||||
when:
|
||||
- cephx
|
||||
|
@ -94,13 +51,13 @@
|
|||
- ceph_current_status.fsid is defined
|
||||
|
||||
- name: include create_rbd_client_dir.yml
|
||||
include: create_rbd_client_dir.yml
|
||||
include_tasks: create_rbd_client_dir.yml
|
||||
|
||||
- name: include configure_cluster_name.yml
|
||||
include: configure_cluster_name.yml
|
||||
include_tasks: configure_cluster_name.yml
|
||||
|
||||
- name: include configure_memory_allocator.yml
|
||||
include: configure_memory_allocator.yml
|
||||
include_tasks: configure_memory_allocator.yml
|
||||
when:
|
||||
- (ceph_tcmalloc_max_total_thread_cache | int) > 0
|
||||
- osd_objectstore == 'filestore'
|
||||
|
|
|
@ -1,11 +0,0 @@
---
- name: install ntp on debian
  package:
    name: ntp
    state: present

- name: start the ntp service
  service:
    name: ntp
    enabled: yes
    state: started

@ -1,11 +0,0 @@
---
- name: install ntp
  package:
    name: ntp
    state: present

- name: start the ntp service
  service:
    name: ntpd
    enabled: yes
    state: started
@ -3,28 +3,28 @@
|
|||
set_fact:
|
||||
ceph_release: jewel
|
||||
when:
|
||||
- ceph_version.split('.')[0] | version_compare('10', '==')
|
||||
- ceph_version.split('.')[0] is version_compare('10', '==')
|
||||
|
||||
- name: set_fact ceph_release kraken
|
||||
set_fact:
|
||||
ceph_release: kraken
|
||||
when:
|
||||
- ceph_version.split('.')[0] | version_compare('11', '==')
|
||||
- ceph_version.split('.')[0] is version_compare('11', '==')
|
||||
|
||||
- name: set_fact ceph_release luminous
|
||||
set_fact:
|
||||
ceph_release: luminous
|
||||
when:
|
||||
- ceph_version.split('.')[0] | version_compare('12', '==')
|
||||
- ceph_version.split('.')[0] is version_compare('12', '==')
|
||||
|
||||
- name: set_fact ceph_release mimic
|
||||
set_fact:
|
||||
ceph_release: mimic
|
||||
when:
|
||||
- ceph_version.split('.')[0] | version_compare('13', '==')
|
||||
- ceph_version.split('.')[0] is version_compare('13', '==')
|
||||
|
||||
- name: set_fact ceph_release nautilus
|
||||
set_fact:
|
||||
ceph_release: nautilus
|
||||
when:
|
||||
- ceph_version.split('.')[0] | version_compare('14', '==')
|
||||
- ceph_version.split('.')[0] is version_compare('14', '==')
|
||||
|
|
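These conditions move from the `version_compare` filter (deprecated as a filter form in newer Ansible) to the equivalent Jinja2 test invoked with `is`. A tiny self-contained sketch of the pattern, using a made-up version string:

    - name: set_fact ceph_release mimic (illustrative)
      set_fact:
        ceph_release: mimic
      when: "'13.2.5'.split('.')[0] is version_compare('13', '==')"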
|
@ -1,4 +1,7 @@
---
- name: include create_ceph_initial_dirs.yml
  include_tasks: create_ceph_initial_dirs.yml

# ceph-common
- block:
    - name: create ceph conf directory
@ -24,16 +27,50 @@
|
|||
- lvm_volumes | default([]) | length > 0
|
||||
- osd_scenario == 'lvm'
|
||||
|
||||
# This is a best guess. Ideally we'd like to use `ceph-volume lvm batch --report` to get
|
||||
# a more accurate number but the ceph.conf needs to be in place before that is possible.
|
||||
# There is a tracker to add functionality to ceph-volume which would allow doing this
|
||||
# without the need for a ceph.conf: http://tracker.ceph.com/issues/36088
|
||||
- name: count number of osds for lvm batch scenario
|
||||
set_fact:
|
||||
num_osds: "{{ devices | length | int * osds_per_device | default(1) }}"
|
||||
- name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
|
||||
ceph_volume:
|
||||
cluster: "{{ cluster }}"
|
||||
objectstore: "{{ osd_objectstore }}"
|
||||
batch_devices: "{{ devices }}"
|
||||
osds_per_device: "{{ osds_per_device | default(1) | int }}"
|
||||
journal_size: "{{ journal_size }}"
|
||||
block_db_size: "{{ block_db_size }}"
|
||||
report: true
|
||||
action: "batch"
|
||||
register: lvm_batch_report
|
||||
environment:
|
||||
CEPH_VOLUME_DEBUG: 1
|
||||
when:
|
||||
- devices | default([]) | length > 0
|
||||
- osd_scenario == 'lvm'
|
||||
|
||||
- name: set_fact num_osds from the output of 'ceph-volume lvm batch --report'
|
||||
set_fact:
|
||||
num_osds: "{{ (lvm_batch_report.stdout | from_json).osds | length | int }}"
|
||||
when:
|
||||
- devices | default([]) | length > 0
|
||||
- osd_scenario == 'lvm'
|
||||
- (lvm_batch_report.stdout | from_json).changed
|
||||
|
||||
- name: run 'ceph-volume lvm list' to see how many osds have already been created
|
||||
ceph_volume:
|
||||
action: "list"
|
||||
register: lvm_list
|
||||
environment:
|
||||
CEPH_VOLUME_DEBUG: 1
|
||||
when:
|
||||
- devices | default([]) | length > 0
|
||||
- osd_scenario == 'lvm'
|
||||
- not (lvm_batch_report.stdout | from_json).changed
|
||||
|
||||
- name: set_fact num_osds from the output of 'ceph-volume lvm list'
|
||||
set_fact:
|
||||
num_osds: "{{ lvm_list.stdout | from_json | length | int }}"
|
||||
when:
|
||||
- devices | default([]) | length > 0
|
||||
- osd_scenario == 'lvm'
|
||||
- not (lvm_batch_report.stdout | from_json).changed
|
||||
|
||||
when:
|
||||
- inventory_hostname in groups.get(osd_group_name, [])
|
||||
|
||||
|
|
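For reference, the report consumed above is plain JSON with an `osds` array, which is what the `(lvm_batch_report.stdout | from_json).osds | length` expression counts. A rough, hedged equivalent using the command module directly rather than the ceph_volume module (the device paths are made up for illustration):

    - name: count osds from a ceph-volume batch report (illustrative sketch)
      command: ceph-volume --cluster ceph lvm batch --report --format=json /dev/sdb /dev/sdc
      register: batch_report
      changed_when: false

    - name: set_fact num_osds
      set_fact:
        num_osds: "{{ (batch_report.stdout | from_json).osds | length | int }}"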
|
@ -17,7 +17,7 @@ fsid = {{ fsid }}
{% if common_single_host_mode is defined and common_single_host_mode %}
osd crush chooseleaf type = 0
{% endif %}
{% if ceph_version not in ['jewel', 'kraken', 'luminous'] and containerized_deployment %}
{% if ceph_release not in ['jewel', 'kraken', 'luminous'] and containerized_deployment %}
# let's force the admin socket the way it was so we can properly check for existing instances
# also the line $cluster-$name.$pid.$cctid.asok is only needed when running multiple instances
# of the same daemon, something ceph-ansible cannot do at the time of writing
@ -44,11 +44,11 @@ mon initial members = {% for host in groups[mon_group_name] %}
|
|||
{% if not containerized_deployment and not containerized_deployment_with_kv -%}
|
||||
mon host = {% if nb_mon > 0 %}
|
||||
{% for host in groups[mon_group_name] -%}
|
||||
{% if monitor_address_block != 'subnet' %}
|
||||
{% if hostvars[host]['monitor_address_block'] is defined and hostvars[host]['monitor_address_block'] != 'subnet' %}
|
||||
{% if ip_version == 'ipv4' -%}
|
||||
{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}
|
||||
{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[host]['monitor_address_block']) | first }}
|
||||
{%- elif ip_version == 'ipv6' -%}
|
||||
[{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}]
|
||||
[{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[host]['monitor_address_block']) | first }}]
|
||||
{%- endif %}
|
||||
{% elif hostvars[host]['monitor_address'] is defined and hostvars[host]['monitor_address'] != '0.0.0.0' -%}
|
||||
{% if ip_version == 'ipv4' -%}
|
||||
|
@ -84,11 +84,11 @@ log file = /dev/null
|
|||
mon cluster log file = /dev/null
|
||||
mon host = {% if nb_mon > 0 %}
|
||||
{% for host in groups[mon_group_name] -%}
|
||||
{% if monitor_address_block != 'subnet' %}
|
||||
{% if hostvars[host]['monitor_address_block'] is defined and hostvars[host]['monitor_address_block'] != 'subnet' %}
|
||||
{% if ip_version == 'ipv4' -%}
|
||||
{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}
|
||||
{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[host]['monitor_address_block']) | first }}
|
||||
{%- elif ip_version == 'ipv6' -%}
|
||||
[{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}]
|
||||
[{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[host]['monitor_address_block']) | first }}]
|
||||
{%- endif %}
|
||||
{% elif hostvars[host]['monitor_address'] is defined and hostvars[host]['monitor_address'] != '0.0.0.0' -%}
|
||||
{% if ip_version == 'ipv4' -%}
|
||||
|
|
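The template hunks above now read `monitor_address_block` from each monitor's hostvars rather than assuming one play-wide value, so monitors may sit in different subnets. An illustrative host_vars layout (hostnames and networks are invented):

    # inventory/host_vars/mon1.yml
    monitor_address_block: 192.168.1.0/24

    # inventory/host_vars/mon2.yml
    monitor_address_block: 192.168.2.0/24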
|
@ -48,16 +48,10 @@ client_group_name: clients
iscsi_gw_group_name: iscsigws
mgr_group_name: mgrs

# If check_firewall is true, then ansible will try to determine if the
# Ceph ports are blocked by a firewall. If the machine running ansible
# cannot reach the Ceph ports for some other reason, you may need or
# want to set this to False to skip those checks.
check_firewall: False

# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
# with each other.
configure_firewall: False
configure_firewall: True

# Open ports on corresponding nodes if firewall is installed on it
ceph_mon_firewall_zone: public
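With `check_firewall` gone and `configure_firewall` now defaulting to true, clusters that manage firewalling outside ceph-ansible have to opt out explicitly. A minimal group_vars override, shown only as an example:

    # group_vars/all.yml
    configure_firewall: false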
@ -97,10 +91,15 @@ suse_package_dependencies:
# Whether or not to install the ceph-test package.
ceph_test: false

# Enable the ntp service by default to avoid clock skew on
# ceph nodes
# Enable the ntp service by default to avoid clock skew on ceph nodes
# Disable if an appropriate NTP client is already installed and configured
ntp_service_enabled: true

# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd
# Note that this selection is currently ignored on containerized deployments
ntp_daemon_type: timesyncd


# Set uid/gid to default '64045' for bootstrap directories.
# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
# These values have to be set according to the base OS used by the container image, NOT the host.
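A short example of the new time-sync knobs in group_vars; chronyd is shown here, with ntpd and timesyncd being the other values the comment above allows:

    ntp_service_enabled: true
    ntp_daemon_type: chronyd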
@ -206,7 +205,7 @@ ceph_rhcs_cdn_debian_repo_version: "/3-release/" # for GA, later for updates use
|
|||
#
|
||||
#
|
||||
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
|
||||
#ceph_stable_openstack_release_uca: liberty
|
||||
#ceph_stable_openstack_release_uca: queens
|
||||
#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}"
|
||||
|
||||
# REPOSITORY: openSUSE OBS
|
||||
|
@ -347,8 +346,8 @@ cephfs_data: cephfs_data # name of the data pool for a given filesystem
|
|||
cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
|
||||
|
||||
cephfs_pools:
|
||||
- { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
|
||||
- { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
|
||||
- { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
|
||||
- { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
|
||||
|
||||
## OSD options
|
||||
#
|
||||
|
@ -357,12 +356,13 @@ hci_safety_factor: 0.2
non_hci_safety_factor: 0.7
osd_memory_target: 4000000000
journal_size: 5120 # OSD journal size in MB
block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
public_network: 0.0.0.0/0
cluster_network: "{{ public_network | regex_replace(' ', '') }}"
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime,largeio,inode64,swalloc
osd_objectstore: filestore
osd_objectstore: bluestore

# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
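Because the shipped default flips from filestore to bluestore, existing inventories that relied on the old default should pin their choice explicitly. A hedged sketch of an OSD group_vars file for the lvm scenario referenced elsewhere in this change (device names are placeholders):

    # group_vars/osds.yml
    osd_objectstore: bluestore
    osd_scenario: lvm
    devices:
      - /dev/sdb
      - /dev/sdc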
@ -487,7 +487,7 @@ ceph_conf_overrides: {}
|
|||
# OS TUNING #
|
||||
#############
|
||||
|
||||
disable_transparent_hugepage: true
|
||||
disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"
|
||||
os_tuning_params:
|
||||
- { name: fs.file-max, value: 26234859 }
|
||||
- { name: vm.zone_reclaim_mode, value: 0 }
|
||||
|
@ -508,6 +508,10 @@ docker: false
ceph_docker_image: "ceph/daemon"
ceph_docker_image_tag: latest
ceph_docker_registry: docker.io
## Client only docker image - defaults to {{ ceph_docker_image }}
ceph_client_docker_image: "{{ ceph_docker_image }}"
ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
ceph_client_docker_registry: "{{ ceph_docker_registry }}"
ceph_docker_enable_centos_extra_repo: false
ceph_docker_on_openstack: false
containerized_deployment: False
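The new ceph_client_docker_* variables default to the daemon image but let clients pull from a different image or registry. An illustrative override (the registry name is fictional):

    ceph_client_docker_registry: registry.example.com
    ceph_client_docker_image: "ceph/daemon"
    ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"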
@ -546,6 +550,7 @@ openstack_glance_pool:
|
|||
erasure_profile: ""
|
||||
expected_num_objects: ""
|
||||
application: "rbd"
|
||||
size: ""
|
||||
openstack_cinder_pool:
|
||||
name: "volumes"
|
||||
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -555,6 +560,7 @@ openstack_cinder_pool:
|
|||
erasure_profile: ""
|
||||
expected_num_objects: ""
|
||||
application: "rbd"
|
||||
size: ""
|
||||
openstack_nova_pool:
|
||||
name: "vms"
|
||||
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -564,6 +570,7 @@ openstack_nova_pool:
|
|||
erasure_profile: ""
|
||||
expected_num_objects: ""
|
||||
application: "rbd"
|
||||
size: ""
|
||||
openstack_cinder_backup_pool:
|
||||
name: "backups"
|
||||
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -573,6 +580,7 @@ openstack_cinder_backup_pool:
|
|||
erasure_profile: ""
|
||||
expected_num_objects: ""
|
||||
application: "rbd"
|
||||
size: ""
|
||||
openstack_gnocchi_pool:
|
||||
name: "metrics"
|
||||
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
|
@ -582,6 +590,27 @@ openstack_gnocchi_pool:
|
|||
erasure_profile: ""
|
||||
expected_num_objects: ""
|
||||
application: "rbd"
|
||||
size: ""
|
||||
openstack_cephfs_data_pool:
|
||||
name: "manila_data"
|
||||
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
rule_name: "replicated_rule"
|
||||
type: 1
|
||||
erasure_profile: ""
|
||||
expected_num_objects: ""
|
||||
application: "rbd"
|
||||
size: ""
|
||||
openstack_cephfs_metadata_pool:
|
||||
name: "manila_metadata"
|
||||
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
|
||||
rule_name: "replicated_rule"
|
||||
type: 1
|
||||
erasure_profile: ""
|
||||
expected_num_objects: ""
|
||||
application: "rbd"
|
||||
size: ""
|
||||
|
||||
openstack_pools:
|
||||
- "{{ openstack_glance_pool }}"
|
||||
|
@ -589,6 +618,8 @@ openstack_pools:
|
|||
- "{{ openstack_nova_pool }}"
|
||||
- "{{ openstack_cinder_backup_pool }}"
|
||||
- "{{ openstack_gnocchi_pool }}"
|
||||
- "{{ openstack_cephfs_data_pool }}"
|
||||
- "{{ openstack_cephfs_metadata_pool }}"
|
||||
|
||||
|
||||
# The value for 'key' can be a pre-generated key,
|
||||
|
|
|
@ -209,21 +209,21 @@
    ceph_uid: 64045
  when:
    - containerized_deployment
    - ceph_docker_image_tag | search("ubuntu")
    - ceph_docker_image_tag | string is search("ubuntu")

- name: set_fact ceph_uid for red hat based system - container
  set_fact:
    ceph_uid: 167
  when:
    - containerized_deployment
    - ceph_docker_image_tag | search("latest") or ceph_docker_image_tag | search("centos") or ceph_docker_image_tag | search("fedora")
    - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")

- name: set_fact ceph_uid for red hat
  set_fact:
    ceph_uid: 167
  when:
    - containerized_deployment
    - ceph_docker_image | search("rhceph")
    - ceph_docker_image is search("rhceph")

- name: set_fact rgw_hostname
  set_fact:
@ -238,4 +238,4 @@
    - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
    - ceph_current_status['servicemap'] is defined
    - ceph_current_status['servicemap']['services'] is defined
    - ceph_current_status['servicemap']['services']['rgw'] is defined # that's the way to cover ceph_release_num[ceph_release] >= ceph_release_num['luminous']
    - ceph_current_status['servicemap']['services']['rgw'] is defined

@ -1,9 +1,3 @@
---
- name: include check_running_cluster.yml
  include: check_running_cluster.yml

- name: include facts.yml
  include: facts.yml

- name: include create_ceph_initial_dirs.yml
  include: create_ceph_initial_dirs.yml
  include_tasks: facts.yml
@ -1,6 +1,6 @@
|
|||
---
|
||||
- name: include stat_ceph_files.yml
|
||||
include: stat_ceph_files.yml
|
||||
include_tasks: stat_ceph_files.yml
|
||||
|
||||
- name: fail if we find existing cluster files
|
||||
fail:
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
---
|
||||
- name: include system_checks.yml
|
||||
include: system_checks.yml
|
||||
include_tasks: system_checks.yml
|
||||
|
||||
- name: include check_mandatory_vars.yml
|
||||
include: check_mandatory_vars.yml
|
||||
include_tasks: check_mandatory_vars.yml
|
||||
|
||||
- name: include pre_requisites/prerequisites.yml
|
||||
include: pre_requisites/prerequisites.yml
|
||||
include_tasks: pre_requisites/prerequisites.yml
|
||||
when:
|
||||
- not is_atomic
|
||||
|
||||
|
@ -47,7 +47,7 @@
|
|||
check_mode: no
|
||||
|
||||
- name: include checks.yml
|
||||
include: checks.yml
|
||||
include_tasks: checks.yml
|
||||
when:
|
||||
- (not containerized_deployment_with_kv and
|
||||
((inventory_hostname in groups.get(mon_group_name, [])) or
|
||||
|
@ -57,32 +57,32 @@
|
|||
- not rolling_update | default(false)
|
||||
|
||||
- name: include misc/ntp_atomic.yml
|
||||
include: misc/ntp_atomic.yml
|
||||
include_tasks: misc/ntp_atomic.yml
|
||||
when:
|
||||
- is_atomic
|
||||
- ansible_os_family == 'RedHat'
|
||||
- ntp_service_enabled
|
||||
|
||||
- name: include misc/ntp_rpm.yml
|
||||
include: misc/ntp_rpm.yml
|
||||
include_tasks: misc/ntp_rpm.yml
|
||||
when:
|
||||
- not is_atomic
|
||||
- ansible_os_family in ['RedHat', 'Suse']
|
||||
- ntp_service_enabled
|
||||
|
||||
- name: include misc/ntp_debian.yml
|
||||
include: misc/ntp_debian.yml
|
||||
include_tasks: misc/ntp_debian.yml
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- ntp_service_enabled
|
||||
|
||||
- name: include fetch_image.yml
|
||||
include: fetch_image.yml
|
||||
include_tasks: fetch_image.yml
|
||||
tags:
|
||||
- fetch_container_image
|
||||
|
||||
- name: get ceph version
|
||||
command: docker run --rm --entrypoint /usr/bin/ceph {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --version
|
||||
command: docker run --rm --entrypoint /usr/bin/ceph {{ ceph_client_docker_registry}}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }} --version
|
||||
changed_when: false
|
||||
check_mode: no
|
||||
register: ceph_version
|
||||
|
@ -92,9 +92,9 @@
|
|||
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
|
||||
|
||||
- name: include release.yml
|
||||
include: release.yml
|
||||
include_tasks: release.yml
|
||||
|
||||
# NOTE (jimcurtis): dirs_permissions.yml must precede fetch_configs.yml
|
||||
# # because it creates the directories needed by the latter.
|
||||
- name: include dirs_permissions.yml
|
||||
include: dirs_permissions.yml
|
||||
include_tasks: dirs_permissions.yml
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
- name: include ../checks/check_ntp_atomic.yml
|
||||
include: ../checks/check_ntp_atomic.yml
|
||||
include_tasks: ../checks/check_ntp_atomic.yml
|
||||
when: is_atomic
|
||||
|
||||
- name: start the ntp service
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
- name: include ../checks/check_ntp_debian.yml
|
||||
include: ../checks/check_ntp_debian.yml
|
||||
include_tasks: ../checks/check_ntp_debian.yml
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
- name: include ../checks/check_ntp_rpm.yml
|
||||
include: ../checks/check_ntp_rpm.yml
|
||||
include_tasks: ../checks/check_ntp_rpm.yml
|
||||
when:
|
||||
- ansible_os_family in ['RedHat', 'Suse']
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
|
||||
- name: install docker on debian
|
||||
package:
|
||||
name: docker-engine
|
||||
name: "{{ 'docker-ce' if ansible_architecture == 'aarch64' else 'docker-engine' }}"
|
||||
state: present
|
||||
update_cache: yes
|
||||
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
---
|
||||
- name: include remove_ceph_udev_rules.yml
|
||||
include: remove_ceph_udev_rules.yml
|
||||
include_tasks: remove_ceph_udev_rules.yml
|
||||
|
||||
- name: include debian_prerequisites.yml
|
||||
include: debian_prerequisites.yml
|
||||
include_tasks: debian_prerequisites.yml
|
||||
when:
|
||||
- ansible_distribution == 'Debian'
|
||||
tags:
|
||||
|
|
|
@ -3,28 +3,28 @@
|
|||
set_fact:
|
||||
ceph_release: jewel
|
||||
when:
|
||||
- ceph_version.split('.')[0] | version_compare('10', '==')
|
||||
- ceph_version.split('.')[0] is version_compare('10', '==')
|
||||
|
||||
- name: set_fact ceph_release kraken
|
||||
set_fact:
|
||||
ceph_release: kraken
|
||||
when:
|
||||
- ceph_version.split('.')[0] | version_compare('11', '==')
|
||||
- ceph_version.split('.')[0] is version_compare('11', '==')
|
||||
|
||||
- name: set_fact ceph_release luminous
|
||||
set_fact:
|
||||
ceph_release: luminous
|
||||
when:
|
||||
- ceph_version.split('.')[0] | version_compare('12', '==')
|
||||
- ceph_version.split('.')[0] is version_compare('12', '==')
|
||||
|
||||
- name: set_fact ceph_release mimic
|
||||
set_fact:
|
||||
ceph_release: mimic
|
||||
when:
|
||||
- ceph_version.split('.')[0] | version_compare('13', '==')
|
||||
- ceph_version.split('.')[0] is version_compare('13', '==')
|
||||
|
||||
- name: set_fact ceph_release nautilus
|
||||
set_fact:
|
||||
ceph_release: nautilus
|
||||
when:
|
||||
- ceph_version.split('.')[0] | version_compare('14', '==')
|
||||
- ceph_version.split('.')[0] is version_compare('14', '==')
|
||||
|
|
|
@ -1,33 +1,21 @@
|
|||
---
|
||||
- name: find ceph keys
|
||||
- name: lookup keys in /etc/ceph
|
||||
shell: ls -1 /etc/ceph/*.keyring
|
||||
changed_when: false
|
||||
register: ceph_keys
|
||||
check_mode: no
|
||||
|
||||
- name: set keys permissions
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
mode: "{{ ceph_keyring_permissions }}"
|
||||
owner: root
|
||||
group: root
|
||||
with_items:
|
||||
- "{{ ceph_keys.stdout_lines }}"
|
||||
|
||||
- name: set_fact bootstrap_rbd_keyring
|
||||
set_fact:
|
||||
bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
|
||||
when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
|
||||
|
||||
- name: copy keys to the ansible server
|
||||
- name: "copy ceph user and bootstrap keys to the ansible server in {{ fetch_directory }}/{{ fsid }}/"
|
||||
fetch:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
|
||||
flat: yes
|
||||
fail_on_missing: false
|
||||
run_once: true
|
||||
with_items:
|
||||
- "{{ ceph_keys.stdout_lines }}"
|
||||
- "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring"
|
||||
- "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
|
||||
- "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring"
|
||||
- "{{ bootstrap_rbd_keyring | default([]) }}"
|
||||
- "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
|
||||
- "/var/lib/ceph/bootstrap-mgr/{{ cluster }}.keyring"
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [2014] [Guillaume Abrioux]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,2 @@
|
|||
# Ansible role: ceph-handler
|
||||
Documentation is available at http://docs.ceph.com/ceph-ansible/.
|
|
@ -105,8 +105,8 @@
|
|||
- osd_group_name in group_names
|
||||
- containerized_deployment
|
||||
- not rolling_update
|
||||
- ceph_osd_container_stat.get('rc') == 0
|
||||
- inventory_hostname == groups.get(osd_group_name) | last
|
||||
- ceph_osd_container_stat.get('rc') == 0
|
||||
- ceph_osd_container_stat.get('stdout_lines', [])|length != 0
|
||||
- handler_health_osd_check
|
||||
- hostvars[item]['_osd_handler_called'] | default(False)
|
|
@ -0,0 +1,13 @@
|
|||
---
|
||||
galaxy_info:
|
||||
author: Sébastien Han
|
||||
description: Contains handlers for Ceph services
|
||||
license: Apache
|
||||
min_ansible_version: 2.3
|
||||
platforms:
|
||||
- name: EL
|
||||
versions:
|
||||
- 7
|
||||
categories:
|
||||
- system
|
||||
dependencies: []
|
|
@ -1,10 +1,10 @@
|
|||
---
|
||||
- name: include check_running_containers.yml
|
||||
include: check_running_containers.yml
|
||||
include_tasks: check_running_containers.yml
|
||||
when:
|
||||
- containerized_deployment
|
||||
|
||||
- name: include check_socket_non_container.yml
|
||||
include: check_socket_non_container.yml
|
||||
include_tasks: check_socket_non_container.yml
|
||||
when:
|
||||
- not containerized_deployment
|
|
@ -9,7 +9,7 @@
    - inventory_hostname in groups.get(mon_group_name, [])

- name: check for an osd container
  command: "docker ps -q --filter='name=ceph-osd-{{ ansible_hostname }}'"
  command: "docker ps -q --filter='name=ceph-osd'"
  register: ceph_osd_container_stat
  changed_when: false
  failed_when: false

@ -0,0 +1,3 @@
---
- name: include check_running_cluster.yml
  include: check_running_cluster.yml
@ -14,7 +14,6 @@ $DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok
|
|||
# First, restart the daemon
|
||||
systemctl restart ceph-mds@${MDS_NAME}
|
||||
|
||||
COUNT=10
|
||||
# Wait and ensure the socket exists after restarting the daemon
|
||||
while [ $RETRIES -ne 0 ]; do
|
||||
$DOCKER_EXEC test -S $SOCKET && exit 0
|
|
@ -15,7 +15,6 @@ systemctl reset-failed ceph-mgr@${MGR_NAME}
|
|||
# First, restart the daemon
|
||||
systemctl restart ceph-mgr@${MGR_NAME}
|
||||
|
||||
COUNT=10
|
||||
# Wait and ensure the socket exists after restarting the daemon
|
||||
while [ $RETRIES -ne 0 ]; do
|
||||
$DOCKER_EXEC test -S $SOCKET && exit 0
|
|
@ -11,7 +11,6 @@ DOCKER_EXEC="docker exec ceph-nfs-{{ ansible_hostname }}"
|
|||
# First, restart the daemon
|
||||
{% if containerized_deployment -%}
|
||||
systemctl restart $NFS_NAME
|
||||
COUNT=10
|
||||
# Wait and ensure the pid exists after restarting the daemon
|
||||
while [ $RETRIES -ne 0 ]; do
|
||||
$DOCKER_EXEC test -f $PID && exit 0
|
|
@ -1,6 +1,5 @@
|
|||
#!/bin/bash
|
||||
|
||||
RETRIES="{{ handler_health_osd_check_retries }}"
|
||||
DELAY="{{ handler_health_osd_check_delay }}"
|
||||
CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}"
|
||||
|
||||
|
@ -10,7 +9,7 @@ check_pgs() {
|
|||
return 0
|
||||
fi
|
||||
while [ $RETRIES -ne 0 ]; do
|
||||
test "[""$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]')"
|
||||
test "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')" -eq "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print sum ( [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if "active+clean" in i["state_name"]])')"
|
||||
RET=$?
|
||||
test $RET -eq 0 && return 0
|
||||
sleep $DELAY
|
||||
|
@ -61,23 +60,28 @@ get_docker_osd_id() {
|
|||
|
||||
# For containerized deployments, the unit file looks like: ceph-osd@sda.service
|
||||
# For non-containerized deployments, the unit file looks like: ceph-osd@NNN.service where NNN is OSD ID
|
||||
for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([0-9]{1,}|[a-z]+).service"); do
|
||||
for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([0-9]+|[a-z]+).service"); do
|
||||
# First, restart daemon(s)
|
||||
systemctl restart "${unit}"
|
||||
# We need to wait because it may take some time for the socket to actually exist
|
||||
COUNT=10
|
||||
# Wait and ensure the socket exists after restarting the daemon
|
||||
{% if containerized_deployment -%}
|
||||
{% if containerized_deployment and osd_scenario != 'lvm' -%}
|
||||
id=$(get_dev_name "$unit")
|
||||
container_id=$(get_docker_id_from_dev_name "$id")
|
||||
wait_for_socket_in_docker "$container_id"
|
||||
osd_id=$whoami
|
||||
docker_exec="docker exec $container_id"
|
||||
{% elif containerized_deployment and osd_scenario == 'lvm' %}
|
||||
osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
|
||||
container_id=$(get_docker_id_from_dev_name "ceph-osd-${osd_id}")
|
||||
docker_exec="docker exec $container_id"
|
||||
{% else %}
|
||||
osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]{1,}')
|
||||
osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
|
||||
{% endif %}
|
||||
SOCKET=/var/run/ceph/{{ cluster }}-osd.${osd_id}.asok
|
||||
while [ $COUNT -ne 0 ]; do
|
||||
RETRIES="{{ handler_health_osd_check_retries }}"
|
||||
$docker_exec test -S "$SOCKET" && check_pgs && continue 2
|
||||
sleep $DELAY
|
||||
let COUNT=COUNT-1
|
|
@ -6,18 +6,14 @@ RBD_MIRROR_NAME="{{ ansible_hostname }}"
|
|||
{% if containerized_deployment %}
|
||||
DOCKER_EXEC="docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
|
||||
{% endif %}
|
||||
{% if ceph_release_num[ceph_release] < ceph_release_num['luminous'] %}
|
||||
SOCKET=/var/run/ceph/{{ cluster }}-client.admin.asok
|
||||
{% else %}
|
||||
|
||||
# Backward compatibility
|
||||
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok
|
||||
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok
|
||||
{% endif %}
|
||||
|
||||
# First, restart the daemon
|
||||
systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
|
||||
|
||||
COUNT=10
|
||||
# Wait and ensure the socket exists after restarting the daemon
|
||||
while [ $RETRIES -ne 0 ]; do
|
||||
$DOCKER_EXEC test -S $SOCKET && exit 0
|
|
@ -0,0 +1,19 @@
|
|||
---
|
||||
galaxy_info:
|
||||
author: Guillaume Abrioux
|
||||
description: Handles ceph infra requirements (ntp, firewall, ...)
|
||||
license: Apache
|
||||
min_ansible_version: 2.3
|
||||
platforms:
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- xenial
|
||||
- name: EL
|
||||
versions:
|
||||
- 7
|
||||
- name: opensuse
|
||||
versions:
|
||||
- 42.3
|
||||
categories:
|
||||
- system
|
||||
dependencies: []
|
|
@ -9,6 +9,8 @@
|
|||
changed_when: false
|
||||
tags:
|
||||
- firewall
|
||||
when:
|
||||
- not containerized_deployment
|
||||
|
||||
- name: start firewalld
|
||||
service:
|
||||
|
@ -16,20 +18,22 @@
|
|||
state: started
|
||||
enabled: yes
|
||||
when:
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- firewalld_pkg_query.get('rc', 1) == 0
|
||||
or is_atomic
|
||||
|
||||
- name: open monitor ports
|
||||
firewalld:
|
||||
service: ceph-mon
|
||||
zone: "{{ ceph_mon_firewall_zone }}"
|
||||
source: "{{ public_network }}"
|
||||
permanent: true
|
||||
immediate: false # if true then fails in case firewalld is stopped
|
||||
immediate: true
|
||||
state: enabled
|
||||
notify: restart firewalld
|
||||
when:
|
||||
- mon_group_name is defined
|
||||
- mon_group_name in group_names
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
|
||||
tags:
|
||||
- firewall
|
||||
|
||||
|
@ -37,15 +41,15 @@
|
|||
firewalld:
|
||||
service: ceph
|
||||
zone: "{{ ceph_mgr_firewall_zone }}"
|
||||
source: "{{ public_network }}"
|
||||
permanent: true
|
||||
immediate: false # if true then fails in case firewalld is stopped
|
||||
immediate: true
|
||||
state: enabled
|
||||
notify: restart firewalld
|
||||
when:
|
||||
- ceph_release_num[ceph_release] >= ceph_release_num.luminous
|
||||
- mgr_group_name is defined
|
||||
- mgr_group_name in group_names
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
|
||||
tags:
|
||||
- firewall
|
||||
|
||||
|
@ -53,14 +57,18 @@
|
|||
firewalld:
|
||||
service: ceph
|
||||
zone: "{{ ceph_osd_firewall_zone }}"
|
||||
source: "{{ item }}"
|
||||
permanent: true
|
||||
immediate: false # if true then fails in case firewalld is stopped
|
||||
immediate: true
|
||||
state: enabled
|
||||
with_items:
|
||||
- "{{ public_network }}"
|
||||
- "{{ cluster_network }}"
|
||||
notify: restart firewalld
|
||||
when:
|
||||
- osd_group_name is defined
|
||||
- osd_group_name in group_names
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
|
||||
tags:
|
||||
- firewall
|
||||
|
||||
|
@ -68,14 +76,15 @@
|
|||
firewalld:
|
||||
port: "{{ radosgw_frontend_port }}/tcp"
|
||||
zone: "{{ ceph_rgw_firewall_zone }}"
|
||||
source: "{{ public_network }}"
|
||||
permanent: true
|
||||
immediate: false # if true then fails in case firewalld is stopped
|
||||
immediate: true
|
||||
state: enabled
|
||||
notify: restart firewalld
|
||||
when:
|
||||
- rgw_group_name is defined
|
||||
- rgw_group_name in group_names
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
|
||||
tags:
|
||||
- firewall
|
||||
|
||||
|
@ -83,14 +92,15 @@
|
|||
firewalld:
|
||||
service: ceph
|
||||
zone: "{{ ceph_mds_firewall_zone }}"
|
||||
source: "{{ public_network }}"
|
||||
permanent: true
|
||||
immediate: false # if true then fails in case firewalld is stopped
|
||||
immediate: true
|
||||
state: enabled
|
||||
notify: restart firewalld
|
||||
when:
|
||||
- mds_group_name is defined
|
||||
- mds_group_name in group_names
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
|
||||
tags:
|
||||
- firewall
|
||||
|
||||
|
@ -98,14 +108,15 @@
|
|||
firewalld:
|
||||
service: nfs
|
||||
zone: "{{ ceph_nfs_firewall_zone }}"
|
||||
source: "{{ public_network }}"
|
||||
permanent: true
|
||||
immediate: false # if true then fails in case firewalld is stopped
|
||||
immediate: true
|
||||
state: enabled
|
||||
notify: restart firewalld
|
||||
when:
|
||||
- nfs_group_name is defined
|
||||
- nfs_group_name in group_names
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
|
||||
tags:
|
||||
- firewall
|
||||
|
||||
|
@ -113,14 +124,15 @@
|
|||
firewalld:
|
||||
port: "111/tcp"
|
||||
zone: "{{ ceph_nfs_firewall_zone }}"
|
||||
source: "{{ public_network }}"
|
||||
permanent: true
|
||||
immediate: false # if true then fails in case firewalld is stopped
|
||||
immediate: true
|
||||
state: enabled
|
||||
notify: restart firewalld
|
||||
when:
|
||||
- nfs_group_name is defined
|
||||
- nfs_group_name in group_names
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
|
||||
tags:
|
||||
- firewall
|
||||
|
||||
|
@ -128,14 +140,15 @@
|
|||
firewalld:
|
||||
port: "{{ restapi_port }}/tcp"
|
||||
zone: "{{ ceph_restapi_firewall_zone }}"
|
||||
source: "{{ public_network }}"
|
||||
permanent: true
|
||||
immediate: false # if true then fails in case firewalld is stopped
|
||||
immediate: true
|
||||
state: enabled
|
||||
notify: restart firewalld
|
||||
when:
|
||||
- restapi_group_name is defined
|
||||
- restapi_group_name in group_names
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
|
||||
tags:
|
||||
- firewall
|
||||
|
||||
|
@ -143,14 +156,15 @@
|
|||
firewalld:
|
||||
service: ceph
|
||||
zone: "{{ ceph_rbdmirror_firewall_zone }}"
|
||||
source: "{{ public_network }}"
|
||||
permanent: true
|
||||
immediate: false # if true then fails in case firewalld is stopped
|
||||
immediate: true
|
||||
state: enabled
|
||||
notify: restart firewalld
|
||||
when:
|
||||
- rbdmirror_group_name is defined
|
||||
- rbdmirror_group_name in group_names
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
|
||||
tags:
|
||||
- firewall
|
||||
|
||||
|
@ -158,14 +172,15 @@
|
|||
firewalld:
|
||||
port: "5001/tcp"
|
||||
zone: "{{ ceph_iscsi_firewall_zone }}"
|
||||
source: "{{ public_network }}"
|
||||
permanent: true
|
||||
immediate: false # if true then fails in case firewalld is stopped
|
||||
immediate: true
|
||||
state: enabled
|
||||
notify: restart firewalld
|
||||
when:
|
||||
- iscsi_group_name is defined
|
||||
- iscsi_group_name in group_names
|
||||
- firewalld_pkg_query.rc == 0
|
||||
- (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
|
||||
tags:
|
||||
- firewall
|
||||
|
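All of the tasks above share the same firewalld pattern: a permanent plus immediate rule scoped to the public (or cluster) network, followed by a notify to restart firewalld. An illustrative standalone task for an extra port, with the port number and zone chosen arbitrarily:

    - name: open an additional ceph-related port (example only)
      firewalld:
        port: "8080/tcp"
        zone: public
        source: "{{ public_network }}"
        permanent: true
        immediate: true
        state: enabled
      notify: restart firewalld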
|
@ -0,0 +1,21 @@
---
- name: include_tasks configure_firewall.yml
  include_tasks: configure_firewall.yml
  when:
    - configure_firewall
    - ansible_os_family in ['RedHat', 'Suse']
  tags: configure_firewall

- name: include_tasks "ntp_debian.yml"
  include_tasks: "ntp_debian.yml"
  when:
    - ansible_os_family == 'Debian'
    - ntp_service_enabled
  tags: configure_ntp

- name: include_tasks "ntp_rpm.yml"
  include_tasks: "ntp_rpm.yml"
  when:
    - ansible_os_family in ['RedHat', 'Suse']
    - ntp_service_enabled
  tags: configure_ntp
@ -0,0 +1,29 @@
---
- name: setup ntpd
  block:
    - command: timedatectl set-ntp no
    - package:
        name: ntp
        state: present
    - service:
        name: ntp
        enabled: yes
        state: started
  when: ntp_daemon_type == "ntpd"

- name: setup chrony
  block:
    - command: timedatectl set-ntp no
    - package:
        name: chrony
        state: present
    - service:
        name: chronyd
        enabled: yes
        state: started
  when: ntp_daemon_type == "chronyd"

- name: setup timesyncd
  block:
    - command: timedatectl set-ntp on
  when: ntp_daemon_type == "timesyncd"
@ -0,0 +1,29 @@
|
|||
---
|
||||
- name: setup ntpd
|
||||
block:
|
||||
- command: timedatectl set-ntp no
|
||||
- package:
|
||||
name: ntp
|
||||
state: present
|
||||
- service:
|
||||
name: ntpd
|
||||
enabled: yes
|
||||
state: started
|
||||
when: ntp_daemon_type == "ntpd"
|
||||
|
||||
- name: setup chrony
|
||||
block:
|
||||
- command: timedatectl set-ntp no
|
||||
- package:
|
||||
name: chrony
|
||||
state: present
|
||||
- service:
|
||||
name: chronyd
|
||||
enabled: yes
|
||||
state: started
|
||||
when: ntp_daemon_type == "chronyd"
|
||||
|
||||
- name: setup timesyncd
|
||||
block:
|
||||
- command: timedatectl set-ntp on
|
||||
when: ntp_daemon_type == "timesyncd"
|
|
@ -56,6 +56,7 @@ client_connections: {}
|
|||
# Whether or not to generate secure certificate to iSCSI gateway nodes
|
||||
generate_crt: False
|
||||
|
||||
rbd_pool_size: ""
|
||||
|
||||
##################
|
||||
# RBD-TARGET-API #
|
||||
|
|
|
@ -32,15 +32,23 @@
|
|||
register: rbd_pool_exists
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: get default value for osd_pool_default_pg_num
|
||||
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
|
||||
changed_when: false
|
||||
register: osd_pool_default_pg_num
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
- name: rbd pool related tasks
|
||||
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
|
||||
block:
|
||||
- name: get default value for osd_pool_default_pg_num
|
||||
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
|
||||
changed_when: false
|
||||
register: osd_pool_default_pg_num
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: create a rbd pool if it doesn't exist
|
||||
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
|
||||
changed_when: false
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
|
||||
- name: create a rbd pool if it doesn't exist
|
||||
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
|
||||
changed_when: false
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
|
||||
- name: customize pool size
|
||||
command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default('') }}"
|
||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||
changed_when: false
|
||||
when:
|
||||
- rbd_pool_size | default ("") != ""
|
||||
|
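The block above only customises the rbd pool when `rbd_pool_size` is non-empty, so the replica count stays at the cluster default unless overridden. An example override for the iSCSI gateway group (the value 3 is arbitrary):

    # group_vars/iscsigws.yml
    rbd_pool_size: "3"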
|
|
@ -1,9 +1,9 @@
|
|||
---
|
||||
- name: include common.yml
|
||||
include: common.yml
|
||||
include_tasks: common.yml
|
||||
|
||||
- name: include non-container/prerequisites.yml
|
||||
include: non-container/prerequisites.yml
|
||||
include_tasks: non-container/prerequisites.yml
|
||||
when:
|
||||
- not containerized_deployment
|
||||
|
||||
|
@@ -11,16 +11,16 @@
# and transfers them to /etc/ceph directory on each controller. SSL certs are used by
# the API for https support.
- name: include deploy_ssl_keys.yml
  include: deploy_ssl_keys.yml
  include_tasks: deploy_ssl_keys.yml
  when:
    - generate_crt|bool

- name: include non-container/configure_iscsi.yml
  include: non-container/configure_iscsi.yml
  include_tasks: non-container/configure_iscsi.yml
  when:
    - not containerized_deployment

- name: include containerized.yml
  include: container/containerized.yml
  include_tasks: container/containerized.yml
  when:
    - containerized_deployment

@@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop rbd-target-api
ExecStartPre=-/usr/bin/docker rm rbd-target-api
ExecStart=/usr/bin/docker run --rm \
  --memory={{ ceph_rbd_target_api_docker_memory_limit }} \
  {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%}
  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
  --cpus={{ ceph_rbd_target_api_docker_cpu_limit }} \
  {% else -%}
  --cpu-quota={{ ceph_rbd_target_api_docker_cpu_limit * 100000 }} \
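Every systemd template touched by this change switches version_compare from a Jinja2 filter to a test (the `is` spelling), which is the form Ansible 2.5+ expects and which avoids the filter-as-test deprecation warning. A small check of the same expression, with a made-up ceph_docker_version value:

# version_test_sketch.yml -- ceph_docker_version is an example value, not from the diff
- hosts: localhost
  gather_facts: false
  vars:
    ceph_docker_version: "18.06.1"
  tasks:
    - name: compare the docker major version the same way the templates do
      debug:
        msg: "major version >= 13: {{ ceph_docker_version.split('.')[0] is version_compare('13', '>=') }}"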
@@ -16,6 +16,7 @@ ExecStart=/usr/bin/docker run --rm \
  -v /etc/localtime:/etc/localtime:ro \
  --privileged \
  --cap-add=ALL \
  --net=host \
  -v /dev:/dev \
  -v /lib/modules:/lib/modules \
  -v /etc/ceph:/etc/ceph \
@@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop rbd-target-gw
ExecStartPre=-/usr/bin/docker rm rbd-target-gw
ExecStart=/usr/bin/docker run --rm \
  --memory={{ ceph_rbd_target_gw_docker_memory_limit }} \
  {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%}
  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
  --cpus={{ ceph_rbd_target_gw_docker_cpu_limit }} \
  {% else -%}
  --cpu-quota={{ ceph_rbd_target_gw_docker_cpu_limit * 100000 }} \
@@ -16,6 +16,7 @@ ExecStart=/usr/bin/docker run --rm \
  -v /etc/localtime:/etc/localtime:ro \
  --privileged \
  --cap-add=ALL \
  --net=host \
  -v /dev:/dev \
  -v /lib/modules:/lib/modules \
  -v /etc/ceph:/etc/ceph \
@@ -8,13 +8,14 @@ ExecStartPre=-/usr/bin/docker stop tcmu-runner
ExecStartPre=-/usr/bin/docker rm tcmu-runner
ExecStart=/usr/bin/docker run --rm \
  --memory={{ ceph_tcmu_runner_docker_memory_limit }} \
  {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%}
  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
  --cpus={{ ceph_tcmu_runner_docker_cpu_limit }} \
  {% else -%}
  --cpu-quota={{ ceph_tcmu_runner_docker_cpu_limit * 100000 }} \
  {% endif -%}
  -v /etc/localtime:/etc/localtime:ro \
  --privileged \
  --net=host \
  --cap-add=ALL \
  -v /dev:/dev \
  -v /lib/modules:/lib/modules \
@@ -1,10 +1,19 @@
---
- name: create filesystem pools
  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
  changed_when: false
  delegate_to: "{{ groups[mon_group_name][0] }}"
  with_items:
    - "{{ cephfs_pools }}"
- name: filesystem pools related tasks
  block:
    - name: create filesystem pools
      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
      changed_when: false
      delegate_to: "{{ groups[mon_group_name][0] }}"
      with_items:
        - "{{ cephfs_pools }}"

    - name: customize pool size
      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default('') }}"
      with_items: "{{ cephfs_pools | unique }}"
      delegate_to: "{{ groups[mon_group_name][0] }}"
      changed_when: false
      when: item.size | default ("") != ""

- name: check if ceph filesystem already exists
  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
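The new 'customize pool size' task only fires for entries of cephfs_pools that actually define a size. A self-contained sketch of that per-item condition, with example pool definitions standing in for the real variable and debug standing in for the ceph command:

# cephfs_pools_sketch.yml -- pool names, pg counts and sizes are example values only
- hosts: localhost
  gather_facts: false
  vars:
    cephfs_pools:
      - { name: cephfs_data, pgs: 8 }
      - { name: cephfs_metadata, pgs: 8, size: 3 }
  tasks:
    - name: filesystem pools related tasks
      block:
        - name: create filesystem pools (placeholder for the 'ceph osd pool create' call)
          debug:
            msg: "would create pool {{ item.name }} with {{ item.pgs }} PGs"
          with_items: "{{ cephfs_pools }}"

        - name: customize pool size only where a size is given
          debug:
            msg: "would set pool {{ item.name }} size to {{ item.size }}"
          with_items: "{{ cephfs_pools }}"
          when: item.size | default("") != ""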
@@ -29,7 +38,6 @@
  delegate_to: "{{ groups[mon_group_name][0] }}"
  when:
    - check_existing_cephfs.rc != 0
    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']

- name: allow multimds
  command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it"
@@ -43,5 +51,4 @@
  changed_when: false
  delegate_to: "{{ groups[mon_group_name][0] }}"
  when:
    - ceph_release_num[ceph_release] >= ceph_release_num.jewel
    - mds_max_mds > 1
@@ -1,6 +1,6 @@
---
- name: include create_mds_filesystems.yml
  include: create_mds_filesystems.yml
  include_tasks: create_mds_filesystems.yml
  when:
    - inventory_hostname == groups[mds_group_name] | first

@@ -11,12 +11,12 @@
    - containerized_deployment

- name: include common.yml
  include: common.yml
  include_tasks: common.yml

- name: non_containerized.yml
  include: non_containerized.yml
  include_tasks: non_containerized.yml
  when: not containerized_deployment

- name: containerized.yml
  include: containerized.yml
  include_tasks: containerized.yml
  when: containerized_deployment
@@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm ceph-mds-{{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
  --memory={{ ceph_mds_docker_memory_limit }} \
  {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%}
  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
  --cpus={{ ceph_mds_docker_cpu_limit }} \
  {% else -%}
  --cpu-quota={{ ceph_mds_docker_cpu_limit * 100000 }} \
@@ -1,3 +1,3 @@
---
- name: include start_docker_mgr.yml
  include: start_docker_mgr.yml
  include_tasks: start_docker_mgr.yml
@@ -6,34 +6,28 @@
    - containerized_deployment

- name: include common.yml
  include: common.yml
  include_tasks: common.yml

- name: include pre_requisite.yml
  include: pre_requisite.yml
  include_tasks: pre_requisite.yml
  when: not containerized_deployment

- name: include docker/main.yml
  include: docker/main.yml
  include_tasks: docker/main.yml
  when: containerized_deployment

- name: get enabled modules from ceph-mgr
  command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
  register: _ceph_mgr_modules
  delegate_to: "{{ groups[mon_group_name][0] }}"
  when:
    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']

- name: set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict)
  set_fact:
    _ceph_mgr_modules: "{{ _ceph_mgr_modules.get('stdout', '{}') | from_json }}"
  when:
    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']

- name: set _disabled_ceph_mgr_modules fact
  set_fact:
    _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}"
  when:
    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']

- name: disable ceph mgr enabled modules
  command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
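The _disabled_ceph_mgr_modules expression above has to cope with two output formats of `ceph mgr module ls`: older releases report disabled_modules as a plain list of names, newer ones as a list of dicts. A standalone sketch of that normalization, with the command output hard-coded purely for illustration:

# mgr_modules_sketch.yml -- the disabled_modules sample is hard-coded, not queried from a cluster
- hosts: localhost
  gather_facts: false
  vars:
    _ceph_mgr_modules:
      enabled_modules: [status, balancer]
      disabled_modules:
        - { name: dashboard, can_run: true }
        - { name: prometheus, can_run: true }
  tasks:
    - name: flatten disabled_modules to a list of names, whatever shape it arrives in
      set_fact:
        _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}"

    - debug:
        var: _disabled_ceph_mgr_modules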
@@ -44,12 +38,10 @@
  when:
    - item not in ceph_mgr_modules
    - not _ceph_mgr_modules.get('skipped')
    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']

- name: add modules to ceph-mgr
  command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
  with_items: "{{ ceph_mgr_modules }}"
  delegate_to: "{{ groups[mon_group_name][0] }}"
  when:
    - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
    - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
@@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm ceph-mgr-{{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
  --memory={{ ceph_mgr_docker_memory_limit }} \
  {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%}
  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
  --cpus={{ ceph_mgr_docker_cpu_limit }} \
  {% else -%}
  --cpu-quota={{ ceph_mgr_docker_cpu_limit * 100000 }} \
@@ -5,15 +5,6 @@
  check_mode: no
  when:
    - cephx
    - ceph_release_num[ceph_release] >= ceph_release_num.luminous

- name: collect admin and bootstrap keys
  command: ceph-create-keys --cluster {{ cluster }} -i {{ monitor_name }}
  changed_when: false
  check_mode: no
  when:
    - cephx
    - ceph_release_num[ceph_release] < ceph_release_num.luminous

# NOTE (leseb): wait for mon discovery and quorum resolution
# the admin key is not instantaneously created so we have to wait a bit
@@ -81,7 +72,6 @@
    - cephx
    - groups.get(mgr_group_name, []) | length > 0
    - inventory_hostname == groups[mon_group_name]|last
    - ceph_release_num[ceph_release] > ceph_release_num.jewel
  with_items: "{{ groups.get(mgr_group_name, []) }}"

# once this gets backported github.com/ceph/ceph/pull/20983
@@ -108,8 +98,6 @@
- name: set_fact bootstrap_rbd_keyring
  set_fact:
    bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
  when:
    - ceph_release_num[ceph_release] >= ceph_release_num.luminous

- name: copy keys to the ansible server
  fetch:
@@ -38,9 +38,9 @@
    - inventory_hostname == groups.get(mon_group_name) | last
    - not item.get('skipped', false)

- name: set_fact osd_pool_default_crush_rule to osd_pool_default_crush_replicated_ruleset if release < luminous else osd_pool_default_crush_rule
- name: set_fact osd_pool_default_crush_rule
  set_fact:
    osd_pool_default_crush_rule: "{{ 'osd_pool_default_crush_replicated_ruleset' if ceph_release_num[ceph_release] < ceph_release_num.luminous else 'osd_pool_default_crush_rule' }}"
    osd_pool_default_crush_rule: "osd_pool_default_crush_rule"

- name: insert new default crush rule into daemon to prevent restart
  command: "{{ hostvars[item]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set {{ osd_pool_default_crush_rule }} {{ info_ceph_default_crush_rule_yaml.rule_id }}"
@@ -48,7 +48,7 @@
    mode: "0755"
    recurse: true

- name: set_fact client_admin_ceph_authtool_cap >= ceph_release_num.luminous
- name: set_fact client_admin_ceph_authtool_cap
  set_fact:
    client_admin_ceph_authtool_cap:
      mon: allow *
@@ -56,18 +56,6 @@
      mds: allow
      mgr: allow *
  when:
    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
    - cephx
    - admin_secret != 'admin_secret'

- name: set_fact client_admin_ceph_authtool_cap < ceph_release_num.luminous
  set_fact:
    client_admin_ceph_authtool_cap:
      mon: allow *
      osd: allow *
      mds: allow
  when:
    - ceph_release_num[ceph_release] < ceph_release_num.luminous
    - cephx
    - admin_secret != 'admin_secret'

@@ -7,17 +7,7 @@
    - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
    - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
    - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring

- name: register rbd bootstrap key
  set_fact:
    bootstrap_rbd_keyring:
      - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
  when: ceph_release_num[ceph_release] >= ceph_release_num.luminous

- name: merge rbd bootstrap key to config and keys paths
  set_fact:
    ceph_config_keys: "{{ ceph_config_keys + bootstrap_rbd_keyring }}"
  when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
    - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring

- name: stat for ceph config and keys
  local_action:
@@ -43,4 +33,4 @@
    - "{{ statconfig.results }}"
  when:
    - item.1.stat.exists == true
    - item.0 | search("keyring")
    - item.0 is search("keyring")
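As with version_compare, the `search` filter is now invoked as a Jinja2 test. A one-task sketch of the new spelling, with a made-up file list in place of the real ceph_config_keys:

# search_test_sketch.yml -- file names are examples only
- hosts: localhost
  gather_facts: false
  vars:
    ceph_config_keys:
      - /etc/ceph/ceph.conf
      - /var/lib/ceph/bootstrap-osd/ceph.keyring
  tasks:
    - name: act only on keyring paths, using 'is search' instead of the deprecated filter form
      debug:
        msg: "{{ item }} is a keyring"
      with_items: "{{ ceph_config_keys }}"
      when: item is search("keyring")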