mirror of https://github.com/ceph/ceph-ansible.git
commit 28ee1fbfad

@@ -179,8 +179,6 @@ information about how to generate appropriate values for these variables.

By default, ceph-common installs from the Ceph repository. However, you
can set `ceph_origin` to "distro" to install Ceph from your default repository.

Moreover, to install any version of Ceph prior to the Jewel release on a Red Hat based system, you must also set `use_server_package_split: false`.
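For example, a minimal sketch of how these settings might look in a group variables file (illustrative values; the full commented sample appears further down in this diff):

```yaml
# Illustrative sketch for group_vars/all -- not a complete configuration.
ceph_origin: distro              # install Ceph from your distribution's default repository
use_server_package_split: false  # only for releases older than Jewel on RPM-based systems
```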

## Setup for Vagrant using libvirt provider

* Create vagrant_variables.yml (a sketch follows below)
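A minimal sketch of such a file (only `mon_vms`, `osd_vms`, `mds_vms`, `memory`, `eth`, `docker`, `ssh_username`, `disks` and `label_prefix` are keys visible in the Vagrantfile hunks below; the other key names and all values are illustrative assumptions):

```yaml
# vagrant_variables.yml -- illustrative sketch, adjust to your environment
mon_vms: 3
osd_vms: 3
mds_vms: 1
memory: 1024
eth: eth1
docker: false
ssh_username: vagrant
disks: "[ '/dev/sdb', '/dev/sdc' ]"
label_prefix: ""
subnet: 192.168.42   # assumed key feeding the "#{SUBNET}.0/24" networks below
```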

@@ -4,9 +4,12 @@
require 'yaml'
VAGRANTFILE_API_VERSION = '2'

DEBUG = false

config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))
settings=YAML.load_file(config_file)

LABEL_PREFIX = settings['label_prefix'] ? settings['label_prefix'] : ""
NMONS = settings['mon_vms']
NOSDS = settings['osd_vms']
NMDSS = settings['mds_vms']
@@ -24,18 +27,18 @@ MEMORY = settings['memory']
STORAGECTL = settings['vagrant_storagectl']
ETH = settings['eth']
DOCKER = settings['docker']
USER = settings['ssh_username']

if BOX == 'openstack'
  require 'vagrant-openstack-provider'
  OSVM = true
  USER = settings['os_ssh_username']
  OSUSER = settings['os_username']
  OSPREFIX = "#{OSUSER}-"
else
  OSVM = false
  OSPREFIX = ""
  if not USER then
    USER = settings['os_ssh_username']
  end
  LABEL_PREFIX = "#{USER}-"
end

ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode')

ansible_provision = proc do |ansible|
  if DOCKER then
    ansible.playbook = 'site-docker.yml'
@@ -50,24 +53,37 @@ ansible_provision = proc do |ansible|
  # these aren't supported by Vagrant, see
  # https://github.com/mitchellh/vagrant/issues/3539
  ansible.groups = {
    'mons' => (0..NMONS - 1).map { |j| "#{OSPREFIX}mon#{j}" },
    'osds' => (0..NOSDS - 1).map { |j| "#{OSPREFIX}osd#{j}" },
    'mdss' => (0..NMDSS - 1).map { |j| "#{OSPREFIX}mds#{j}" },
    'rgws' => (0..NRGWS - 1).map { |j| "#{OSPREFIX}rgw#{j}" },
    'nfss' => (0..NNFSS - 1).map { |j| "#{OSPREFIX}nfs#{j}" },
    'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{OSPREFIX}rbd_mirror#{j}" },
    'clients' => (0..CLIENTS - 1).map { |j| "#{OSPREFIX}client#{j}" },
    'iscsi_gw' => (0..NISCSI_GWS - 1).map { |j| "#{OSPREFIX}iscsi_gw#{j}" }
    'mons' => (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" },
    'osds' => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" },
    'mdss' => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" },
    'rgws' => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" },
    'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" },
    'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" },
    'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" },
    'iscsi_gw' => (0..NISCSI_GWS - 1).map { |j| "#{LABEL_PREFIX}iscsi_gw#{j}" }
  }

  if RESTAPI then
    ansible.groups['restapis'] = (0..NMONS - 1).map { |j| "#{OSPREFIX}mon#{j}" }
    ansible.groups['restapis'] = (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" }
  end

  ansible.extra_vars = {
    cluster_network: "#{SUBNET}.0/24",
    journal_size: 100,
    public_network: "#{SUBNET}.0/24",
  }
  if settings['ceph_install_source'] == 'dev' then
    ansible.extra_vars['ceph_dev'] = true
    if settings['ceph_install_branch'] then
      ansible.extra_vars['ceph_dev_branch'] = settings['ceph_install_branch']
    end
  else
    ansible.extra_vars['ceph_stable'] = true
  end

  # In a production deployment, these should be secret
  if DOCKER then
    ansible.extra_vars = {
    ansible.extra_vars = ansible.extra_vars.merge({
      mon_containerized_deployment: 'true',
      osd_containerized_deployment: 'true',
      mds_containerized_deployment: 'true',

@@ -78,27 +94,36 @@ ansible_provision = proc do |ansible|
      ceph_mon_docker_interface: ETH,
      ceph_mon_docker_subnet: "#{SUBNET}.0/24",
      ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE,OSD_JOURNAL_SIZE=100",
      cluster_network: "#{SUBNET}.0/24",
      public_network: "#{SUBNET}.0/24",
      ceph_osd_docker_devices: settings['disks'],
      # Note that OSVM is defaulted to false above
      ceph_docker_on_openstack: OSVM,
      ceph_docker_on_openstack: BOX == 'openstack',
      ceph_rgw_civetweb_port: 8080,
      generate_fsid: 'true',
      journal_size: 100,
    }
    })
  else
    ansible.extra_vars = {
      "ceph_#{settings['ceph_install_source']}"=> 'true',
      journal_collocation: 'true',
      pool_default_size: '2',
      journal_size: 100,
      monitor_interface: ETH,
      cluster_network: "#{SUBNET}.0/24",
      public_network: "#{SUBNET}.0/24",
    ansible.extra_vars = ansible.extra_vars.merge({
      devices: settings['disks'],
      os_tuning_params: settings['os_tuning_params']
    }
      journal_collocation: 'true',
      monitor_interface: ETH,
      os_tuning_params: settings['os_tuning_params'],
      pool_default_size: '2',
    })
  end

  if BOX == 'linode' then
    ansible.sudo = true
    # Use monitor_address_block instead of monitor_interface:
    ansible.extra_vars.delete(:monitor_interface)
    ansible.extra_vars = ansible.extra_vars.merge({
      cluster_network: "#{SUBNET}.0/16",
      devices: ['/dev/sdc'], # hardcode leftover disk
      journal_collocation: 'true',
      monitor_address_block: "#{SUBNET}.0/16",
      public_network: "#{SUBNET}.0/16",
    })
  end

  if DEBUG then
    ansible.verbose = '-vvv'
  end
  ansible.limit = 'all'
end
@@ -114,6 +139,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = BOX
  config.vm.box_url = BOX_URL
  config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048
  config.ssh.private_key_path = settings['ssh_private_key_path']
  config.ssh.username = USER

  # Faster bootup. Disable if you need this for libvirt
  config.vm.provider :libvirt do |v,override|

@@ -124,8 +151,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  # OpenStack VMs
  config.vm.provider :openstack do |os|
    config.vm.synced_folder ".", "/home/#{USER}/vagrant", disabled: true
    config.ssh.username = USER
    config.ssh.private_key_path = settings['os_ssh_private_key_path']
    config.ssh.pty = true
    os.openstack_auth_url = settings['os_openstack_auth_url']
    os.username = settings['os_username']

@@ -136,14 +161,28 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
    os.image = settings['os_image']
    os.keypair_name = settings['os_keypair_name']
    os.security_groups = ['default']
    os.networks = settings['os_networks']
    os.floating_ip_pool = settings['os_floating_ip_pool']
    config.vm.provision "shell", inline: "true", upload_path: "/home/#{USER}/vagrant-shell"
  end
  elsif BOX == 'linode'
    config.vm.provider :linode do |provider, override|
      provider.token = ENV['LINODE_API_KEY']
      provider.distribution = settings['cloud_distribution'] # 'Ubuntu 16.04 LTS'
      provider.datacenter = settings['cloud_datacenter']
      provider.plan = MEMORY.to_s
      provider.private_networking = true
      # root install generally takes <1GB
      provider.xvda_size = 4*1024
      # add some swap as the Linode distros require it
      provider.swap_size = 128
    end
  end

  (0..CLIENTS - 1).each do |i|
    config.vm.define "#{OSPREFIX}client#{i}" do |client|
      client.vm.hostname = "#{OSPREFIX}ceph-client#{i}"
      if !OSVM
    config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
      client.vm.hostname = "#{LABEL_PREFIX}ceph-client#{i}"
      if ASSIGN_STATIC_IP
        client.vm.network :private_network, ip: "#{SUBNET}.4#{i}"
      end
      # Virtualbox

@@ -166,13 +205,17 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
        prl.name = "ceph-client#{i}"
        prl.memory = "#{MEMORY}"
      end

      client.vm.provider :linode do |provider|
        provider.label = client.vm.hostname
      end
    end
  end
  (0..NRGWS - 1).each do |i|
    config.vm.define "#{OSPREFIX}rgw#{i}" do |rgw|
      rgw.vm.hostname = "#{OSPREFIX}ceph-rgw#{i}"
      if !OSVM
    config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
      rgw.vm.hostname = "#{LABEL_PREFIX}ceph-rgw#{i}"
      if ASSIGN_STATIC_IP
        rgw.vm.network :private_network, ip: "#{SUBNET}.5#{i}"
      end

@@ -196,13 +239,17 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
        prl.name = "ceph-rgw#{i}"
        prl.memory = "#{MEMORY}"
      end

      rgw.vm.provider :linode do |provider|
        provider.label = rgw.vm.hostname
      end
    end
  end

  (0..NNFSS - 1).each do |i|
    config.vm.define "nfs#{i}" do |nfs|
      nfs.vm.hostname = "ceph-nfs#{i}"
      if !OSVM
      if ASSIGN_STATIC_IP
        nfs.vm.network :private_network, ip: "#{SUBNET}.6#{i}"
      end

@@ -226,13 +273,17 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
        prl.name = "ceph-nfs#{i}"
        prl.memory = "#{MEMORY}"
      end

      nfs.vm.provider :linode do |provider|
        provider.label = nfs.vm.hostname
      end
    end
  end

  (0..NMDSS - 1).each do |i|
    config.vm.define "#{OSPREFIX}mds#{i}" do |mds|
      mds.vm.hostname = "#{OSPREFIX}ceph-mds#{i}"
      if !OSVM
    config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
      mds.vm.hostname = "#{LABEL_PREFIX}ceph-mds#{i}"
      if ASSIGN_STATIC_IP
        mds.vm.network :private_network, ip: "#{SUBNET}.7#{i}"
      end
      # Virtualbox

@@ -254,13 +305,17 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
        prl.name = "ceph-mds#{i}"
        prl.memory = "#{MEMORY}"
      end

      mds.vm.provider :linode do |provider|
        provider.label = mds.vm.hostname
      end
    end
  end

  (0..NRBD_MIRRORS - 1).each do |i|
    config.vm.define "#{OSPREFIX}rbd_mirror#{i}" do |rbd_mirror|
      rbd_mirror.vm.hostname = "#{OSPREFIX}ceph-rbd-mirror#{i}"
      if !OSVM
    config.vm.define "#{LABEL_PREFIX}rbd_mirror#{i}" do |rbd_mirror|
      rbd_mirror.vm.hostname = "#{LABEL_PREFIX}ceph-rbd-mirror#{i}"
      if ASSIGN_STATIC_IP
        rbd_mirror.vm.network :private_network, ip: "#{SUBNET}.8#{i}"
      end
      # Virtualbox

@@ -282,13 +337,17 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
        prl.name = "ceph-rbd-mirror#{i}"
        prl.memory = "#{MEMORY}"
      end

      rbd_mirror.vm.provider :linode do |provider|
        provider.label = rbd_mirror.vm.hostname
      end
    end
  end

  (0..NISCSI_GWS - 1).each do |i|
    config.vm.define "#{OSPREFIX}iscsi_gw#{i}" do |iscsi_gw|
      iscsi_gw.vm.hostname = "#{OSPREFIX}ceph-iscsi-gw#{i}"
      if !OSVM
    config.vm.define "#{LABEL_PREFIX}iscsi_gw#{i}" do |iscsi_gw|
      iscsi_gw.vm.hostname = "#{LABEL_PREFIX}ceph-iscsi-gw#{i}"
      if ASSIGN_STATIC_IP
        iscsi_gw.vm.network :private_network, ip: "#{SUBNET}.9#{i}"
      end
      # Virtualbox

@@ -310,13 +369,17 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
        prl.name = "ceph-iscsi-gw#{i}"
        prl.memory = "#{MEMORY}"
      end

      iscsi_gw.vm.provider :linode do |provider|
        provider.label = iscsi_gw.vm.hostname
      end
    end
  end
  (0..NMONS - 1).each do |i|
    config.vm.define "#{OSPREFIX}mon#{i}" do |mon|
      mon.vm.hostname = "#{OSPREFIX}ceph-mon#{i}"
      if !OSVM
    config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
      mon.vm.hostname = "#{LABEL_PREFIX}ceph-mon#{i}"
      if ASSIGN_STATIC_IP
        mon.vm.network :private_network, ip: "#{SUBNET}.1#{i}"
      end
      # Virtualbox

@@ -339,13 +402,17 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
        prl.name = "ceph-mon#{i}"
        prl.memory = "#{MEMORY}"
      end

      mon.vm.provider :linode do |provider|
        provider.label = mon.vm.hostname
      end
    end
  end

  (0..NOSDS - 1).each do |i|
    config.vm.define "#{OSPREFIX}osd#{i}" do |osd|
      osd.vm.hostname = "#{OSPREFIX}ceph-osd#{i}"
      if !OSVM
    config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
      osd.vm.hostname = "#{LABEL_PREFIX}ceph-osd#{i}"
      if ASSIGN_STATIC_IP
        osd.vm.network :private_network, ip: "#{SUBNET}.10#{i}"
        osd.vm.network :private_network, ip: "#{SUBNET}.20#{i}"
      end

@@ -400,6 +467,10 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
        end
      end

      osd.vm.provider :linode do |provider|
        provider.label = osd.vm.hostname
      end

      # Run the provisioner after the last machine comes up
      osd.vm.provision 'ansible', &ansible_provision if i == (NOSDS - 1)
    end

@@ -0,0 +1,18 @@
# This Dockerfile is for setting up a dev environment for launching Ceph
# clusters on Linode.

FROM ubuntu:16.04

WORKDIR /root
RUN apt-get update
RUN apt-get install -y build-essential git ansible python-netaddr rsync
RUN wget -O vagrant_1.8.5_x86_64.deb https://releases.hashicorp.com/vagrant/1.8.5/vagrant_1.8.5_x86_64.deb
RUN dpkg -i vagrant_1.8.5_x86_64.deb
RUN rm -f vagrant_1.8.5_x86_64.deb
RUN vagrant plugin install vagrant-linode
# Download patch from https://github.com/displague/vagrant-linode/pull/66
RUN wget -O .vagrant.d/gems/gems/vagrant-linode-0.2.7/lib/vagrant-linode/actions/create.rb https://raw.githubusercontent.com/batrick/vagrant-linode/dfa305dab9c5a8ba49b50e7d9d1159977708c2d1/lib/vagrant-linode/actions/create.rb
RUN mkdir .ssh && ssh-keygen -f .ssh/id_rsa -t rsa -N ''
RUN git clone https://github.com/ceph/ceph-ansible.git

WORKDIR /root/ceph-ansible
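# Illustrative usage only (not part of the commit); assumes the image is
# built from the directory containing this Dockerfile:
#   docker build -t ceph-ansible-linode-dev .
#   docker run -ti -e LINODE_API_KEY=<your key> ceph-ansible-linode-dev
# LINODE_API_KEY is the environment variable the Vagrantfile above reads.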

@@ -0,0 +1,2 @@
This directory includes Dockerfiles for building development environments to
run ceph-ansible.

@@ -41,10 +41,6 @@ dummy:
# "state=latest".
#upgrade_ceph_packages: False

# If this is false then the 'ceph' package will be installed on rpm systems, which
# is not needed for versions after infernalis.
#use_server_package_split: true

# /!\ EITHER ACTIVE ceph_stable OR ceph_stable_uca OR ceph_dev OR ceph_custom /!\

#debian_package_dependencies:

@@ -113,16 +109,18 @@ dummy:
#ceph_stable_release: jewel # ceph stable release
#ceph_stable_repo: "http://download.ceph.com/debian-{{ ceph_stable_release }}"

###################
# Stable Releases #
###################
#ceph_stable_releases:
# - dumpling
# - emperor
# - firefly
# - giant
# - hammer
# - infernalis
######################################
# Releases name to number dictionary #
######################################
#ceph_release_num:
# dumpling: 0.67
# emperor: 0.72
# firefly: 0.80
# giant: 0.87
# hammer: 0.94
# infernalis: 9
# jewel: 10
# kraken: 11

# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/

@@ -147,8 +145,8 @@ dummy:
#
#ceph_rhcs: false
# This will affect how/what repositories are enabled depending on the desired
# version. The next version will use "2" not "2.0" which would not work.
#ceph_rhcs_version: 1.3 # next version is 2
# version. The previous version was 1.3. The current version is 2.
#ceph_rhcs_version: 2
#ceph_rhcs_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com
#ceph_rhcs_iso_install: false # usually used when nodes don't have access to cdn.redhat.com
#ceph_rhcs_iso_path:

@@ -275,6 +273,8 @@ dummy:
## MDS options
#
#mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf
#mds_allow_multimds: false
#mds_max_mds: 3

## Rados Gateway options
#

@@ -61,6 +61,12 @@ dummy:
#   name: backups
#   pg_num: "{{ pool_default_pg_num }}"

#openstack_pools:
#  - "{{ openstack_glance_pool }}"
#  - "{{ openstack_cinder_pool }}"
#  - "{{ openstack_nova_pool }}"
#  - "{{ openstack_cinder_backup_pool }}"

#openstack_keys:
#  - { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}'" }
#  - { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}'" }

@@ -76,6 +76,7 @@
    cluster: ceph # name of the cluster
    monitor_name: "{{ ansible_hostname }}"
    mds_name: "{{ ansible_hostname }}"
    osd_auto_discovery: false

  handlers:

@@ -86,7 +87,8 @@
    ignore_errors: true

  - name: wait for server to boot
    local_action: wait_for port=22 host={{ inventory_hostname }} state=started delay=10 timeout=400
    become: false
    local_action: wait_for port=22 host={{ inventory_hostname }} state=started delay=10 timeout=500

  - name: remove data
    file:

@@ -261,24 +263,36 @@
      rbdmirror_group_name in group_names

  - name: check for anything running ceph
    shell: "ps awux | grep -- [c]eph-"
    shell: "ps awux | grep -- /usr/bin/[c]eph-"
    register: check_for_running_ceph
    failed_when: check_for_running_ceph.rc == 0

  - name: see if ceph-disk-created data partitions are present
    shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20data'"
    shell: |
      ls /dev/disk/by-partlabel | grep -q "ceph.*.data"
    failed_when: false
    register: ceph_data_partlabels

  - name: see if ceph-disk-created journal partitions are present
    shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20journal'"
    shell: |
      ls /dev/disk/by-partlabel | grep -q "ceph.*.journal"
    failed_when: false
    register: ceph_journal_partlabels

  - name: get ceph journal partitions
    shell: |
      blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
    when:
      - ceph_journal_partlabels.rc == 0
    failed_when: false
    register: ceph_journal_partition_to_erase_path

  - name: get osd data mount points
    shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
    register: mounted_osd
    changed_when: false
    when:
      osd_group_name in group_names

  - name: drop all cache
    shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"

@@ -310,6 +324,7 @@

  - name: is reboot needed
    local_action: shell echo requesting reboot
    become: false
    notify:
      - restart machine
      - wait for server to boot

@@ -332,15 +347,23 @@
      ceph_data_partlabels.rc == 0 and
      zap_block_devs

  - name: zap journal devices
    shell: ceph-disk zap "{{ item }}"
    with_items: "{{ raw_journal_devices|default([])|unique }}"
  - name: zap ceph journal partitions
    shell: |
      # if the disk passed is a raw device AND the boot system disk
      if echo "{{ item }}" | egrep -sq '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p){1,2}$' && parted -s $(echo "{{ item }}" | egrep -o '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p){1,2}') print | grep -sq boot; then
        echo "Looks like {{ item }} has a boot partition,"
        echo "if you want to delete specific partitions point to the partition instead of the raw device"
        echo "Do not use your system disk!"
        exit 1
      fi
      raw_device=$(echo "{{ item }}" | egrep -o '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p){1,2}')
      partition_nb=$(echo "{{ item }}" | egrep -o '[0-9]{1,2}$')
      sgdisk --delete $partition_nb $raw_device
    with_items: "{{ceph_journal_partition_to_erase_path.stdout_lines}}"
    when:
      osd_group_name in group_names and
      ceph_disk_present.rc == 0 and
      ceph_journal_partlabels.rc == 0 and
      zap_block_devs and
      raw_multi_journal
      zap_block_devs

  - name: purge ceph packages with yum
    yum:

@@ -59,10 +59,8 @@
  vars:
    upgrade_ceph_packages: True
    mon_group_name: mons

  pre_tasks:
    - name: compress the store as much as possible
      command: ceph tell mon.{{ ansible_hostname }} compact
    health_mon_check_retries: 5
    health_mon_check_delay: 10

  roles:
    - ceph-common

@@ -96,11 +94,11 @@

    - name: waiting for the monitor to join the quorum...
      shell: |
        ceph -s | grep monmap | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
        ceph -s --cluster {{ cluster }} | grep monmap | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
      register: result
      until: result.rc == 0
      retries: 5
      delay: 10
      retries: "{{ health_mon_check_retries }}"
      delay: "{{ health_mon_check_delay }}"
      delegate_to: "{{ mon_host }}"

@@ -110,10 +108,12 @@
  vars:
    upgrade_ceph_packages: True
    osd_group_name: osds
    health_osd_check_retries: 10
    health_osd_check_delay: 10

  pre_tasks:
    - name: set osd flags
      command: ceph osd set {{ item }}
      command: ceph osd set {{ item }} --cluster {{ cluster }}
      with_items:
        - noout
        - noscrub

@@ -152,15 +152,15 @@

    - name: waiting for clean pgs...
      shell: |
        test "$(ceph pg stat | sed 's/^.*pgs://;s/active+clean.*//;s/ //')" -eq "$(ceph pg stat | sed 's/pgs.*//;s/^.*://;s/ //')" && ceph health | egrep -sq "HEALTH_OK|HEALTH_WARN"
        test "$(ceph pg stat --cluster {{ cluster }} | sed 's/^.*pgs://;s/active+clean.*//;s/ //')" -eq "$(ceph pg stat --cluster {{ cluster }} | sed 's/pgs.*//;s/^.*://;s/ //')" && ceph health --cluster {{ cluster }} | egrep -sq "HEALTH_OK|HEALTH_WARN"
      register: result
      until: result.rc == 0
      retries: 10
      delay: 10
      retries: "{{ health_osd_check_retries }}"
      delay: "{{ health_osd_check_delay }}"
      delegate_to: "{{ groups.mons[0] }}"

    - name: unset osd flags
      command: ceph osd unset {{ item }}
      command: ceph osd unset {{ item }} --cluster {{ cluster }}
      with_items:
        - noout
        - noscrub

@@ -0,0 +1,527 @@
---
# This playbook switches from non-containerized to containerized Ceph daemons

- name: confirm whether user really meant to switch from non-containerized to containerized ceph daemons

  hosts:
    - localhost

  gather_facts: false

  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to switch from non-containerized to containerized ceph daemons?
      default: 'no'
      private: no

  tasks:
    - name: exit playbook, if user did not mean to switch from non-containerized to containerized daemons
      fail:
        msg: >
          "Exiting switch-from-non-containerized-to-containerized-ceph-daemons.yml playbook,
          cluster did not switch from non-containerized to containerized ceph daemons.
          To switch from non-containerized to containerized ceph daemons, either say 'yes' on the prompt
          or use `-e ireallymeanit=yes` on the command line when
          invoking the playbook"
      when: ireallymeanit != 'yes'


- name: make sure docker is present and started

  vars:
    mon_group_name: mons
    osd_group_name: osds
    mds_group_name: mdss
    rgw_group_name: rgws
    rbdmirror_group_name: rbd_mirrors
    nfs_group_name: nfss

  hosts:
    - "{{ mon_group_name }}"
    - "{{ osd_group_name }}"
    - "{{ mds_group_name }}"
    - "{{ rgw_group_name }}"
    - "{{ rbdmirror_group_name }}"
    - "{{ nfs_group_name }}"

  become: true

  tasks:

    - name: install docker and dependencies for the docker module
      package:
        name: "{{ item }}"
        state: present
      with_items:
        - python-docker-py
        - python-urllib3
        - docker
      when: ansible_os_family == 'RedHat'

    - name: install docker-py for the docker module
      package:
        name: "{{ item }}"
        state: present
      with_items:
        - docker-py
        - python-urllib3
        - docker
      when: ansible_os_family == 'Debian'

    - name: start docker service
      service:
        name: docker
        state: started
        enabled: yes

    - name: check if selinux is enabled
      command: getenforce
      register: sestatus
      changed_when: false
      when: ansible_os_family == 'RedHat'

    - name: set selinux permissions
      command: chcon -Rt svirt_sandbox_file_t "{{ item }}"
      with_items:
        - /etc/ceph
        - /var/lib/ceph
      changed_when: false
      when:
        - sestatus.stdout != 'Disabled'
        - ansible_os_family == 'RedHat'


- name: switching from non-containerized to containerized ceph mon

  vars:
    mon_group_name: mons

  hosts:
    - "{{ mon_group_name }}"

  serial: 1
  become: true

  tasks:
    - include_vars: ../roles/ceph-common/defaults/main.yml
    - include_vars: ../roles/ceph-mon/defaults/main.yml
    - include_vars: ../roles/ceph-restapi/defaults/main.yml
    - include_vars: ../group_vars/all
      failed_when: false
    - include_vars: ../group_vars/mons
      failed_when: false
    - include_vars: ../group_vars/restapis
      failed_when: false

    - name: select a running monitor
      set_fact: mon_host={{ item }}
      with_items: groups.mons
      when: item != inventory_hostname

    - name: get current ceph fsid
      command: ceph fsid
      register: ceph_fsid
      changed_when: false
      delegate_to: "{{ mon_host }}"

    - name: stop ceph mon bare metal service
      service:
        name: "ceph-mon@{{ ansible_hostname }}"
        state: stopped
        enabled: no

    # NOTE(leseb): should we also create systemd files
    # instead of running raw docker commands?
    # It is probably more elegant but will require a template file...
    # which would make this single file playbook more complex

    - set_fact:
        ceph_uid: 64045
      when: ceph_mon_docker_image_tag | match("latest")

    - set_fact:
        ceph_uid: 64045
      when: ceph_mon_docker_image_tag | search("ubuntu")

    - set_fact:
        ceph_uid: 167
      when: ceph_mon_docker_image_tag | search("centos")

    - set_fact:
        ceph_uid: 167
      when: ceph_mon_docker_image_tag | search("fedora")

    - name: set proper ownership on ceph directories
      file:
        path: "{{ item }}"
        owner: "{{ ceph_uid }}"
        recurse: yes
      with_items:
        - /var/lib/ceph
        - /etc/ceph

    - name: start ceph mon container image
      docker:
        image: "{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}:{{ ceph_mon_docker_image_tag }}"
        name: "{{ ansible_hostname }}"
        net: "host"
        state: "running"
        privileged: "{{ mon_docker_privileged }}"
        env: "MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},CEPH_FSID={{ ceph_fsid.stdout }},{{ ceph_mon_extra_envs }}"
        volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro"

    - name: waiting for the monitor to join the quorum...
      shell: |
        ceph -s --cluster {{ cluster }} | grep monmap | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
      register: result
      until: result.rc == 0
      retries: 5
      delay: 10
      changed_when: false
      delegate_to: "{{ mon_host }}"

- name: switching from non-containerized to containerized ceph osd

  vars:
    osd_group_name: osds

  hosts:
    - "{{ osd_group_name }}"

  serial: 1
  become: true

  tasks:
    - include_vars: ../roles/ceph-common/defaults/main.yml
    - include_vars: ../roles/ceph-osd/defaults/main.yml
    - include_vars: ../group_vars/all
      failed_when: false
    - include_vars: ../group_vars/osds
      failed_when: false

    - name: collect osd ids
      shell: |
        if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | cut -d '-' -f 2 ; fi
      register: osd_ids
      changed_when: false

    - name: collect osd devices
      shell: |
        blkid | awk '/ceph data/ { sub ("1:", "", $1); print $1 }'
      register: ceph_osd_docker_devices
      changed_when: false

    - name: stop ceph osd service
      service:
        name: "ceph-osd@{{ item }}"
        state: stopped
        enabled: no
      with_items: "{{ osd_ids.stdout_lines }}"

    - set_fact:
        ceph_uid: 64045
      when: ceph_osd_docker_image_tag | match("latest")

    - set_fact:
        ceph_uid: 64045
      when: ceph_osd_docker_image_tag | search("ubuntu")

    - set_fact:
        ceph_uid: 167
      when: ceph_osd_docker_image_tag | search("centos")

    - set_fact:
        ceph_uid: 167
      when: ceph_osd_docker_image_tag | search("fedora")

    - name: set proper ownership on ceph directories
      file:
        path: "{{ item }}"
        owner: "{{ ceph_uid }}"
        recurse: yes
      with_items:
        - /var/lib/ceph
        - /etc/ceph

    - name: check if containerized osds are already running
      shell: |
        docker ps | grep -sq {{ item | regex_replace('/', '') }}
      changed_when: false
      with_items: "{{ ceph_osd_docker_devices.stdout_lines }}"
      register: osd_running

    - name: unmount all the osd directories
      mount:
        name: "/var/lib/ceph/osd/{{ cluster }}-{{ item.0 }}"
        state: unmounted
        src: "{{ item.1 }}"
        fstype: xfs
      with_together:
        - "{{ osd_ids.stdout_lines }}"
        - "{{ ceph_osd_docker_devices.stdout_lines }}"
        - "{{ osd_running.results }}"
      when: item.2.rc != 0

    - name: start ceph osd container image(s)
      docker:
        image: "{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}:{{ ceph_osd_docker_image_tag }}"
        name: "{{ ansible_hostname }}-osd-{{ item | regex_replace('/', '') }}"
        net: host
        pid: host
        state: started
        privileged: yes
        env: "OSD_DEVICE={{ item }},{{ ceph_osd_docker_extra_env }}"
        volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro,/dev:/dev,/run:/run"
      with_items: "{{ ceph_osd_docker_devices.stdout_lines }}"

    - name: waiting for clean pgs...
      shell: |
        test "$(ceph pg stat --cluster {{ cluster }} | sed 's/^.*pgs://;s/active+clean.*//;s/ //')" -eq "$(ceph pg stat --cluster {{ cluster }} | sed 's/pgs.*//;s/^.*://;s/ //')" && ceph health --cluster {{ cluster }} | egrep -sq "HEALTH_OK|HEALTH_WARN"
      register: result
      until: result.rc == 0
      retries: 10
      delay: 10
      changed_when: false
      delegate_to: "{{ groups.mons[0] }}"

- name: switching from non-containerized to containerized ceph mds

  vars:
    mds_group_name: mdss

  hosts:
    - "{{ mds_group_name }}"

  serial: 1
  become: true

  tasks:
    - include_vars: ../roles/ceph-common/defaults/main.yml
    - include_vars: ../roles/ceph-mds/defaults/main.yml
    - include_vars: ../group_vars/all
      failed_when: false
    - include_vars: ../group_vars/mdss
      failed_when: false

    - name: stop ceph mds service
      service:
        name: "ceph-mds@{{ ansible_hostname }}"
        state: stopped
        enabled: no

    - set_fact:
        ceph_uid: 64045
      when: ceph_mds_docker_image_tag | match("latest")

    - set_fact:
        ceph_uid: 64045
      when: ceph_mds_docker_image_tag | search("ubuntu")

    - set_fact:
        ceph_uid: 167
      when: ceph_mds_docker_image_tag | search("centos")

    - set_fact:
        ceph_uid: 167
      when: ceph_mds_docker_image_tag | search("fedora")

    - name: set proper ownership on ceph directories
      file:
        path: "{{ item }}"
        owner: "{{ ceph_uid }}"
        recurse: yes
      with_items:
        - /var/lib/ceph
        - /etc/ceph

    - name: start ceph metadata container image
      docker:
        image: "{{ ceph_mds_docker_username }}/{{ ceph_mds_docker_imagename }}:{{ ceph_mds_docker_image_tag }}"
        name: ceph-{{ ansible_hostname }}-mds
        net: host
        state: running
        env: "CEPH_DAEMON=MDS,CEPHFS_CREATE=1,{{ ceph_mds_docker_extra_env }}"
        volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro"

- name: switching from non-containerized to containerized ceph rgw

  vars:
    rgw_group_name: rgws

  hosts:
    - "{{ rgw_group_name }}"

  serial: 1
  become: true

  tasks:
    - include_vars: ../roles/ceph-common/defaults/main.yml
    - include_vars: ../roles/ceph-rgw/defaults/main.yml
    - include_vars: ../group_vars/all
      failed_when: false
    - include_vars: ../group_vars/rgws
      failed_when: false

    - name: stop ceph rgw service
      service:
        name: "ceph-rgw@{{ ansible_hostname }}"
        state: stopped
        enabled: no

    - set_fact:
        ceph_uid: 64045
      when: ceph_rgw_docker_image_tag | match("latest")

    - set_fact:
        ceph_uid: 64045
      when: ceph_rgw_docker_image_tag | search("ubuntu")

    - set_fact:
        ceph_uid: 167
      when: ceph_rgw_docker_image_tag | search("centos")

    - set_fact:
        ceph_uid: 167
      when: ceph_rgw_docker_image_tag | search("fedora")

    - name: set proper ownership on ceph directories
      file:
        path: "{{ item }}"
        owner: "{{ ceph_uid }}"
        recurse: yes
      with_items:
        - /var/lib/ceph
        - /etc/ceph

    - name: start ceph rados gateway container image
      docker:
        image: "{{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}:{{ ceph_rgw_docker_image_tag }}"
        name: ceph-{{ ansible_hostname }}-rgw
        expose: "{{ ceph_rgw_civetweb_port }}"
        ports: "{{ ceph_rgw_civetweb_port }}:{{ ceph_rgw_civetweb_port }}"
        state: running
        env: "CEPH_DAEMON=RGW,{{ ceph_rgw_docker_extra_env }}"
        volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro"

- name: switching from non-containerized to containerized ceph rbd-mirror

  vars:
    rbdmirror_group_name: rbd_mirrors

  hosts:
    - "{{ rbdmirror_group_name }}"

  serial: 1
  become: true

  tasks:
    - include_vars: ../roles/ceph-common/defaults/main.yml
    - include_vars: ../roles/ceph-rbd-mirror/defaults/main.yml
    - include_vars: ../group_vars/all
      failed_when: false
    - include_vars: ../group_vars/rbd-mirrors
      failed_when: false

    - name: stop ceph rbd mirror service
      service:
        name: "ceph-rbd-mirror@{{ ansible_hostname }}"
        state: stopped
        enabled: no

    - set_fact:
        ceph_uid: 64045
      when: ceph_rbd_mirror_docker_image_tag | match("latest")

    - set_fact:
        ceph_uid: 64045
      when: ceph_rbd_mirror_docker_image_tag | search("ubuntu")

    - set_fact:
        ceph_uid: 167
      when: ceph_rbd_mirror_docker_image_tag | search("centos")

    - set_fact:
        ceph_uid: 167
      when: ceph_rbd_mirror_docker_image_tag | search("fedora")

    - name: set proper ownership on ceph directories
      file:
        path: "{{ item }}"
        owner: "{{ ceph_uid }}"
        recurse: yes
      with_items:
        - /var/lib/ceph
        - /etc/ceph

    - name: start ceph rbd mirror container image
      docker:
        image: "{{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}:{{ ceph_rbd_mirror_docker_image_tag }}"
        name: "{{ ansible_hostname }}"
        net: host
        state: running
        volumes: "/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro"

- name: switching from non-containerized to containerized ceph nfs

  vars:
    nfs_group_name: nfss

  hosts:
    - "{{ nfs_group_name }}"

  serial: 1
  become: true

  tasks:
    - include_vars: ../roles/ceph-common/defaults/main.yml
    - include_vars: ../roles/ceph-nfs/defaults/main.yml
    - include_vars: ../group_vars/all
      failed_when: false
    - include_vars: ../group_vars/nfss
      failed_when: false

    - name: stop ceph nfs service
      service:
        name: "ceph-nfs@{{ ansible_hostname }}"
        state: stopped
        enabled: no

    - set_fact:
        ceph_uid: 64045
      when: ceph_nfs_docker_image_tag | match("latest")

    - set_fact:
        ceph_uid: 64045
      when: ceph_nfs_docker_image_tag | search("ubuntu")

    - set_fact:
        ceph_uid: 167
      when: ceph_nfs_docker_image_tag | search("centos")

    - set_fact:
        ceph_uid: 167
      when: ceph_nfs_docker_image_tag | search("fedora")

    - name: set proper ownership on ceph directories
      file:
        path: "{{ item }}"
        owner: "{{ ceph_uid }}"
        recurse: yes
      with_items:
        - /var/lib/ceph
        - /etc/ceph

    - name: start ceph nfs container image
      docker:
        image: "{{ ceph_nfs_docker_username }}/{{ ceph_nfs_docker_imagename }}:{{ ceph_nfs_docker_image_tag }}"
        name: "{{ ansible_hostname }}"
        net: "host"
        state: "running"
        privileged: true
        ports: "{{ ceph_nfs_port }}:{{ ceph_nfs_port }},111:111"
        env: "CEPH_DAEMON=NFS,CEPH_PUBLIC_NETWORK={{ ceph_nfs_docker_subnet }},{{ ceph_nfs_extra_envs }}"
        volumes: "/etc/ceph:/etc/ceph,/etc/ganesha:/etc/ganesha,/etc/localtime:/etc/localtime:ro"

@@ -42,6 +42,7 @@ if [[ "Debian" =~ $os_VENDOR ]]; then
    pip install PyYAML jinja2 paramiko
    git clone https://github.com/ansible/ansible.git
    cd ansible
    git submodule update --init --recursive
    make install
    mkdir /etc/ansible
elif [[ "Ubuntu" =~ $os_VENDOR || "LinuxMint" =~ $os_VENDOR ]]; then

@@ -4,4 +4,4 @@ coreos_pypy_url: https://bitbucket.org/pypy/pypy/downloads/pypy-{{coreos_pypy_ve
pypy_directory: /opt/pypy
pypy_binary_directory: /opt/bin
pip_url: https://bootstrap.pypa.io/get-pip.py
local_temp_directory: /tmp
local_temp_directory: /tmp

@@ -13,4 +13,4 @@
  ignore_errors: true

- include: install_pip.yml
  when: need_pip | failed and need_python | failed
  when: need_pip | failed and need_python | failed

@@ -33,10 +33,6 @@ check_firewall: False
# "state=latest".
upgrade_ceph_packages: False

# If this is false then the 'ceph' package will be installed on rpm systems, which
# is not needed for versions after infernalis.
use_server_package_split: true

# /!\ EITHER ACTIVE ceph_stable OR ceph_stable_uca OR ceph_dev OR ceph_custom /!\

debian_package_dependencies:

@@ -105,16 +101,18 @@ ceph_stable_key: https://download.ceph.com/keys/release.asc
ceph_stable_release: jewel # ceph stable release
ceph_stable_repo: "http://download.ceph.com/debian-{{ ceph_stable_release }}"

###################
# Stable Releases #
###################
ceph_stable_releases:
  - dumpling
  - emperor
  - firefly
  - giant
  - hammer
  - infernalis
######################################
# Releases name to number dictionary #
######################################
ceph_release_num:
  dumpling: 0.67
  emperor: 0.72
  firefly: 0.80
  giant: 0.87
  hammer: 0.94
  infernalis: 9
  jewel: 10
  kraken: 11

# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/

@@ -139,8 +137,8 @@ ceph_stable_redhat_distro: el7
#
ceph_rhcs: false
# This will affect how/what repositories are enabled depending on the desired
# version. The next version will use "2" not "2.0" which would not work.
ceph_rhcs_version: 1.3 # next version is 2
# version. The previous version was 1.3. The current version is 2.
ceph_rhcs_version: 2
ceph_rhcs_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com
ceph_rhcs_iso_install: false # usually used when nodes don't have access to cdn.redhat.com
#ceph_rhcs_iso_path:

@@ -267,6 +265,8 @@ filestore_xattr_use_omap: null
## MDS options
#
mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf
mds_allow_multimds: false
mds_max_mds: 3

## Rados Gateway options
#
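
# The release-to-number dictionary above is what the reworked handlers and
# install tasks below index into. An illustrative task (not from the commit)
# showing the pattern -- with ceph_release: jewel this compares 10 > 0.94:
#
# - debug:
#     msg: "this cluster is newer than hammer"
#   when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer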

@@ -9,7 +9,7 @@
    - socket.rc == 0
    - ansible_distribution != 'Ubuntu'
    - mon_group_name in group_names
    - is_before_infernalis
    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: restart ceph mons with systemd
  service:

@@ -19,7 +19,7 @@
    - socket.rc == 0
    - use_systemd
    - mon_group_name in group_names
    - is_after_hammer
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: restart ceph mons on ubuntu
  command: initctl restart ceph-mon cluster={{ cluster }} id={{ monitor_name }}

@@ -35,7 +35,7 @@
    - socket.rc == 0
    - ansible_distribution != 'Ubuntu'
    - osd_group_name in group_names
    - is_before_infernalis
    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

# This does not just restart OSDs but everything else too. Unfortunately
# at this time the ansible role does not have an OSD id list to use

@@ -48,7 +48,7 @@
    - socket.rc == 0
    - use_systemd
    - osd_group_name in group_names
    - is_after_hammer
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: restart ceph osds on ubuntu
  shell: |

@@ -76,7 +76,7 @@
    - ansible_distribution != 'Ubuntu'
    - use_systemd
    - mds_group_name in group_names
    - is_before_infernalis
    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: restart ceph mdss with systemd
  service:

@@ -86,7 +86,7 @@
    - socket.rc == 0
    - use_systemd
    - mds_group_name in group_names
    - is_after_hammer
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: restart ceph rgws on ubuntu
  command: initctl restart radosgw cluster={{ cluster }} id=rgw.{{ ansible_hostname }}

@@ -102,7 +102,7 @@
    - socketrgw.rc == 0
    - ansible_distribution != 'Ubuntu'
    - rgw_group_name in group_names
    - is_before_infernalis
    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: restart ceph rgws on red hat
  command: /etc/init.d/ceph-radosgw restart

@@ -110,7 +110,7 @@
    - socketrgw.rc == 0
    - ansible_os_family == 'RedHat'
    - rgw_group_name in group_names
    - is_before_infernalis
    - ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: restart ceph rgws with systemd
  service:

@@ -120,7 +120,7 @@
    - socketrgw.rc == 0
    - use_systemd
    - rgw_group_name in group_names
    - is_after_hammer
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: restart apache2
  service:

@@ -40,12 +40,13 @@
    - osd_objectstore != 'bluestore'
    - osd_group_name in group_names

- name: make sure monitor_interface or monitor_address is configured
- name: make sure monitor_interface or monitor_address or monitor_address_block is configured
  fail:
    msg: "monitor_interface or monitor_address must be configured. Interface for the monitor to listen on or IP address of that interface"
    msg: "Either monitor_interface, monitor_address, or monitor_address_block must be configured. Interface for the monitor to listen on or IP address of that interface"
  when:
    - monitor_interface == 'interface'
    - monitor_address == '0.0.0.0'
    - not monitor_address_block
    - mon_group_name in group_names

- name: make sure cluster_network configured
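
# For reference (illustrative values, not part of the commit): the check above
# passes when any one of the three is set in group_vars, e.g.
#
# monitor_interface: eth1
# monitor_address: 192.168.42.10
# monitor_address_block: 192.168.42.0/24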

@@ -21,48 +21,6 @@
- set_fact:
    use_systemd={{ init_system.strip() == 'systemd' }}

# NOTE (leseb/jsaintrocc): You are supposed to quote variables
# that follow colons to avoid confusion with dicts but this
# causes issues with the boolean, so we keep this syntax styling...
# is_before facts
- set_fact:
    is_before_hammer={{ ceph_version | version_compare('0.94.0', '<') }}

- set_fact:
    is_before_infernalis={{ ceph_version | version_compare('9.0.0', '<') }}

- set_fact:
    is_before_jewel={{ ceph_version | version_compare('10.0.0', '<') }}

- set_fact:
    is_before_kraken={{ ceph_version | version_compare('11.0.0', '<') }}

# is_after facts
- set_fact:
    is_after_hammer={{ ceph_version | version_compare('0.95.0', '>') }}

- set_fact:
    is_after_infernalis={{ ceph_version | version_compare('9.4.0', '>') }}

- set_fact:
    is_after_jewel={{ ceph_version | version_compare('10.4.0', '>') }}

- set_fact:
    is_after_kraken={{ ceph_version | version_compare('11.4.0', '>') }}

# is_version facts
- set_fact:
    is_hammer={{ ceph_version | version_compare('0.94.0', '>=') | version_compare('9.0.0', '<') }}

- set_fact:
    is_infernalis={{ ceph_version | version_compare('9.0.0', '>=') and ceph_version | version_compare('10.0.0', '<') }}

- set_fact:
    is_jewel={{ ceph_version | version_compare('10.0.0', '>=') and ceph_version | version_compare('11.0.0', '<') }}

- set_fact:
    is_kraken={{ ceph_version | version_compare('11.0.0', '>=') and ceph_version | version_compare('12.0.0', '<') }}

- set_fact:
    mds_name: "{{ ansible_hostname }}"
  when: not mds_use_fqdn

@@ -44,3 +44,9 @@
  changed_when: false
  when: ceph_custom

- name: add gluster nfs ganesha repo
  apt_repository:
    repo: "ppa:gluster/nfs-ganesha"
    state: present
  changed_when: false
  when: nfs_obj_gw or nfs_file_gw

@@ -1,11 +1,11 @@
---
- name: install dependencies
  apt:
    pkg: "{{ item }}"
    name: "{{ item }}"
    state: present
    update_cache: yes
    cache_valid_time: 3600
  with_items: debian_package_dependencies
  with_items: "{{debian_package_dependencies}}"

- name: configure ceph apt repository
  include: debian_ceph_repository.yml

@@ -13,10 +13,10 @@

- name: install ceph
  apt:
    pkg: "{{ item }}"
    name: "{{ item }}"
    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
    default_release: "{{ ceph_stable_release_uca | default(ansible_distribution_release) }}{{ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
  with_items: debian_ceph_packages
  with_items: "{{debian_ceph_packages}}"

- name: install ceph-test
  apt:

@@ -3,7 +3,7 @@
  yum:
    name: "{{ item }}"
    state: present
  with_items: redhat_package_dependencies
  with_items: "{{ redhat_package_dependencies }}"
  when:
    - ansible_distribution == "RedHat"
    - ansible_pkg_mgr == "yum"

@@ -12,7 +12,7 @@
  dnf:
    name: "{{ item }}"
    state: present
  with_items: redhat_package_dependencies
  with_items: "{{ redhat_package_dependencies }}"
  when:
    - ansible_distribution == "RedHat"
    - ansible_pkg_mgr == "dnf"

@@ -21,7 +21,7 @@
  yum:
    name: "{{ item }}"
    state: present
  with_items: centos_package_dependencies
  with_items: "{{ centos_package_dependencies }}"
  when:
    - ansible_distribution == "CentOS"
    - ansible_pkg_mgr == "yum"

@@ -30,7 +30,7 @@
  dnf:
    name: "{{ item }}"
    state: present
  with_items: centos_package_dependencies
  with_items: "{{ centos_package_dependencies }}"
  when:
    - ansible_distribution == "CentOS"
    - ansible_pkg_mgr == "dnf"

@@ -58,7 +58,7 @@
  copy:
    src: "{{ansible_dir}}/rundep"
    dest: "{{ item }}"
  with_items: rundep_location.stdout_lines
  with_items: "{{ rundep_location }}.stdout_lines"
  when:
    - ceph_origin == 'local'
    - use_installer

@@ -66,7 +66,7 @@
- name: install ceph dependencies
  script: "{{ ansible_dir }}/rundep_installer.sh {{ item }}"
  become: true
  with_items: rundep_location.stdout_lines
  with_items: "{{ rundep_location }}.stdout_lines"
  when:
    - ceph_origin == 'local'
    - use_installer

@@ -76,7 +76,7 @@
    name: ceph
    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
  when:
    - not use_server_package_split
    - ceph_release_num.{{ ceph_release }} <= ceph_release_num.infernalis
    - ansible_pkg_mgr == "yum"
    - ceph_origin != 'local'

@@ -106,8 +106,7 @@
  when:
    - mon_group_name in group_names
    - ansible_pkg_mgr == "yum"
    - (ceph_stable and ceph_stable_release not in ceph_stable_releases)
      or ceph_dev
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
      or ceph_origin == "distro"
      or ceph_custom

@@ -118,9 +117,8 @@
  when:
    - mon_group_name in group_names
    - ansible_pkg_mgr == "dnf"
    - (ceph_stable and ceph_stable_release not in ceph_stable_releases)
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
      or ceph_origin == "distro"
      or ceph_dev
      or ceph_custom

- name: install distro or red hat storage ceph osd via yum

@@ -130,9 +128,8 @@
  when:
    - osd_group_name in group_names
    - ansible_pkg_mgr == "yum"
    - (ceph_stable and ceph_stable_release not in ceph_stable_releases)
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
      or ceph_origin == "distro"
      or ceph_dev
      or ceph_custom

- name: install distro or red hat storage ceph osd via dnf

@@ -142,9 +139,8 @@
  when:
    - osd_group_name in group_names
    - ansible_pkg_mgr == "dnf"
    - (ceph_stable and ceph_stable_release not in ceph_stable_releases)
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
      or ceph_origin == "distro"
      or ceph_dev
      or ceph_custom

- name: install distro or red hat storage ceph mds via yum

@@ -154,9 +150,8 @@
  when:
    - mds_group_name in group_names
    - ansible_pkg_mgr == "yum"
    - (ceph_stable and ceph_stable_release not in ceph_stable_releases)
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
      or ceph_origin == "distro"
      or ceph_dev
      or ceph_custom

- name: install distro or red hat storage ceph mds via dnf

@@ -166,7 +161,30 @@
  when:
    - mds_group_name in group_names
    - ansible_pkg_mgr == "dnf"
    - (ceph_stable and ceph_stable_release not in ceph_stable_releases)
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
      or ceph_origin == "distro"
      or ceph_custom

- name: install distro or red hat storage ceph-fuse via yum
  yum:
    name: "ceph-fuse"
    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
  when:
    - client_group_name in group_names
    - ansible_pkg_mgr == "yum"
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
      or ceph_origin == "distro"
      or ceph_dev
      or ceph_custom

- name: install distro or red hat storage ceph-fuse via dnf
  dnf:
    name: "ceph-fuse"
    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
  when:
    - client_group_name in group_names
    - ansible_pkg_mgr == "dnf"
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
      or ceph_origin == "distro"
      or ceph_dev
      or ceph_custom

@@ -178,9 +196,8 @@
  when:
    - client_group_name in group_names
    - ansible_pkg_mgr == "yum"
    - (ceph_stable and ceph_stable_release not in ceph_stable_releases)
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
      or ceph_origin == "distro"
      or ceph_dev
      or ceph_custom

- name: install distro or red hat storage ceph base via dnf

@@ -190,9 +207,8 @@
  when:
    - client_group_name in group_names
    - ansible_pkg_mgr == "dnf"
    - (ceph_stable and ceph_stable_release not in ceph_stable_releases)
    - ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
      or ceph_origin == "distro"
      or ceph_dev
      or ceph_custom

- name: install ceph-test

@@ -69,6 +69,12 @@
    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
  when: mds_group_name in group_names

- name: install red hat storage ceph-fuse client
  apt:
    pkg: ceph-fuse
    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
  when: client_group_name in group_names

- name: install red hat storage ceph-common
  apt:
    pkg: ceph-common
@@ -3,11 +3,18 @@

- include: ./checks/check_mandatory_vars.yml

# Set ceph_release
- include: ./release.yml

- include: ./checks/check_firewall.yml
when: check_firewall
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./misc/system_tuning.yml
when: osd_group_name in group_names
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./pre_requisites/prerequisite_rh_storage_iso_install.yml
when:
@@ -15,6 +22,8 @@
- ceph_rhcs_iso_install
tags:
- package-install
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./pre_requisites/prerequisite_rh_storage_cdn_install.yml
when:
@@ -23,6 +32,8 @@
- ansible_os_family == "RedHat"
tags:
- package-install
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./installs/install_on_redhat.yml
when:
@@ -30,6 +41,8 @@
not ceph_rhcs_iso_install
tags:
- package-install
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./installs/install_rh_storage_on_redhat.yml
when:
@@ -37,6 +50,8 @@
- ceph_rhcs
tags:
- package-install
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./installs/install_on_debian.yml
when:
@@ -44,6 +59,8 @@
- not ceph_rhcs
tags:
- package-install
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./installs/install_rh_storage_on_debian.yml
when:
@@ -51,6 +68,8 @@
- ceph_rhcs
tags:
- package-install
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./installs/install_rgw_on_redhat.yml
when:
@@ -59,6 +78,8 @@
- rgw_group_name in group_names
tags:
- package-install
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./installs/install_rgw_on_debian.yml
when:
@@ -67,16 +88,22 @@
- rgw_group_name in group_names
tags:
- package-install
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./misc/ntp_redhat.yml
when:
- ansible_os_family == 'RedHat'
- ntp_service_enabled
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: ./misc/ntp_debian.yml
when:
- ansible_os_family == 'Debian'
- ntp_service_enabled
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

- include: facts.yml

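Every include in this hunk gains the same two additions: a package-install tag where relevant, and static: False. A rough sketch of the pattern, with indentation assumed rather than copied from the repository:

- include: ./misc/ntp_redhat.yml
  when:
    - ansible_os_family == 'RedHat'
    - ntp_service_enabled
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False

With a static include the when condition would be copied onto every task inside the file; forcing the include to be dynamic lets Ansible evaluate the condition once and skip the whole file in a single step, which is what the repeated comment is describing.
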
@@ -84,77 +111,77 @@
dir_owner: ceph
dir_group: ceph
dir_mode: "0755"
when: is_after_hammer
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- set_fact:
dir_owner: root
dir_group: root
dir_mode: "0755"
when: is_before_infernalis
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- set_fact:
key_owner: root
key_group: root
key_mode: "0600"
when: is_before_infernalis
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- set_fact:
key_owner: ceph
key_group: ceph
key_mode: "0600"
when: is_after_hammer
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- set_fact:
activate_file_owner: ceph
activate_file_group: ceph
activate_file_mode: "0644"
when: is_after_hammer
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- set_fact:
activate_file_owner: root
activate_file_group: root
activate_file_mode: "0644"
when: is_before_infernalis
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- set_fact:
rbd_client_directory_owner: root
when:
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- rbd_client_directory_owner is not defined
or not rbd_client_directory_owner

- set_fact:
rbd_client_directory_owner: ceph
when:
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- rbd_client_directory_owner is not defined
or not rbd_client_directory_owner

- set_fact:
rbd_client_directory_group: root
when:
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- rbd_client_directory_group is not defined
or not rbd_client_directory_group

- set_fact:
rbd_client_directory_group: ceph
when:
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- rbd_client_directory_group is not defined
or not rbd_client_directory_group

- set_fact:
rbd_client_directory_mode: "1777"
when:
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- rbd_client_directory_mode is not defined
or not rbd_client_directory_mode

- set_fact:
rbd_client_directory_mode: "0770"
when:
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- rbd_client_directory_mode is not defined
or not rbd_client_directory_mode

@@ -0,0 +1,12 @@
---
# Set ceph_release to ceph_stable by default
- set_fact:
ceph_release: "{{ ceph_stable_release }}"

# Set ceph_release to latest known release (Which should match ceph_dev)
- set_fact:
ceph_release: "{{ item.key }}"
when:
- ceph_dev
- ({{ item.value }} > ceph_release_num.{{ ceph_release }})
with_dict: "{{ ceph_release_num }}"

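The new release.yml only makes sense next to the ceph_release_num map it iterates over. That map lives in the ceph-common role defaults and is not part of this hunk; the excerpt below uses illustrative, assumed values, shown only to make the comparisons above concrete.

# Hypothetical excerpt of the ceph-common defaults; values are assumptions.
ceph_release_num:
  firefly: 0.80
  hammer: 0.94
  infernalis: 9
  jewel: 10
  kraken: 11

With ceph_release resolved to a name such as jewel, a guard like ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer reduces to a plain numeric comparison (10 > 0.94 in this illustration), which is what replaces the old is_after_hammer / is_before_infernalis facts throughout the rest of this commit.
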
@@ -21,7 +21,9 @@ mon_initial_members = {% if groups[mon_group_name] is defined %}{% for host in g
{% endif %}

{% if not mon_containerized_deployment and not mon_containerized_deployment_with_kv %}
{% if groups[mon_group_name] is defined %}
{% if monitor_address_block is defined %}
mon host = {% for host in groups[mon_group_name] %}{{ hostvars[host]['ansible_all_ipv4_addresses'] | ipaddr(monitor_address_block) | first }}{% if not loop.last %},{% endif %}{% endfor %}
{% elif groups[mon_group_name] is defined %}
mon host = {% for host in groups[mon_group_name] %}{{ hostvars[host]['ansible_' + monitor_interface]['ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
{% elif (hostvars[host]['monitor_interface'] is defined and hostvars[host]['monitor_interface'] != "interface") or monitor_interface != "interface" %}
{% include 'mon_addr_interface.j2' %}
@@ -54,9 +56,6 @@ public_network = {{ public_network }}
{% if cluster_network is defined %}
cluster_network = {{ cluster_network }}
{% endif %}
{% if common_single_host_mode is defined %}
osd crush chooseleaf type = 0
{% endif %}

[client.libvirt]
admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor

@@ -76,7 +76,7 @@
failed_when: false
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: start and add that the metadata service to the init sequence (upstart)
command: initctl emit ceph-mds cluster={{ cluster }} id={{ mds_name }}
@@ -93,7 +93,7 @@
changed_when: false
when:
- not use_systemd
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: start and add that the metadata service to the init sequence (systemd after hammer)
service:
@@ -103,4 +103,4 @@
changed_when: false
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

@@ -53,6 +53,12 @@ openstack_cinder_backup_pool:
name: backups
pg_num: "{{ pool_default_pg_num }}"

openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"
- "{{ openstack_nova_pool }}"
- "{{ openstack_cinder_backup_pool }}"

openstack_keys:
- { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}'" }
- { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}'" }

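Because pool creation and key creation now iterate over openstack_pools and openstack_keys, a deployment can extend the lists from group_vars instead of patching the tasks. A hypothetical override (the custom pool below is not part of this change):

openstack_custom_pool:
  name: custom
  pg_num: "{{ pool_default_pg_num }}"

openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
  - "{{ openstack_nova_pool }}"
  - "{{ openstack_cinder_backup_pool }}"
  - "{{ openstack_custom_pool }}"
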
@@ -9,9 +9,24 @@
- cephfs_data
- cephfs_metadata
changed_when: false
when: not {{ ceph_version | version_compare('0.84.0', '<') }}
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.firefly

- name: create ceph filesystem
command: ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
changed_when: false
when: not {{ ceph_version | version_compare('0.84.0', '<') }}
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.firefly

- name: allow multimds
command: ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it
changed_when: false
when:
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.jewel
- mds_allow_multimds

- name: set max_mds
command: ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}
changed_when: false
when:
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.jewel
- mds_allow_multimds
- mds_max_mds > 1

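The two new multi-MDS tasks only fire when the release is at least jewel and the operator explicitly asks for it. A hedged example of the group_vars an mds host group might carry; the variable names come from the conditions above, the values are assumptions:

mds_allow_multimds: true
mds_max_mds: 2
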
@@ -1,6 +1,6 @@
---
- name: generate monitor initial keyring
local_action: shell python -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
local_action: shell python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
creates={{ fetch_directory }}/monitor_keyring.conf
register: monitor_keyring
become: false
@@ -50,7 +50,7 @@
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when:
- cephx
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: ceph monitor mkfs without keyring (for or after infernalis release)
command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
@@ -58,7 +58,7 @@
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when:
- not cephx
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: ceph monitor mkfs with keyring (before infernalis release)
command: ceph-mon --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
@@ -66,7 +66,7 @@
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when:
- cephx
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: ceph monitor mkfs without keyring (before infernalis release)
command: ceph-mon --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
@@ -74,4 +74,4 @@
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when:
- not cephx
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

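The keyring task now pins python2 because the inline script uses the Python 2 print statement. Purely as a sketch, and not part of this commit, the same task could be made Python 3 friendly by switching to the print function and decoding the base64 bytes:

- name: generate monitor initial keyring
  local_action: shell python3 -c "import os ; import struct ; import time ; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print(base64.b64encode(header + key).decode())" | tee {{ fetch_directory }}/monitor_keyring.conf
    creates={{ fetch_directory }}/monitor_keyring.conf
  register: monitor_keyring
  become: false
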
@@ -1,11 +1,7 @@
---
- name: create openstack pool
command: ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pg_num }}
with_items:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"
- "{{ openstack_nova_pool }}"
- "{{ openstack_cinder_backup_pool }}"
with_items: "{{ openstack_pools }}"
changed_when: false
failed_when: false

@@ -13,6 +9,6 @@
command: ceph --cluster {{ cluster }} auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
args:
creates: /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
with_items: openstack_keys
with_items: "{{ openstack_keys }}"
changed_when: false
when: cephx

@@ -2,11 +2,11 @@
- name: collect all the pools
command: rados --cluster {{ cluster }} lspools
register: ceph_pools
when: "{{ ceph_version | version_compare('0.94.0', '>=') }}"
when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.hammer

- name: secure the cluster
command: ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
with_nested:
- "{{ ceph_pools.stdout_lines|default([]) }}"
- secure_cluster_flags
when: "{{ ceph_version | version_compare('0.94.0', '>=') }}"
when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.hammer

@@ -20,14 +20,28 @@
when:
- not use_systemd

# NOTE (leseb): somehow the service ansible module is messing things up
# as a safety measure we run the raw command
# legacy ceph system v init scripts require a mon section in order to work
# Not Ubuntu so we can catch old debian systems that don't use systemd or upstart
- name: add mon section into ceph.conf for systemv init scripts
ini_file:
dest: /etc/ceph/{{ cluster }}.conf
section: mon.{{ ansible_host }}
option: host
value: "{{ ansible_host }}"
state: present
when:
- ansible_os_family != "Ubuntu"
- ceph_release_num.{{ ceph_stable_release }} < ceph_release_num.infernalis

# NOTE (jsaintrocc): can't use service module because we need to use the
# legacy systemv init for older ceph releases. Even when the os supports systemd
# Not Ubuntu so we can catch old debian systems that don't use systemd or upstart
- name: start and add that the monitor service to the init sequence
command: service ceph start mon
changed_when: false
when:
- ansible_distribution != "Ubuntu"
- is_before_infernalis
- ansible_os_family != "Ubuntu"
- ceph_release_num.{{ ceph_stable_release }} < ceph_release_num.infernalis

- name: start and add that the monitor service to the init sequence (for or after infernalis)
command: systemctl enable ceph-mon@{{ monitor_name }}
@@ -35,7 +49,7 @@
failed_when: false
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: start the monitor service (for or after infernalis)
service:
@@ -45,7 +59,7 @@
changed_when: false
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: collect admin and bootstrap keys
command: ceph-create-keys --cluster {{ cluster }} --id {{ monitor_name }}

@@ -63,7 +63,7 @@
msg: "ceph-disk failed to create an OSD"
when:
" 'ceph-disk: Error: ceph osd create failed' in item.get('stderr', '') "
with_items: "{{ combined_activate_osd_disk_results.results }}"
with_items: "{{ (combined_activate_osd_disk_results|default({})).results|default([]) }}"

# NOTE (leseb): this task is for partitions because we don't explicitly use a partition.
- name: activate osd(s) when device is a partition
@@ -89,7 +89,7 @@
- ansible_selinux != false
- ansible_selinux['status'] == 'enabled'
- ansible_selinux['config_mode'] != 'disabled'
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: start and add that the osd service(s) to the init sequence (before infernalis)
service:
@@ -98,7 +98,7 @@
enabled: yes
when:
- ansible_distribution != "Ubuntu"
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: get osd id (for or after infernalis)
shell: 'ls /var/lib/ceph/osd/ | grep -oP "\d+$"'
@@ -107,24 +107,24 @@
register: osd_id
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: enable the osd service (for or after infernalis)
command: systemctl enable ceph-osd@{{ item }}
changed_when: false
failed_when: false
with_items: "{{ osd_id.stdout_lines }}"
with_items: "{{ (osd_id|default({})).stdout_lines|default([]) }}"
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: start and add that the osd service(s) to the init sequence (for or after infernalis)
service:
name: ceph-osd@{{ item }}
state: started
enabled: yes
with_items: "{{ osd_id.stdout_lines }}"
with_items: "{{ (osd_id|default({})).stdout_lines|default([]) }}"
changed_when: false
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

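The default() chains added to with_items above protect the loops when the registering task was skipped and its variable therefore carries no results or stdout_lines attribute. A minimal, self-contained illustration of the same guard; the task names here are hypothetical:

- name: probe for osd ids
  shell: 'ls /var/lib/ceph/osd/ | grep -oP "\d+$"'
  register: probe
  when: false          # deliberately skipped, so probe has no stdout_lines

- name: act on each discovered id
  debug:
    msg: "would enable ceph-osd@{{ item }}"
  with_items: "{{ (probe|default({})).stdout_lines|default([]) }}"

When the first task is skipped the loop expands to an empty list instead of failing on an undefined attribute.
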
@@ -22,7 +22,7 @@

- name: check the partition status of the journal devices
shell: "parted --script {{ item }} print > /dev/null 2>&1"
with_items: "{{ raw_journal_devices }}"
with_items: "{{ raw_journal_devices|default([])|unique }}"
changed_when: false
failed_when: false
register: journal_partition_status

@@ -1,6 +1,6 @@
---
- name: check if the device is a partition (autodiscover disks)
shell: "echo '/dev/{{ item.key }}' | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'"
shell: "readlink -f /dev/{{ item.key }} | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'"
with_dict: "{{ ansible_devices }}"
changed_when: false
failed_when: false

@@ -1,6 +1,6 @@
---
- name: check if the device is a partition
shell: "echo '{{ item }}' | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'"
shell: "readlink -f {{ item }} | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'"
with_items: "{{ devices }}"
changed_when: false
failed_when: false

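Switching these checks from echo to readlink -f means the regular expression is matched against the resolved device node, so persistent symlink paths now pass the partition check. A hypothetical inventory entry this handles; the device names are examples only:

devices:
  - /dev/disk/by-id/wwn-0x5000c500a1b2c3d4-part1   # resolves to e.g. /dev/sdb1
  - /dev/sdc1
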
@@ -12,7 +12,7 @@
changed_when: false
when:
- use_systemd
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: enable systemd unit file for the rbd mirror service (systemd after hammer)
command: systemctl enable ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}
@@ -20,7 +20,7 @@
failed_when: false
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: start and add that the rbd mirror service to the init sequence (systemd after hammer)
service:
@@ -30,4 +30,4 @@
changed_when: false
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

@@ -16,7 +16,10 @@

- name: include rgw multisite playbooks
include: multisite/main.yml
when: rgw_zone is defined and rgw_multisite and ( is_jewel or is_after_jewel )
when:
- rgw_zone is defined
- rgw_multisite
- ( ceph_release_num.{{ ceph_release }} >= ceph_release_num.jewel )
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

@@ -47,7 +47,7 @@
creates: /var/run/systemd/generator.late/ceph-radosgw.service
when:
- ansible_os_family == "RedHat"
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: activate rados gateway with upstart
file:

@@ -26,7 +26,7 @@
enabled: yes
when:
- ansible_os_family == 'RedHat'
- is_before_infernalis
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

- name: enable systemd unit file for rgw instance (for or after infernalis)
command: systemctl enable ceph-radosgw@rgw.{{ ansible_hostname }}
@@ -34,7 +34,7 @@
failed_when: false
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

- name: start rgw with systemd (for or after infernalis)
service:
@@ -43,4 +43,4 @@
enabled: yes
when:
- use_systemd
- is_after_hammer
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

@@ -10,6 +10,7 @@ rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0

# Deploy RESTAPI on each of the Monitors
#restapi: false

@@ -0,0 +1,45 @@
---

vagrant_box: 'linode'
vagrant_box_url: 'https://github.com/displague/vagrant-linode/raw/master/box/linode.box'

# Set a label prefix for the machines in this cluster. (This is useful and necessary when running multiple clusters concurrently.)
#label_prefix: 'foo'

ssh_username: 'vagrant'
ssh_private_key_path: '~/.ssh/id_rsa'

cloud_distribution: 'CentOS 7'
cloud_datacenter: 'newark'

# Memory for each Linode instance, this determines price! See Linode plans.
memory: 2048

# The private network on Linode, you probably don't want to change this.
subnet: 192.168.0

# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 3
osd_vms: 3
mds_vms: 1
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0

# Deploy RESTAPI on each of the Monitors
restapi: true

# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: 'dev'
ceph_install_branch: 'master'

# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
# vagrant_sync_dir: /home/vagrant/sync

os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }

@@ -42,3 +42,5 @@ vagrant_box: 'openstack'
#os_flavor :
#os_image :
#os_keypair_name :
#os_networks :
#os_floating_ip_pool :