Mirror of https://github.com/ceph/ceph-ansible.git
Merged with upstream master, commit 32f6ef7747
@@ -18,6 +18,7 @@ CLIENTS = settings['client_vms']
 SUBNET = settings['subnet']
 BOX = settings['vagrant_box']
 BOX_URL = settings['vagrant_box_url']
+SYNC_DIR = settings['vagrant_sync_dir']
 MEMORY = settings['memory']
 STORAGECTL = settings['vagrant_storagectl']
 ETH = settings['eth']
@@ -112,7 +113,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

   # Faster bootup. Disable if you need this for libvirt
   config.vm.provider :libvirt do |v,override|
-    override.vm.synced_folder '.', '/home/vagrant/sync', disabled: true
+    override.vm.synced_folder '.', SYNC_DIR, disabled: true
   end

   if BOX == 'openstack'
@@ -84,12 +84,24 @@ dummy:

 ## Configure package origin
 #
-#ceph_origin: 'upstream' # or 'distro'
+#ceph_origin: 'upstream' #'distro' or 'local'
 # 'distro' means that no separate repo file will be added
 # you will get whatever version of Ceph is included in your Linux distro.
-#
-#ceph_use_distro_backports: false # DEBIAN ONLY
+# 'local' means that the ceph binaries will be copied over from the local machine

+# LOCAL CEPH INSTALLATION (ceph_origin==local)
+#
+# Path to DESTDIR of the ceph install
+#ceph_installation_dir: "/path/to/ceph_installation/"
+# Whether or not to use installer script rundep_installer.sh
+# This script takes in rundep and installs the packages line by line onto the machine
+# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
+# all runtime dependencies installed
+#use_installer: false
+# Root directory for ceph-ansible
+#ansible_dir: "/path/to/ceph-ansible"
+
+#ceph_use_distro_backports: false # DEBIAN ONLY

 # STABLE
 ########
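The new 'local' origin above is driven by a handful of variables. A minimal group_vars sketch, assuming a local Ceph build already exists at the placeholder paths used in the sample file:

ceph_origin: 'local'
ceph_installation_dir: "/path/to/ceph_installation/"
use_installer: true
ansible_dir: "/path/to/ceph-ansible"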
@@ -132,15 +144,15 @@ dummy:
 # time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
 # on RHEL 7.
 #
-#ceph_stable_rh_storage: false
+#ceph_rhcs: false
 # This will affect how/what repositories are enabled depending on the desired
 # version. The next version will use "2" not "2.0" which would not work.
-#ceph_stable_rh_storage_version: 1.3 # next version is 2
-#ceph_stable_rh_storage_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com
-#ceph_stable_rh_storage_iso_install: false # usually used when nodes don't have access to cdn.redhat.com
-#ceph_stable_rh_storage_iso_path:
-#ceph_stable_rh_storage_mount_path: /tmp/rh-storage-mount
-#ceph_stable_rh_storage_repository_path: /tmp/rh-storage-repo # where to copy iso's content
+#ceph_rhcs_version: 1.3 # next version is 2
+#ceph_rhcs_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com
+#ceph_rhcs_iso_install: false # usually used when nodes don't have access to cdn.redhat.com
+#ceph_rhcs_iso_path:
+#ceph_rhcs_mount_path: /tmp/rh-storage-mount
+#ceph_rhcs_repository_path: /tmp/rh-storage-repo # where to copy iso's content


 # UBUNTU CLOUD ARCHIVE
@@ -352,6 +364,8 @@ dummy:
 #raw_multi_journal: False
 #osd_directory: False
 #bluestore: False
+#dmcrypt_journal_collocation: False
+#dmcrypt_dedicated_journal: False

 #osd_auto_discovery: False
@@ -35,6 +35,28 @@ dummy:
 #ceph_nfs_pseudo_path: "/ceph"
 #ceph_nfs_protocols: "3,4"
 #ceph_nfs_access_type: "RW"
+#ceph_nfs_log_file: "/var/log/ganesha.log"
+
+####################
+# FSAL Ceph Config #
+####################
+#ceph_nfs_ceph_export_id: 20134
+#ceph_nfs_ceph_pseudo_path: "/ceph"
+#ceph_nfs_ceph_protocols: "3,4"
+#ceph_nfs_ceph_access_type: "RW"
+
+###################
+# FSAL RGW Config #
+###################
+#ceph_nfs_rgw_export_id: 20134
+#ceph_nfs_rgw_pseudo_path: "/ceph"
+#ceph_nfs_rgw_protocols: "3,4"
+#ceph_nfs_rgw_access_type: "RW"
+#ceph_nfs_rgw_user: "cephnfs"
+# Note: keys are optional and can be generated, but not on containerized, where
+# they must be configured.
+#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"

 ###################
@@ -121,7 +121,7 @@ dummy:
 # Keys are stored into the monitors k/v store
 # Use 'true' to enable this scenario
 # Both journal and data are stored on the same dm-crypt encrypted device
-#dmcrypt_journal_colocation: false
+#dmcrypt_journal_collocation: false


 # VI. Encrypt osd data and/or journal devices with dm-crypt.
@@ -0,0 +1,5 @@
+Infrastructure playbooks
+========================
+
+This directory contains a variety of playbooks that can be used independently of the Ceph roles we have.
+They aim to perform infrastructure-related tasks that help with managing a Ceph cluster or performing certain operational tasks.
@@ -261,7 +261,7 @@
         rbdmirror_group_name in group_names

     - name: check for anything running ceph
-      shell: "ps awux | grep -v grep | grep -q -- ceph-"
+      shell: "ps awux | grep -- [c]eph-"
      register: check_for_running_ceph
      failed_when: check_for_running_ceph.rc == 0
@@ -293,15 +293,18 @@
         osd_group_name in group_names

     - name: remove osd mountpoint tree
-      shell: rm -rf /var/lib/ceph/osd
+      file:
+        path: /var/lib/ceph/osd/
+        state: absent
       register: remove_osd_mountpoints
-      failed_when: false
+      ignore_errors: true
       when:
         osd_group_name in group_names

     - name: remove monitor store and bootstrap keys
-      shell: rm -rf /var/lib/ceph/
-      failed_when: false
+      file:
+        path: /var/lib/ceph/
+        state: absent
       when:
         mon_group_name in group_names
@@ -313,7 +316,7 @@
         - remove data
       when:
         osd_group_name in group_names and
-        remove_osd_mountpoints.rc != 0
+        remove_osd_mountpoints.failed is defined

     - name: see if ceph-disk is installed
       shell: "which ceph-disk"
@@ -322,7 +325,7 @@

     - name: zap osd disks
       shell: ceph-disk zap "{{ item }}"
-      with_items: devices
+      with_items: "{{ devices | default([]) }}"
       when:
         osd_group_name in group_names and
         ceph_disk_present.rc == 0 and
@@ -423,6 +426,7 @@

     - name: request data removal
       local_action: shell echo requesting data removal
+      become: false
       notify:
         - remove data
@@ -0,0 +1,142 @@
+---
+# This playbook shrinks the Ceph monitors from your cluster
+# It can remove any number of monitor(s) from the cluster and ALL THEIR DATA
+#
+# Use it like this:
+# ansible-playbook shrink-mon.yml -e mon_host=ceph-mon01,ceph-mon02
+#     Prompts for confirmation to shrink, defaults to no and
+#     doesn't shrink the cluster. yes shrinks the cluster.
+#
+# ansible-playbook -e ireallymeanit=yes|no shrink-mon.yml
+#     Overrides the prompt using -e option. Can be used in
+#     automation scripts to avoid interactive prompt.
+
+
+- name: confirm whether user really meant to remove monitor(s) from the ceph cluster
+
+  hosts:
+    - localhost
+
+  gather_facts: false
+  become: true
+
+  vars_prompt:
+    - name: ireallymeanit
+      prompt: Are you sure you want to shrink the cluster?
+      default: 'no'
+      private: no
+
+  tasks:
+  - include_vars: roles/ceph-common/defaults/main.yml
+  - include_vars: group_vars/all
+
+  - name: exit playbook, if user did not mean to shrink cluster
+    fail:
+      msg: "Exiting shrink-mon playbook, no monitor(s) was/were removed.
+         To shrink the cluster, either say 'yes' on the prompt or
+         use `-e ireallymeanit=yes` on the command line when
+         invoking the playbook"
+    when: ireallymeanit != 'yes'
+
+  - name: exit playbook, if no monitor(s) was/were given
+    fail:
+      msg: "mon_host must be declared
+         Exiting shrink-mon playbook, no monitor(s) was/were removed.
+         On the command line when invoking the playbook, you can use
+         -e mon_host=ceph-mon01,ceph-mon02 argument."
+    when: mon_host is not defined
+
+  - name: test if ceph command exist
+    command: command -v ceph
+    changed_when: false
+    failed_when: false
+    register: ceph_command
+
+  - name: exit playbook, if ceph command does not exist
+    debug:
+      msg: "The ceph command is not available, please install it :("
+    run_once: true
+    when:
+      - ceph_command.rc != 0
+
+  - name: exit playbook, if cluster files do not exist
+    stat:
+      path: "{{ item }}"
+    register: ceph_conf_key
+    with_items:
+      - /etc/ceph/{{ cluster }}.conf
+      - /etc/ceph/{{ cluster }}.client.admin.keyring
+    failed_when: false
+
+  - fail:
+      msg: "Ceph's configuration file is not present in /etc/ceph"
+    with_items: "{{ceph_conf_key.results}}"
+    when:
+      - item.stat.exists == false
+
+  - name: exit playbook, if can not connect to the cluster
+    command: timeout 5 ceph --cluster {{ cluster }} health
+    register: ceph_health
+    until: ceph_health.stdout.find("HEALTH") > -1
+    retries: 5
+    delay: 2
+
+  - name: verify given monitors are reachable
+    command: ping -c 1 {{ item }}
+    with_items: "{{mon_host.split(',')}}"
+    register: mon_reachable
+    failed_when: false
+
+  - fail:
+      msg: "One or more monitors are not reachable, please check your /etc/hosts or your DNS"
+    with_items: "{{mon_reachable.results}}"
+    when:
+      - item.rc != 0
+
+  - name: stop monitor service (systemd)
+    service:
+      name: ceph-mon@{{ item }}
+      state: stopped
+      enabled: no
+    with_items: "{{mon_host.split(',')}}"
+    delegate_to: "{{item}}"
+    failed_when: false
+
+  - name: purge monitor store
+    file:
+      path: /var/lib/ceph/mon/{{ cluster }}-{{ item }}
+      state: absent
+    with_items: "{{mon_host.split(',')}}"
+    delegate_to: "{{item}}"
+
+  - name: remove monitor from the quorum
+    command: ceph --cluster {{ cluster }} mon remove {{ item }}
+    failed_when: false
+    with_items: "{{mon_host.split(',')}}"
+
+  # NOTE (leseb): sorry for the 'sleep' command
+  # but it will take a couple of seconds for other monitors
+  # to notice that one member has left.
+  # 'sleep 5' is not that bad and should be sufficient
+  - name: verify the monitor is out of the cluster
+    shell: "sleep 5 && ceph --cluster {{ cluster }} -s | grep monmap | sed 's/.*quorum//' | egrep -sq {{ item }}"
+    with_items: "{{mon_host.split(',')}}"
+    failed_when: false
+    register: ceph_health_mon
+
+  - name: please remove the monitor from your ceph configuration file
+    debug:
+      msg: "The monitor(s) has/have been successfully removed from the cluster.
+         Please remove the monitor(s) entry(ies) from the rest of your ceph configuration files, cluster wide."
+    run_once: true
+    with_items: "{{ceph_health_mon.results}}"
+    when:
+      - item.rc != 0
+
+  - name: please remove the monitor from your ceph configuration file
+    fail:
+      msg: "Monitor(s) appear(s) to still be part of the cluster, please check what happened."
+    run_once: true
+    with_items: "{{ceph_health_mon.results}}"
+    when:
+      - item.rc == 0
@@ -0,0 +1,131 @@
+---
+# This playbook shrinks Ceph OSDs.
+# It can remove any number of OSD(s) from the cluster and ALL THEIR DATA
+#
+# Use it like this:
+# ansible-playbook shrink-osd.yml -e osd_ids=0,2,6
+#     Prompts for confirmation to shrink, defaults to no and
+#     doesn't shrink the cluster. yes shrinks the cluster.
+#
+# ansible-playbook -e ireallymeanit=yes|no shrink-osd.yml
+#     Overrides the prompt using -e option. Can be used in
+#     automation scripts to avoid interactive prompt.
+
+
+- name: confirm whether user really meant to remove osd(s) from the cluster
+
+  hosts:
+    - localhost
+
+  gather_facts: false
+  become: true
+
+  vars_prompt:
+    - name: ireallymeanit
+      prompt: Are you sure you want to shrink the cluster?
+      default: 'no'
+      private: no
+
+  tasks:
+  - include_vars: roles/ceph-common/defaults/main.yml
+  - include_vars: group_vars/all
+
+  - name: exit playbook, if user did not mean to shrink cluster
+    fail:
+      msg: "Exiting shrink-osd playbook, no osd(s) was/were removed.
+         To shrink the cluster, either say 'yes' on the prompt or
+         use `-e ireallymeanit=yes` on the command line when
+         invoking the playbook"
+    when: ireallymeanit != 'yes'
+
+  - name: exit playbook, if no osd(s) was/were given
+    fail:
+      msg: "osd_ids must be declared
+         Exiting shrink-osd playbook, no OSD(s) was/were removed.
+         On the command line when invoking the playbook, you can use
+         -e osd_ids=0,1,2,3 argument."
+    when: osd_ids is not defined
+
+  - name: test if ceph command exist
+    command: command -v ceph
+    changed_when: false
+    failed_when: false
+    register: ceph_command
+
+  - name: exit playbook, if ceph command does not exist
+    debug:
+      msg: "The ceph command is not available, please install it :("
+    run_once: true
+    when:
+      - ceph_command.rc != 0
+
+  - name: exit playbook, if cluster files do not exist
+    stat:
+      path: "{{ item }}"
+    register: ceph_conf_key
+    with_items:
+      - /etc/ceph/{{ cluster }}.conf
+      - /etc/ceph/{{ cluster }}.client.admin.keyring
+    failed_when: false
+
+  - fail:
+      msg: "Ceph's configuration file is not present in /etc/ceph"
+    with_items: "{{ceph_conf_key.results}}"
+    when:
+      - item.stat.exists == false
+
+  - name: exit playbook, if can not connect to the cluster
+    command: timeout 5 ceph --cluster {{ cluster }} health
+    register: ceph_health
+    until: ceph_health.stdout.find("HEALTH") > -1
+    retries: 5
+    delay: 2
+
+  # NOTE (leseb): just in case, the complex filters mechanism below does not work anymore.
+  # This will be a quick and easy fix but will require using the shell module.
+  # - name: find the host where the osd(s) is/are running on
+  #   shell: |
+  #     ceph --cluster {{ cluster }} osd find {{ item }} | grep -Po '(?<="ip": ")[^:]*'
+  #   with_items: "{{osd_ids.split(',')}}"
+  #   register: osd_hosts
+  #
+  - name: find the host where the osd(s) is/are running on
+    command: ceph --cluster {{ cluster }} osd find {{ item }}
+    with_items: "{{osd_ids.split(',')}}"
+    register: osd_hosts
+
+  - set_fact: ip_item="{{(item.stdout | from_json).ip}}"
+    with_items: "{{osd_hosts.results}}"
+    register: ip_result
+
+  - set_fact: ips="{{ ip_result.results | map(attribute='ansible_facts.ip_item') | list }}"
+
+  - set_fact: real_ips="{{ ips | regex_replace(':[0-9][0-9][0-9][0-9]\/[0-9][0-9][0-9][0-9]', '') }}"
+
+  - name: check if ceph admin key exists on the osd nodes
+    stat:
+      path: "/etc/ceph/{{ cluster }}.client.admin.keyring"
+    register: ceph_admin_key
+    with_items: "{{real_ips}}"
+    delegate_to: "{{item}}"
+    failed_when: false
+
+  - fail:
+      msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
+    with_items: "{{ceph_admin_key.results}}"
+    when:
+      - item.stat.exists == false
+
+  - name: deactivating osd(s)
+    command: ceph-disk deactivate --cluster {{ cluster }} --deactivate-by-id {{ item.0 }} --mark-out
+    with_together:
+      - "{{osd_ids.split(',')}}"
+      - "{{real_ips}}"
+    delegate_to: "{{item.1}}"
+
+  - name: destroying osd(s)
+    command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.0 }} --zap
+    with_together:
+      - "{{osd_ids.split(',')}}"
+      - "{{real_ips}}"
+    delegate_to: "{{item.1}}"
@@ -25,7 +25,7 @@ Have a look at `defaults/main.yml`.
 * Install source, choose one of these:
   * `ceph_stable`
   * `ceph_dev`
-  * `ceph_stable_rh_storage`
+  * `ceph_rhcs`
   * `ceph_custom`
 * `journal_size`
 * `monitor_interface`
@@ -76,12 +76,24 @@ ceph_test: False

 ## Configure package origin
 #
-ceph_origin: 'upstream' # or 'distro'
+ceph_origin: 'upstream' # or 'distro' or 'local'
 # 'distro' means that no separate repo file will be added
 # you will get whatever version of Ceph is included in your Linux distro.
-#
-ceph_use_distro_backports: false # DEBIAN ONLY
+# 'local' means that the ceph binaries will be copied over from the local machine

+# LOCAL CEPH INSTALLATION (ceph_origin==local)
+#
+# Path to DESTDIR of the ceph install
+#ceph_installation_dir: "/path/to/ceph_installation/"
+# Whether or not to use installer script rundep_installer.sh
+# This script takes in rundep and installs the packages line by line onto the machine
+# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
+# all runtime dependencies installed
+#use_installer: false
+# Root directory for ceph-ansible
+#ansible_dir: "/path/to/ceph-ansible"
+
+ceph_use_distro_backports: false # DEBIAN ONLY

 # STABLE
 ########
@@ -124,15 +136,15 @@ ceph_stable_redhat_distro: el7
 # time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
 # on RHEL 7.
 #
-ceph_stable_rh_storage: false
+ceph_rhcs: false
 # This will affect how/what repositories are enabled depending on the desired
 # version. The next version will use "2" not "2.0" which would not work.
-ceph_stable_rh_storage_version: 1.3 # next version is 2
-ceph_stable_rh_storage_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com
-ceph_stable_rh_storage_iso_install: false # usually used when nodes don't have access to cdn.redhat.com
-#ceph_stable_rh_storage_iso_path:
-ceph_stable_rh_storage_mount_path: /tmp/rh-storage-mount
-ceph_stable_rh_storage_repository_path: /tmp/rh-storage-repo # where to copy iso's content
+ceph_rhcs_version: 1.3 # next version is 2
+ceph_rhcs_cdn_install: false # assumes all the nodes can connect to cdn.redhat.com
+ceph_rhcs_iso_install: false # usually used when nodes don't have access to cdn.redhat.com
+#ceph_rhcs_iso_path:
+ceph_rhcs_mount_path: /tmp/rh-storage-mount
+ceph_rhcs_repository_path: /tmp/rh-storage-repo # where to copy iso's content


 # UBUNTU CLOUD ARCHIVE
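The rename above is mechanical, so existing group_vars carry over one-to-one. A sketch of the mapping (old key on the left, new key on the right; values are unchanged, the two set below are only examples):

# ceph_stable_rh_storage                  -> ceph_rhcs
# ceph_stable_rh_storage_version          -> ceph_rhcs_version
# ceph_stable_rh_storage_cdn_install      -> ceph_rhcs_cdn_install
# ceph_stable_rh_storage_iso_install      -> ceph_rhcs_iso_install
# ceph_stable_rh_storage_iso_path         -> ceph_rhcs_iso_path
# ceph_stable_rh_storage_mount_path       -> ceph_rhcs_mount_path
# ceph_stable_rh_storage_repository_path  -> ceph_rhcs_repository_path
ceph_rhcs: true
ceph_rhcs_version: 1.3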
@@ -344,5 +356,7 @@ journal_collocation: False
 raw_multi_journal: False
 osd_directory: False
 bluestore: False
+dmcrypt_journal_collocation: False
+dmcrypt_dedicated_journal: False

 osd_auto_discovery: False
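Choosing the new collocated dm-crypt scenario means leaving exactly one scenario flag True in the OSD group_vars, since the scenario checks later in this diff fail the play when two are combined. A sketch (the device names are placeholders, not values from the commit):

journal_collocation: False
raw_multi_journal: False
osd_directory: False
bluestore: False
dmcrypt_journal_collocation: True
dmcrypt_dedicated_journal: False
devices:
  - /dev/sdb
  - /dev/sdc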
@@ -5,6 +5,7 @@
   when:
     - ceph_origin != 'upstream'
     - ceph_origin != 'distro'
+    - ceph_origin != 'local'
   tags:
     - package-install
@@ -15,18 +16,18 @@
     - ceph_origin == 'upstream'
     - not ceph_stable
     - not ceph_dev
-    - not ceph_stable_rh_storage
+    - not ceph_rhcs
     - not ceph_stable_uca
   tags:
     - package-install

 - name: verify that a method was chosen for red hat storage
   fail:
-    msg: "choose between ceph_stable_rh_storage_cdn_install and ceph_stable_rh_storage_iso_install"
+    msg: "choose between ceph_rhcs_cdn_install and ceph_rhcs_iso_install"
   when:
-    - ceph_stable_rh_storage
-    - not ceph_stable_rh_storage_cdn_install
-    - not ceph_stable_rh_storage_iso_install
+    - ceph_rhcs
+    - not ceph_rhcs_cdn_install
+    - not ceph_rhcs_iso_install
     - ceph_origin == "upstream"
   tags:
     - package-install
@@ -71,7 +72,7 @@
     - not raw_multi_journal
     - not osd_directory
     - not bluestore
-    - not dmcrypt_journal_colocation
+    - not dmcrypt_journal_collocation
     - not dmcrypt_dedicated_journal

 - name: verify only one osd scenario was chosen
@@ -86,14 +87,15 @@
       or (raw_multi_journal and osd_directory)
       or (raw_multi_journal and bluestore)
       or (osd_directory and bluestore)
-      or (dmcrypt_journal_colocation and journal_collocation)
-      or (dmcrypt_journal_colocation and raw_multi_journal)
-      or (dmcrypt_journal_colocation and osd_directory)
-      or (dmcrypt_journal_colocation and bluestore)
+      or (dmcrypt_journal_collocation and journal_collocation)
+      or (dmcrypt_journal_collocation and raw_multi_journal)
+      or (dmcrypt_journal_collocation and osd_directory)
+      or (dmcrypt_journal_collocation and bluestore)
       or (dmcrypt_dedicated_journal and journal_collocation)
       or (dmcrypt_dedicated_journal and raw_multi_journal)
       or (dmcrypt_dedicated_journal and osd_directory)
       or (dmcrypt_dedicated_journal and bluestore)
+      or (dmcrypt_dedicated_journal and dmcrypt_journal_collocation)

 - name: verify devices have been provided
   fail:
@@ -0,0 +1,6 @@
+---
+- name: check ntp installation on atomic
+  command: rpm -q chrony
+  register: ntp_pkg_query
+  ignore_errors: true
+  changed_when: false
@@ -18,7 +18,7 @@
   fail:
     msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Ceph Storage, only RHEL 7.1"
   when:
-    - ceph_stable_rh_storage
+    - ceph_rhcs
    - ansible_distribution_version | version_compare('7.1', '<')

 - name: fail on unsupported distribution for ubuntu cloud archive
@@ -39,13 +39,67 @@
 - include: redhat_ceph_repository.yml
   when: ceph_origin == 'upstream'

+- name: make sure /tmp exists
+  file:
+    path: /tmp
+    state: directory
+  when:
+    - ceph_origin == 'local'
+    - use_installer
+
+- name: use mktemp to create name for rundep
+  command: "mktemp /tmp/rundep.XXXXXXXX"
+  register: rundep_location
+  when:
+    - ceph_origin == 'local'
+    - use_installer
+
+- name: copy rundep
+  copy:
+    src: "{{ansible_dir}}/rundep"
+    dest: "{{ item }}"
+  with_items: rundep_location.stdout_lines
+  when:
+    - ceph_origin == 'local'
+    - use_installer
+
+- name: install ceph dependencies
+  script: "{{ ansible_dir }}/rundep_installer.sh {{ item }}"
+  become: true
+  with_items: rundep_location.stdout_lines
+  when:
+    - ceph_origin == 'local'
+    - use_installer
+
 - name: install ceph
   yum:
     name: ceph
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when: not use_server_package_split
+  when:
+    - not use_server_package_split
+    - ansible_pkg_mgr == "yum"
+    - ceph_origin != 'local'

-- name: install distro or red hat storage ceph mon
+- name: synchronize ceph install
+  synchronize:
+    src: "{{ceph_installation_dir}}/"
+    dest: "/"
+  when:
+    - ceph_origin == 'local'
+
+- name: create user group ceph
+  group:
+    name: 'ceph'
+  when:
+    - ceph_origin == 'local'
+
+- name: create user ceph
+  user:
+    name: 'ceph'
+  when:
+    - ceph_origin == 'local'
+
+- name: install distro or red hat storage ceph mon via yum
   yum:
     name: "ceph-mon"
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
@@ -57,7 +111,7 @@
       or ceph_origin == "distro"
       or ceph_custom

-- name: install distro or red hat storage ceph mon
+- name: install distro or red hat storage ceph mon via dnf
   dnf:
     name: "ceph-mon"
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
@@ -69,7 +123,7 @@
       or ceph_dev
       or ceph_custom

-- name: install distro or red hat storage ceph osd
+- name: install distro or red hat storage ceph osd via yum
   yum:
     name: "ceph-osd"
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
@@ -81,7 +135,7 @@
       or ceph_dev
       or ceph_custom

-- name: install distro or red hat storage ceph osd
+- name: install distro or red hat storage ceph osd via dnf
   dnf:
     name: "ceph-osd"
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
@@ -93,7 +147,7 @@
       or ceph_dev
       or ceph_custom

-- name: install distro or red hat storage ceph mds
+- name: install distro or red hat storage ceph mds via yum
   yum:
     name: "ceph-mds"
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
@@ -105,7 +159,7 @@
       or ceph_dev
       or ceph_custom

-- name: install distro or red hat storage ceph mds
+- name: install distro or red hat storage ceph mds via dnf
   dnf:
     name: "ceph-mds"
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
@@ -117,7 +171,7 @@
       or ceph_dev
       or ceph_custom

-- name: install distro or red hat storage ceph base
+- name: install distro or red hat storage ceph base via yum
   yum:
     name: "ceph-base"
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
@@ -129,7 +183,7 @@
       or ceph_dev
       or ceph_custom

-- name: install distro or red hat storage ceph base
+- name: install distro or red hat storage ceph base via dnf
   dnf:
     name: "ceph-base"
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
@@ -173,18 +227,40 @@
     - rgw_group_name in group_names
     - ansible_pkg_mgr == "dnf"

-- name: install NFS gateway
+- name: install nfs ceph gateway
   yum:
     name: nfs-ganesha-ceph
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   when:
     - nfs_group_name in group_names
     - ansible_pkg_mgr == "yum"
+    - fsal_ceph

-- name: install NFS gateway
+- name: install nfs ceph gateway
   dnf:
     name: nfs-ganesha-ceph
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   when:
     - nfs_group_name in group_names
     - ansible_pkg_mgr == "dnf"
+    - fsal_ceph
+
+- name: install nfs rgw gateway
+  yum:
+    name: "{{ item }}"
+  with_items:
+    - nfs-ganesha-rgw
+    - ceph-radosgw
+  when:
+    - nfs_group_name in group_names
+    - ansible_pkg_mgr == "yum"
+    - fsal_rgw
+
+- name: install nfs rgw gateway
+  dnf:
+    name: "{{ item }}"
+  with_items:
+    - nfs-ganesha-rgw
+    - ceph-radosgw
+  when:
+    - nfs_group_name in group_names
+    - ansible_pkg_mgr == "dnf"
+    - fsal_rgw
@@ -5,15 +5,15 @@
 # intelligent way to determine the location of the key.
 - name: install the rh ceph storage repository key
   apt_key:
-    file: "{{ ceph_stable_rh_storage_repository_path }}/MON/release.asc"
+    file: "{{ ceph_rhcs_repository_path }}/MON/release.asc"
     state: present
   when:
-    - ceph_stable_rh_storage
-    - ceph_stable_rh_storage_iso_install
+    - ceph_rhcs
+    - ceph_rhcs_iso_install

 - name: add rh ceph storage repositories
   apt_repository:
-    repo: "deb file://{{ ceph_stable_rh_storage_repository_path }}/{{ item }}/ {{ ansible_lsb.codename }} main"
+    repo: "deb file://{{ ceph_rhcs_repository_path }}/{{ item }}/ {{ ansible_lsb.codename }} main"
     state: present
   changed_when: false
   with_items:
@@ -22,14 +22,14 @@
     - "Tools"
     - "Agent"
   when:
-    - ceph_stable_rh_storage
-    - ceph_stable_rh_storage_iso_install
+    - ceph_rhcs
+    - ceph_rhcs_iso_install

 - name: add the red hat storage apt-key
   apt_key:
     data: "{{ lookup('file', role_path+'/files/cephstablerhcs.asc') }}"
     state: present
-  when: not ceph_stable_rh_storage_iso_install
+  when: not ceph_rhcs_iso_install

 - name: install dependencies
   apt:
@@ -1,10 +1,10 @@
 ---
 - name: install red hat storage repository key
   rpm_key:
-    key: "{{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release"
+    key: "{{ ceph_rhcs_repository_path }}/RPM-GPG-KEY-redhat-release"
     state: present
   when:
-    - ceph_stable_rh_storage_iso_install
+    - ceph_rhcs_iso_install

 - name: add red hat storage repository
   template:
@@ -14,7 +14,7 @@
     group: root
     mode: 0644
   when:
-    - ceph_stable_rh_storage_iso_install
+    - ceph_rhcs_iso_install

 - name: install dependencies
   yum:
@@ -11,15 +11,15 @@

 - include: ./pre_requisites/prerequisite_rh_storage_iso_install.yml
   when:
-    - ceph_stable_rh_storage
-    - ceph_stable_rh_storage_iso_install
+    - ceph_rhcs
+    - ceph_rhcs_iso_install
   tags:
     - package-install

 - include: ./pre_requisites/prerequisite_rh_storage_cdn_install.yml
   when:
-    - ceph_stable_rh_storage
-    - ceph_stable_rh_storage_cdn_install
+    - ceph_rhcs
+    - ceph_rhcs_cdn_install
     - ansible_os_family == "RedHat"
   tags:
     - package-install
@@ -27,28 +27,28 @@
 - include: ./installs/install_on_redhat.yml
   when:
     ansible_os_family == 'RedHat' and
-    not ceph_stable_rh_storage_iso_install
+    not ceph_rhcs_iso_install
   tags:
     - package-install

 - include: ./installs/install_rh_storage_on_redhat.yml
   when:
     - ansible_distribution == "RedHat"
-    - ceph_stable_rh_storage
+    - ceph_rhcs
   tags:
     - package-install

 - include: ./installs/install_on_debian.yml
   when:
     - ansible_os_family == 'Debian'
-    - not ceph_stable_rh_storage
+    - not ceph_rhcs
   tags:
     - package-install

 - include: ./installs/install_rh_storage_on_debian.yml
   when:
     - ansible_os_family == 'Debian'
-    - ceph_stable_rh_storage
+    - ceph_rhcs
   tags:
     - package-install
@@ -0,0 +1,11 @@
+---
+- include: ../checks/check_ntp_atomic.yml
+  when: is_atomic
+
+- name: start the ntp service
+  service:
+    name: chronyd
+    enabled: yes
+    state: started
+  when:
+    - ntp_pkg_query.rc == 0
@@ -5,42 +5,42 @@
   changed_when: false

 - name: check if the red hat storage monitor repo is already present
-  shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-mon-rpms
+  shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms
   changed_when: false
   failed_when: false
   register: rh_storage_mon_repo
   when: mon_group_name in group_names

 - name: enable red hat storage monitor repository
-  command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-mon-rpms
+  command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms
   changed_when: false
   when:
     - mon_group_name in group_names
     - rh_storage_mon_repo.rc != 0

 - name: check if the red hat storage osd repo is already present
-  shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-osd-rpms
+  shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_rhcs_version }}-osd-rpms
   changed_when: false
   failed_when: false
   register: rh_storage_osd_repo
   when: osd_group_name in group_names

 - name: enable red hat storage osd repository
-  command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-osd-rpms
+  command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-osd-rpms
   changed_when: false
   when:
     - osd_group_name in group_names
     - rh_storage_osd_repo.rc != 0

 - name: check if the red hat storage rados gateway repo is already present
-  shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-tools-rpms
+  shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_rhcs_version }}-tools-rpms
   changed_when: false
   failed_when: false
   register: rh_storage_rgw_repo
   when: rgw_group_name in group_names

 - name: enable red hat storage rados gateway repository
-  command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_stable_rh_storage_version }}-tools-rpms
+  command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-tools-rpms
   changed_when: false
   when:
     - rgw_group_name in group_names
@@ -4,40 +4,40 @@
     path: "{{ item }}"
     state: directory
   with_items:
-    - "{{ ceph_stable_rh_storage_mount_path }}"
-    - "{{ ceph_stable_rh_storage_repository_path }}"
+    - "{{ ceph_rhcs_mount_path }}"
+    - "{{ ceph_rhcs_repository_path }}"

 - name: ensure destination iso directory exists
   file:
-    path: "{{ ceph_stable_rh_storage_iso_path | dirname }}"
+    path: "{{ ceph_rhcs_iso_path | dirname }}"
     state: directory
     recurse: yes
-  when: "'{{ ceph_stable_rh_storage_iso_path | dirname }}' != '/'"
+  when: "'{{ ceph_rhcs_iso_path | dirname }}' != '/'"

 - name: fetch the red hat storage iso from the ansible server
   copy:
-    src: "{{ ceph_stable_rh_storage_iso_path }}"
-    dest: "{{ ceph_stable_rh_storage_iso_path }}"
+    src: "{{ ceph_rhcs_iso_path }}"
+    dest: "{{ ceph_rhcs_iso_path }}"

-# assumption: ceph_stable_rh_storage_mount_path does not specify directory
+# assumption: ceph_rhcs_mount_path does not specify directory

 - name: mount red hat storage iso file
   mount:
-    name: "{{ ceph_stable_rh_storage_mount_path }}"
-    src: "{{ ceph_stable_rh_storage_iso_path }}"
+    name: "{{ ceph_rhcs_mount_path }}"
+    src: "{{ ceph_rhcs_iso_path }}"
     fstype: iso9660
     opts: ro,loop,noauto
     passno: 2
     state: mounted

 - name: copy red hat storage iso content
-  shell: cp -r {{ ceph_stable_rh_storage_mount_path }}/* {{ ceph_stable_rh_storage_repository_path }}
+  shell: cp -r {{ ceph_rhcs_mount_path }}/* {{ ceph_rhcs_repository_path }}
   args:
-    creates: "{{ ceph_stable_rh_storage_repository_path }}/README"
+    creates: "{{ ceph_rhcs_repository_path }}/README"

 - name: unmount red hat storage iso file
   mount:
-    name: "{{ ceph_stable_rh_storage_mount_path }}"
-    src: "{{ ceph_stable_rh_storage_iso_path }}"
+    name: "{{ ceph_rhcs_mount_path }}"
+    src: "{{ ceph_rhcs_iso_path }}"
     fstype: iso9660
     state: unmounted
@@ -8,59 +8,65 @@ auth service required = none
 auth client required = none
 auth supported = none
 {% endif %}
-{% if not mon_containerized_deployment_with_kv %}
+{% if not mon_containerized_deployment_with_kv and not mon_containerized_deployment %}
 fsid = {{ fsid }}
 {% endif %}
 max open files = {{ max_open_files }}
 {% if common_single_host_mode is defined %}
 osd crush chooseleaf type = 0
 {% endif %}
-[client.libvirt]
-admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
-log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
+{# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #}
+{% if groups[mon_group_name] is defined %}
+mon_initial_members = {% if groups[mon_group_name] is defined %}{% for host in groups[mon_group_name] %}{% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn %}{{ hostvars[host]['ansible_fqdn'] }}{% if not loop.last %},{% endif %}{% elif hostvars[host]['ansible_hostname'] is defined %}{{ hostvars[host]['ansible_hostname'] }}{% if not loop.last %},{% endif %}{% endif %}{% endfor %}{% endif %}
+{% endif %}

-[mon]
-{% if not mon_containerized_deployment_with_kv %}
-{% for host in groups[mon_group_name] %}
-{% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn %}
-[mon.{{ hostvars[host]['ansible_fqdn'] }}]
-host = {{ hostvars[host]['ansible_fqdn'] }}
-{% elif hostvars[host]['ansible_hostname'] is defined %}
-[mon.{{ hostvars[host]['ansible_hostname'] }}]
-host = {{ hostvars[host]['ansible_hostname'] }}
-{% endif %}
-# we need to check if monitor_interface is defined in the inventory per host or if it's set in a group_vars file
-{% if mon_containerized_deployment %}
-{% set interface = ["ansible_",ceph_mon_docker_interface]|join %}
-{% if interface in hostvars[host] and 'ipv4' in hostvars[host][interface] %}
-# user address from interface {{ ceph_mon_docker_interface }}
-mon addr = {{ hostvars[host][interface]['ipv4']['address'] }}
-{% elif hostvars[host]['monitor_address'] is defined %}
-# use host monitor address
-mon addr = {{ hostvars[host]['monitor_address'] }}
-{% elif monitor_address != "0.0.0.0" %}
-# use group_var monitor address
-mon addr = monitor_address
-{% endif %}
+{% if not mon_containerized_deployment and not mon_containerized_deployment_with_kv %}
+{% if groups[mon_group_name] is defined %}
+mon host = {% for host in groups[mon_group_name] %}{{ hostvars[host]['ansible_' + monitor_interface]['ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
 {% elif (hostvars[host]['monitor_interface'] is defined and hostvars[host]['monitor_interface'] != "interface") or monitor_interface != "interface" %}
 {% include 'mon_addr_interface.j2' %}
 {% else %}
 {% include 'mon_addr_address.j2' %}
 {% endif %}
+{% endif %}
+{% if mon_containerized_deployment %}
+fsid = {{ fsid }}
+{% if groups[mon_group_name] is defined %}
+{% for host in groups[mon_group_name] %}
+{% if mon_containerized_deployment %}
+{% set interface = ["ansible_",ceph_mon_docker_interface]|join %}
+mon_host = {{ hostvars[host][interface]['ipv4']['address'] }}
+{% if not loop.last %},{% endif %}
+{% elif hostvars[host]['monitor_address'] is defined %}
+mon_host = {{ hostvars[host]['monitor_address'] }}
+{% if not loop.last %},{% endif %}
+{% elif monitor_address != "0.0.0.0" %}
+mon_host = monitor_address
+{% if not loop.last %},{% endif %}
+{% endif %}
 {% endfor %}
 {% endif %}
+{% endif %}
+
+{% if public_network is defined %}
+public_network = {{ public_network }}
+{% endif %}
+{% if cluster_network is defined %}
+cluster_network = {{ cluster_network }}
+{% endif %}
+{% if common_single_host_mode is defined %}
+osd crush chooseleaf type = 0
+{% endif %}
+
+[client.libvirt]
+admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
+log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
+
 [osd]
 osd mkfs type = {{ osd_mkfs_type }}
 osd mkfs options xfs = {{ osd_mkfs_options_xfs }}
 osd mount options xfs = {{ osd_mount_options_xfs }}
 osd journal size = {{ journal_size }}
-{% if cluster_network is defined %}
-cluster_network = {{ cluster_network }}
-{% endif %}
-{% if public_network is defined %}
-public_network = {{ public_network }}
-{% endif %}
 {% if filestore_xattr_use_omap != None %}
 filestore xattr use omap = {{ filestore_xattr_use_omap }}
 {% elif osd_mkfs_type == "ext4" %}
@@ -1,17 +1,18 @@
 #jinja2: trim_blocks: "true", lstrip_blocks: "true"
 # {{ ansible_managed }}

+{% if fsal_ceph %}
 EXPORT
 {
-        Export_ID={{ ceph_nfs_export_id }};
+        Export_ID={{ ceph_nfs_ceph_export_id }};

         Path = "/";

-        Pseudo = {{ ceph_nfs_pseudo_path }};
+        Pseudo = {{ ceph_nfs_ceph_pseudo_path }};

-        Access_Type = {{ ceph_nfs_access_type }};
+        Access_Type = {{ ceph_nfs_ceph_access_type }};

-        NFS_Protocols = {{ ceph_nfs_protocols }};
+        NFS_Protocols = {{ ceph_nfs_ceph_protocols }};

         Transport_Protocols = TCP;
@@ -21,3 +22,37 @@ EXPORT
         Name = CEPH;
     }
 }
+{% endif %}
+{% if fsal_rgw %}
+EXPORT
+{
+        Export_ID={{ ceph_nfs_rgw_export_id }};
+
+        Path = "/";
+
+        Pseudo = {{ ceph_nfs_rgw_pseudo_path }};
+
+        Access_Type = {{ ceph_nfs_rgw_access_type }};
+
+        NFS_Protocols = {{ ceph_nfs_rgw_protocols }};
+
+        Transport_Protocols = TCP;
+
+        Sectype = sys,krb5,krb5i,krb5p;
+
+        FSAL {
+            Name = RGW;
+            User_Id = "{{ ceph_nfs_rgw_user }}";
+            Access_Key_Id ="{{ ceph_nfs_rgw_access_key }}";
+            Secret_Access_Key = "{{ ceph_nfs_rgw_secret_key }}";
+        }
+}
+{% endif %}
+
+LOG {
+    Facility {
+        name = FILE;
+        destination = "{{ ceph_nfs_log_file }}";
+        enable = active;
+    }
+}
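Rendering the new RGW export above requires the fsal_rgw toggle plus the ceph_nfs_rgw_* values introduced in the nfss sample vars earlier in this diff. A group_vars sketch using those sample values (the access key is the documented sample, not a real credential):

fsal_rgw: true
ceph_nfs_rgw_user: "cephnfs"
ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
ceph_nfs_rgw_pseudo_path: "/ceph"
ceph_nfs_rgw_access_type: "RW"
ceph_nfs_rgw_protocols: "3,4"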
@@ -1,2 +1 @@
-mon addr = {{ hostvars[host]['monitor_address'] if hostvars[host]['monitor_address'] is defined else monitor_address }}
+mon host = {% for host in groups[mon_group_name] %}{{ hostvars[host]['monitor_address'] if hostvars[host]['monitor_address'] is defined else monitor_address }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -1,2 +1 @@
-mon addr = {{ hostvars[host]['ansible_' + (hostvars[host]['monitor_interface'] if hostvars[host]['monitor_interface'] is defined else monitor_interface) ]['ipv4']['address'] }}
+mon host = {% for host in groups[mon_group_name] %}{{ hostvars[host]['ansible_' + (hostvars[host]['monitor_interface'] if hostvars[host]['monitor_interface'] is defined else monitor_interface) ]['ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}
@ -1,21 +1,21 @@
# {{ ansible_managed }}
[rh_storage_mon]
name=Red Hat Ceph Storage - local packages for Ceph monitor
-baseurl=file://{{ ceph_stable_rh_storage_repository_path }}/MON
+baseurl=file://{{ ceph_rhcs_repository_path }}/MON
enabled=1
gpgcheck=1
priority=1

[rh_storage_osd]
name=Red Hat Ceph Storage - local packages for Ceph OSD
-baseurl=file://{{ ceph_stable_rh_storage_repository_path }}/OSD
+baseurl=file://{{ ceph_rhcs_repository_path }}/OSD
enabled=1
gpgcheck=1
priority=1

[rh_storage_tools]
name=Red Hat Ceph Storage - local packages for Ceph client, MDS, and RGW
-baseurl=file://{{ ceph_stable_rh_storage_repository_path }}/Tools
+baseurl=file://{{ ceph_rhcs_repository_path }}/Tools
enabled=1
gpgcheck=1
priority=1
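A minimal sketch of the on-disk layout these baseurl entries expect, assuming ceph_rhcs_repository_path points at /opt/rhcs (a hypothetical path) and that the package content has already been copied there:

    mkdir -p /opt/rhcs/{MON,OSD,Tools}
    createrepo /opt/rhcs/MON    # yum metadata must exist for each sub-repository
    createrepo /opt/rhcs/OSD
    createrepo /opt/rhcs/Tools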
@ -17,6 +17,24 @@
  when: ceph_health.rc != 0

- include: pre_requisite.yml

+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
+  when:
+    - is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
+  when:
+    - not is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+
- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
  vars:
    ceph_docker_username: "{{ ceph_mds_docker_username }}"

@ -125,3 +125,34 @@
    with_pkg
  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')
+
+- name: install ntp on redhat using yum
+  yum:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'yum'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on redhat using dnf
+  dnf:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'dnf'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on debian
+  apt:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+  tags:
+    with_pkg
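A quick post-run check of the NTP handling above, sketched as an ad-hoc command and assuming the standard ceph-ansible inventory group names; ntpq is only available once the ntp package has been installed by these tasks:

    ansible mdss -b -m command -a 'ntpq -p'    # list the peers each MDS host is syncing against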
@ -44,5 +44,5 @@
    net: host
    state: running
    env: "CEPH_DAEMON=MDS,CEPHFS_CREATE=1,{{ ceph_mds_docker_extra_env }}"
-    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
+    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro"
  when: ansible_os_family != 'RedHat' and ansible_os_family != 'CoreOS'

@ -14,6 +14,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
  -e KV_TYPE={{kv_type}} \
  -e KV_IP={{kv_endpoint}} \
  {% endif -%}
+  -v /etc/localtime:/etc/localtime:ro \
  --privileged \
  -e CEPH_DAEMON=MDS \
  -e CEPHFS_CREATE=1 \
@ -53,7 +53,7 @@
    - is_after_hammer

- name: ceph monitor mkfs without keyring (for or after infernalis release)
-  command: ceph-mon --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
+  command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
  args:
    creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
  when:
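As a concrete sketch with hypothetical values (cluster: ceph, monitor_name: mon0, FSID held in $FSID), the updated task runs roughly:

    ceph-mon --cluster ceph --setuser ceph --setgroup ceph --mkfs -i mon0 --fsid "$FSID"
    ls /var/lib/ceph/mon/ceph-mon0/store.db    # the creates: guard skips the task once this exists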
@ -20,6 +20,23 @@

- include: pre_requisite.yml

+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
+  when:
+    - is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
+  when:
+    - not is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+
- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
  vars:
    ceph_docker_username: "{{ ceph_mon_docker_username }}"

@ -126,3 +126,35 @@
  tags:
    with_pkg
  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')
+
+- name: install ntp on redhat using yum
+  yum:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'yum'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on redhat using dnf
+  dnf:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'dnf'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on debian
+  apt:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+  tags:
+    with_pkg

@ -85,7 +85,7 @@
    state: "running"
    privileged: "{{ mon_docker_privileged }}"
    env: "MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},CEPH_FSID={{ fsid }},{{ ceph_mon_extra_envs }}"
-    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
+    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro"
  when:
    - ansible_os_family != 'RedHat'
    - ansible_os_family != 'CoreOS'

@ -99,6 +99,7 @@
    state: "running"
    privileged: "{{ mon_docker_privileged }}"
    env: "KV_TYPE={{kv_type}},KV_IP={{kv_endpoint}},MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},{{ ceph_mon_extra_envs }}"
+    volumes: "/etc/localtime:/etc/localtime:ro"
  when:
    - ansible_os_family != 'RedHat'
    - ansible_os_family != 'CoreOS'

@ -10,13 +10,15 @@
  with_items:
    - done
    - upstart
-  when: not use_systemd
+  when:
+    - not use_systemd

- name: start and add that the monitor service to the init sequence (ubuntu)
  command: initctl emit ceph-mon cluster={{ cluster }} id={{ monitor_name }}
  changed_when: false
  failed_when: false
-  when: not use_systemd
+  when:
+    - not use_systemd

# NOTE (leseb): somehow the service ansible module is messing things up
# as a safety measure we run the raw command

@ -15,6 +15,7 @@ ExecStart=/usr/bin/docker run --rm --name %i --net=host \
  -e KV_IP={{kv_endpoint}}\
  -e KV_PORT={{kv_port}} \
  {% endif -%}
+  -v /etc/localtime:/etc/localtime:ro \
  {% if mon_docker_privileged -%}
  --privileged \
  {% endif -%}
@ -27,6 +27,28 @@ ceph_nfs_export_id: 20134
ceph_nfs_pseudo_path: "/ceph"
ceph_nfs_protocols: "3,4"
ceph_nfs_access_type: "RW"
+ceph_nfs_log_file: "/var/log/ganesha.log"
+
+####################
+# FSAL Ceph Config #
+####################
+ceph_nfs_ceph_export_id: 20134
+ceph_nfs_ceph_pseudo_path: "/ceph"
+ceph_nfs_ceph_protocols: "3,4"
+ceph_nfs_ceph_access_type: "RW"
+
+###################
+# FSAL RGW Config #
+###################
+ceph_nfs_rgw_export_id: 20134
+ceph_nfs_rgw_pseudo_path: "/ceph"
+ceph_nfs_rgw_protocols: "3,4"
+ceph_nfs_rgw_access_type: "RW"
+ceph_nfs_rgw_user: "cephnfs"
+# Note: keys are optional and can be generated, but not on containerized, where
+# they must be configured.
+#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"

###################
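If the RGW export is wanted, the new per-FSAL defaults can also be overridden at run time; a hedged sketch (site.yml and the key values here are assumptions, not part of this change):

    ansible-playbook site.yml -e fsal_rgw=true \
      -e ceph_nfs_rgw_user=cephnfs \
      -e ceph_nfs_rgw_access_key=EXAMPLEACCESSKEY \
      -e ceph_nfs_rgw_secret_key=EXAMPLESECRETKEY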
@ -1,10 +1,28 @@
---
-- name: push ceph files to the ansible server
-  fetch:
-    src: "{{ item.0 }}"
-    dest: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
-    flat: yes
+- name: set config and keys paths
+  set_fact:
+    ceph_config_keys:
+      - /etc/ceph/ceph.conf
+      - /etc/ceph/ceph.client.admin.keyring
+      - /var/lib/ceph/radosgw/keyring
+
+- name: stat for config and keys
+  local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }}
+  with_items: ceph_config_keys
+  changed_when: false
+  become: false
+  failed_when: false
+  register: statconfig
+
+- name: try to fetch config and keys
+  copy:
+    src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
+    dest: "{{ item.0 }}"
+    owner: "64045"
+    group: "64045"
+    mode: 0644
+  changed_when: false
  with_together:
    - ceph_config_keys
    - statconfig.results
-  when: item.1.stat.exists == false
+  when: item.1.stat.exists == true

@ -7,6 +7,21 @@
    group: root
    mode: 0644

+- name: create the nfs rgw user
+  docker:
+    image: "{{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}:{{ ceph_rgw_docker_image_tag }}"
+    name: ceph-{{ ansible_hostname }}-rgw-user
+    expose: "{{ ceph_rgw_civetweb_port }}"
+    ports: "{{ ceph_rgw_civetweb_port }}:{{ ceph_rgw_civetweb_port }}"
+    state: running
+    env: "CEPH_DAEMON=RGW_USER,RGW_USERNAME={{ ceph_nfs_rgw_user }},RGW_USER_ACCESS_KEY={{ ceph_nfs_rgw_access_key }},RGW_USER_SECRET_KEY={{ ceph_nfs_rgw_secret_key }}"
+    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
+  when: fsal_rgw
+
+- name: get user create output
+  command: docker logs ceph-{{ ansible_hostname }}-rgw-user
+  register: rgwuser
+
- name: generate ganesha configuration file
  action: config_template
  args:

@ -22,6 +22,7 @@
  with_items:
    - /etc/ceph/
    - /var/lib/ceph/
+    - /var/lib/ceph/radosgw
  when: not after_hammer

- name: create bootstrap directories (after hammer)

@ -34,6 +35,7 @@
  with_items:
    - /etc/ceph/
    - /var/lib/ceph/
+    - /var/lib/ceph/radosgw
  when: after_hammer

- name: create ganesha directories

@ -1,13 +1,12 @@
---
-- name: set config and keys paths
+- name: set config paths
  set_fact:
-    ceph_config_keys:
-      - /etc/ceph/ceph.conf
+    nfs_config_keys:
      - /etc/ganesha/ganesha.conf

- name: stat for config and keys
  local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }}
-  with_items: ceph_config_keys
+  with_items: nfs_config_keys
  changed_when: false
  become: false
  failed_when: false

@ -22,6 +21,16 @@
    mode: 0644
  changed_when: false
  with_together:
-    - ceph_config_keys
+    - nfs_config_keys
    - statconfig.results
  when: item.1.stat.exists == true
+
+- name: push ganesha files to the ansible server
+  fetch:
+    src: "{{ item.0 }}"
+    dest: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
+    flat: yes
+  with_together:
+    - nfs_config_keys
+    - statconfig.results
+  when: item.1.stat.exists == false
@ -20,6 +20,23 @@

- include: pre_requisite.yml

+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
+  when:
+    - is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
+  when:
+    - not is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+
- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
  vars:
    ceph_docker_username: "{{ ceph_nfs_docker_username }}"

@ -28,12 +45,19 @@

- include: dirs_permissions.yml

-# let the first ganesha create configs and keyrings
+# Copy Ceph configs to host
+- include: copy_configs.yml
+
+- include: selinux.yml
+  when: ansible_os_family == 'RedHat'
+
+# let the first ganesha create configs and users
- include: create_configs.yml
  when:
    inventory_hostname == groups.nfss[0] and
    mon_containerized_default_ceph_conf_with_kv

+# Copy Ganesha configs to host
- include: fetch_configs.yml
  when: not mon_containerized_deployment_with_kv

@ -42,5 +66,3 @@

- include: start_docker_nfs.yml
-
-- include: copy_configs.yml
-  when: not mon_containerized_deployment_with_kv
@ -97,3 +97,35 @@
    enabled: yes
  tags:
    with_pkg
+
+- name: install ntp on redhat using yum
+  yum:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'yum'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on redhat using dnf
+  dnf:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'dnf'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on debian
+  apt:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+  tags:
+    with_pkg

@ -61,7 +61,7 @@
    privileged: true
    ports: "{{ ceph_nfs_port }}:{{ ceph_nfs_port }},111:111"
    env: "CEPH_DAEMON=NFS,CEPH_PUBLIC_NETWORK={{ ceph_nfs_docker_subnet }},{{ ceph_nfs_extra_envs }}"
-    volumes: "/etc/ceph:/etc/ceph,/etc/ganesha:/etc/ganesha"
+    volumes: "/etc/ceph:/etc/ceph,/etc/ganesha:/etc/ganesha,/etc/localtime:/etc/localtime:ro"
  when:
    not is_atomic and
    ansible_os_family != 'CoreOS' and

@ -75,7 +75,7 @@
    state: "running"
    privileged: true
    env: "CEPH_DAEMON=NFS,CEPH_PUBLIC_NETWORK={{ ceph_nfs_docker_subnet }},{{ ceph_nfs_extra_envs }}"
-    volumes: "/etc/ganesha:/etc/ganesha"
+    volumes: "/etc/ganesha:/etc/ganesha,/etc/localtime:/etc/localtime:ro"
  when:
    not is_atomic and
    ansible_os_family != 'CoreOS' and
@ -1,5 +1,5 @@
---
-- name: create NFS gateway directories
+- name: create nfs gateway directories
  file:
    path: "{{ item }}"
    state: directory

@ -10,7 +10,33 @@
    - /var/lib/nfs/ganesha
    - /var/run/ganesha

-- name: start NFS gateway service
+- name: create rgw nfs user
+  command: radosgw-admin user create --uid={{ ceph_nfs_rgw_user }} --display-name="RGW NFS User"
+  register: rgwuser
+  when: fsal_rgw
+
+- name: set access key
+  set_fact:
+    ceph_nfs_rgw_access_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['access_key'] }}"
+  when: fsal_rgw
+
+- name: set secret key
+  set_fact:
+    ceph_nfs_rgw_secret_key: "{{(rgwuser.stdout | from_json)['keys'][0]['secret_key']}}"
+  when: fsal_rgw
+
+- name: generate ganesha configuration file
+  action: config_template
+  args:
+    src: "{{ playbook_dir }}/roles/ceph-common/templates/ganesha.conf.j2"
+    dest: /etc/ganesha/ganesha.conf
+    owner: "root"
+    group: "root"
+    mode: "0644"
+    config_overrides: "{{ ganesha_conf_overrides }}"
+    config_type: ini
+
+- name: start nfs gateway service
  service:
    name: nfs-ganesha
    state: started
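The two set_fact tasks above parse the JSON that radosgw-admin prints. A sketch of the relevant output shape (the key values are placeholders, not real credentials; the exact field list can vary by Ceph version):

    radosgw-admin user create --uid=cephnfs --display-name="RGW NFS User"
    # ... "keys": [ { "user": "cephnfs", "access_key": "<generated>", "secret_key": "<generated>" } ] ...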
@ -7,7 +7,7 @@ After=docker.service
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker rm %i
ExecStartPre=/usr/bin/mkdir -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha
-ExecStart=/usr/bin/docker run --rm --name %i --net=host \
+ExecStart=/usr/bin/docker run --rm --net=host \
{% if not mon_containerized_deployment_with_kv -%}
  -v /etc/ceph:/etc/ceph \
  -v /etc/ganesha:/etc/ganesha \

@ -15,10 +15,10 @@ ExecStart=/usr/bin/docker run --rm --name %i --net=host \
  -e KV_TYPE={{kv_type}} \
  -e KV_IP={{kv_endpoint}}\
  {% endif -%}
+  -v /etc/localtime:/etc/localtime:ro \
  --privileged \
  -e CEPH_DAEMON=NFS \
-  -e CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }} \
-  --name={{ ansible_hostname }} \
+  --name=nfs-{{ ansible_hostname }} \
  {{ ceph_nfs_docker_username }}/{{ ceph_nfs_docker_imagename }}:{{ ceph_nfs_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop %i
Restart=always

@ -113,7 +113,7 @@ bluestore: false
# Keys are stored into the monitors k/v store
# Use 'true' to enable this scenario
# Both journal and data are stored on the same dm-crypt encrypted device
-dmcrypt_journal_colocation: false
+dmcrypt_journal_collocation: false


# VI. Encrypt osd data and/or journal devices with dm-crypt.
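Because the variable spelling changes, any existing inventory or group_vars entry that still sets the old name silently stops having an effect. A one-off migration sketch (the group_vars/ path is an assumption about a typical checkout layout):

    grep -rl 'dmcrypt_journal_colocation' group_vars/ | \
      xargs sed -i 's/dmcrypt_journal_colocation/dmcrypt_journal_collocation/g'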
@ -36,7 +36,7 @@
    - item.value.removable == "0"
    - item.value.partitions|count == 0
    - osd_auto_discovery
-    - dmcrypt_journal_colocation
+    - dmcrypt_journal_collocation

- name: activate osd(s) when device is a disk (dmcrypt)
  command: ceph-disk activate --dmcrypt {{ item.1 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1

@ -56,7 +56,7 @@
# https://github.com/ansible/ansible/issues/4297
- name: combine ispartition results
  set_fact:
-    combined_activate_osd_disk_results: "{{ activate_osd_disk if not dmcrypt_journal_colocation else activate_osd_disk_dmcrypt }}"
+    combined_activate_osd_disk_results: "{{ activate_osd_disk if not dmcrypt_journal_collocation else activate_osd_disk_dmcrypt }}"

- name: fail if ceph-disk cannot create an OSD
  fail:

@ -20,6 +20,23 @@

- include: pre_requisite.yml

+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
+  when:
+    - is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
+  when:
+    - not is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+
- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
  vars:
    ceph_docker_username: '{{ ceph_osd_docker_username }}'

@ -125,3 +125,35 @@
  tags:
    with_pkg
  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')
+
+- name: install ntp on redhat using yum
+  yum:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'yum'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on redhat using dnf
+  dnf:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'dnf'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on debian
+  apt:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+  tags:
+    with_pkg
@ -28,10 +28,11 @@
  -v /etc/ceph:/etc/ceph \
  -v /var/lib/ceph/:/var/lib/ceph/ \
  -v /dev:/dev \
+  -v /etc/localtime:/etc/localtime:ro \
  -e "OSD_DEVICE={{ item.0 }}" \
  -e "{{ ceph_osd_docker_prepare_env }}" \
-  "{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}:{{ ceph_osd_docker_image_tag }}" \
-  OSD_CEPH_DISK_PREPARE
+  -e CEPH_DAEMON=osd_ceph_disk_prepare \
+  "{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}:{{ ceph_osd_docker_image_tag }}"
  with_together:
    - ceph_osd_docker_devices
    - osd_prepared.results

@ -48,6 +49,7 @@
  --name="{{ ansible_hostname }}-osd-prepare-{{ item.0 |
             regex_replace('/', '') }}" \
  -v /dev:/dev \
+  -v /etc/localtime:/etc/localtime:ro \
  -e "OSD_DEVICE={{ item.0 }}" \
  -e "{{ ceph_osd_docker_prepare_env }}" \
  -e CEPH_DAEMON=osd_ceph_disk_prepare \

@ -106,7 +108,7 @@
    state: started
    privileged: yes
    env: "OSD_DEVICE={{ item }},{{ ceph_osd_docker_extra_env }}"
-    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev:/dev,/run:/run"
+    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro,/dev:/dev,/run:/run"
  with_items: ceph_osd_docker_devices
  when:
    - ansible_os_family != 'RedHat'

@ -122,7 +124,7 @@
    state: running
    privileged: yes
    env: "KV_TYPE={{kv_type}},KV_IP={{kv_endpoint}},OSD_DEVICE={{ item }},{{ ceph_osd_docker_extra_env }}"
-    volumes: "/dev/:/dev/"
+    volumes: "/etc/localtime:/etc/localtime:ro,/dev/:/dev/"
  with_items: ceph_osd_docker_devices
  when:
    - ansible_os_family != 'RedHat'
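A quick way to confirm the prepare containers ran, sketched for a hypothetical device /dev/sdb (the --name pattern above strips the slashes from the device path):

    docker ps -a --filter "name=$(hostname)-osd-prepare-"
    docker logs "$(hostname)-osd-prepare-devsdb"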
@ -32,9 +32,9 @@
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False

-- include: ./scenarios/dmcrypt-journal-colocation.yml
+- include: ./scenarios/dmcrypt-journal-collocation.yml
  when:
-    - dmcrypt_journal_colocation
+    - dmcrypt_journal_collocation
    - not osd_containerized_deployment
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False

@ -35,7 +35,7 @@
  set_fact:
    copy_admin_key: true
  when:
-    - dmcrypt_journal_colocation or dmcrypt_dedicated_journal
+    - dmcrypt_journal_collocation or dmcrypt_dedicated_journal

- name: copy osd bootstrap key
  copy:

@ -16,7 +16,7 @@
    - ansible_devices is defined
    - item.value.removable == "0"
    - item.value.partitions|count == 0
-    - dmcrypt_journal_colocation
+    - dmcrypt_journal_collocation
    - osd_auto_discovery

- name: manually prepare osd disk(s) (dmcrypt)

@ -30,7 +30,7 @@
    - not item.1.get("skipped")
    - item.0.get("rc", 0) != 0
    - item.1.get("rc", 0) != 0
-    - dmcrypt_journal_colocation
+    - dmcrypt_journal_collocation
    - not osd_auto_discovery

- include: ../activate_osds.yml

@ -15,6 +15,7 @@ ExecStart=/usr/bin/docker run --rm --net=host --pid=host\
  -e KV_IP={{kv_endpoint}} \
  -e KV_PORT={{kv_port}} \
  {% endif -%}
+  -v /etc/localtime:/etc/localtime:ro \
  -v /dev:/dev \
  --privileged \
  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
@ -17,6 +17,24 @@
  when: ceph_health.rc != 0

- include: pre_requisite.yml

+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
+  when:
+    - is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
+  when:
+    - not is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+
- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
  vars:
    ceph_docker_username: "{{ ceph_rbd_mirror_docker_username }}"

@ -124,3 +124,35 @@
  tags:
    with_pkg
  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')
+
+- name: install ntp on redhat using yum
+  yum:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'yum'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on redhat using dnf
+  dnf:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'dnf'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on debian
+  apt:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+  tags:
+    with_pkg
@ -8,6 +8,5 @@
  shell: chcon -Rt svirt_sandbox_file_t {{ item }}
  with_items:
    - /etc/ceph
-    - /var/lib/ceph
  changed_when: false
  when: sestatus.stdout != 'Disabled'

@ -4,21 +4,14 @@
  become: true
  template:
    src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2"
-    dest: /var/lib/ceph/ceph-rbd-mirror@.service
+    dest: /etc/systemd/system/ceph-rbd-mirror@.service
    owner: "root"
    group: "root"
    mode: "0644"
  when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS'

-- name: link systemd unit file for rbd mirror instance
-  file:
-    src: /var/lib/ceph/ceph-rbd-mirror@.service
-    dest: /etc/systemd/system/multi-user.target.wants/ceph-rbd-mirror@{{ ansible_hostname }}.service
-    state: link
-  when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS'
-
- name: enable systemd unit file for rbd mirror instance
-  command: systemctl enable /etc/systemd/system/multi-user.target.wants/ceph-rbd-mirror@{{ ansible_hostname }}.service
+  command: systemctl enable ceph-rbd-mirror@{{ ansible_hostname }}.service
  failed_when: false
  changed_when: false
  when: ansible_os_family == 'RedHat' or ansible_os_family == 'CoreOS'

@ -39,9 +32,9 @@

- name: run the ceph rbd mirror docker image
  docker:
-    image: "{{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}"
-    name: ceph-{{ ansible_hostname }}-rbd-mirror
+    image: "{{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}:{{ ceph_rbd_mirror_docker_image_tag }}"
+    name: "{{ ansible_hostname }}"
    net: host
    state: running
-    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
+    volumes: "/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro"
  when: ansible_os_family != 'RedHat' and ansible_os_family != 'CoreOS'
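Placing the template unit directly in /etc/systemd/system means the usual systemd template-unit workflow applies; an equivalent manual sequence for a hypothetical host named ceph-rbd1 would be roughly:

    systemctl daemon-reload
    systemctl enable ceph-rbd-mirror@ceph-rbd1.service
    systemctl start ceph-rbd-mirror@ceph-rbd1.service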
@ -8,16 +8,16 @@ ExecStartPre=-/usr/bin/docker stop {{ ansible_hostname }}
ExecStartPre=-/usr/bin/docker rm {{ ansible_hostname }}
ExecStart=/usr/bin/docker run --rm --net=host \
{% if not rbd_mirror_containerized_deployment_with_kv -%}
-  -v /var/lib/ceph:/var/lib/ceph \
  -v /etc/ceph:/etc/ceph \
{% else -%}
  -e KV_TYPE={{kv_type}} \
  -e KV_IP={{kv_endpoint}} \
{% endif -%}
+  -v /etc/localtime:/etc/localtime:ro \
  --privileged \
  -e CEPH_DAEMON=RBD_MIRROR \
  --name={{ ansible_hostname }} \
-  {{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}
+  {{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}:{{ ceph_rbd_mirror_docker_image_tag }}
ExecStopPost=-/usr/bin/docker stop {{ ansible_hostname }}
Restart=always
RestartSec=10s
@ -1,5 +1,31 @@
---
+- name: check if it is Atomic host
+  stat: path=/run/ostree-booted
+  register: stat_ostree
+
+- name: set fact for using Atomic host
+  set_fact:
+    is_atomic: '{{ stat_ostree.stat.exists }}'
+
- include: pre_requisite.yml

+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
+  when:
+    - is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
+  when:
+    - not is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+
- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
  vars:
    ceph_docker_username: "{{ ceph_restapi_docker_username }}"

@ -122,3 +122,35 @@
    enabled: yes
  tags:
    with_pkg
+
+- name: install ntp on redhat using yum
+  yum:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'yum'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on redhat using dnf
+  dnf:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'dnf'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on debian
+  apt:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+  tags:
+    with_pkg
@ -7,4 +7,4 @@
    expose: "{{ ceph_restapi_port }}"
    state: running
    env: "RESTAPI_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_restapi_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=RESTAPI,{{ ceph_restapi_docker_extra_env }}"
-    volumes: "/etc/ceph:/etc/ceph"
+    volumes: "/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro"
@ -0,0 +1,36 @@
+---
+- name: set config and keys paths
+  set_fact:
+    rgw_config_keys:
+      - "/var/lib/ceph/radosgw/{{ ansible_hostname }}/keyring"
+  when: fsal_rgw
+
+- name: wait for rgw keyring
+  wait_for: path="/var/lib/ceph/radosgw/{{ ansible_hostname }}/keyring"
+  when:
+    - fsal_rgw
+    - inventory_hostname == groups.rgws[0]
+
+- name: stat for config and keys
+  local_action: stat path={{ fetch_directory }}/docker_mon_files/{{ item }}
+  with_items: rgw_config_keys
+  changed_when: false
+  become: false
+  failed_when: false
+  register: statconfig
+  when:
+    - fsal_rgw
+    - inventory_hostname == groups.rgws[0]
+
+- name: push ceph files to the ansible server
+  fetch:
+    src: "{{ item.0 }}"
+    dest: "{{ fetch_directory }}/docker_mon_files/var/lib/ceph/radosgw/keyring"
+    flat: yes
+  with_together:
+    - rgw_config_keys
+    - statconfig.results
+  when:
+    - item.1.stat.exists == false
+    - fsal_rgw
+    - inventory_hostname == groups.rgws[0]
@ -17,6 +17,24 @@
  when: ceph_health.rc != 0

- include: pre_requisite.yml

+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
+  when:
+    - is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_redhat.yml"
+  when:
+    - not is_atomic
+    - ansible_os_family == 'RedHat'
+    - ntp_service_enabled
+
+- include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_debian.yml"
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+
- include: "{{ playbook_dir }}/roles/ceph-common/tasks/docker/fetch_image.yml"
  vars:
    ceph_docker_username: "{{ ceph_rgw_docker_username }}"

@ -31,3 +49,5 @@
  when: ansible_os_family == 'RedHat'

- include: start_docker_rgw.yml
+
+- include: copy_configs.yml

@ -110,3 +110,35 @@
    enabled: yes
  tags:
    with_pkg
+
+- name: install ntp on redhat using yum
+  yum:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'yum'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on redhat using dnf
+  dnf:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'RedHat'
+    - ansible_pkg_mgr == 'dnf'
+    - ntp_service_enabled
+  tags:
+    with_pkg
+
+- name: install ntp on debian
+  apt:
+    name: ntp
+    state: present
+  when:
+    - ansible_os_family == 'Debian'
+    - ntp_service_enabled
+  tags:
+    with_pkg
@ -45,5 +45,5 @@
    ports: "{{ ceph_rgw_civetweb_port }}:{{ ceph_rgw_civetweb_port }}"
    state: running
    env: "CEPH_DAEMON=RGW,{{ ceph_rgw_docker_extra_env }}"
-    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
+    volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro"
  when: ansible_os_family != 'RedHat' and ansible_os_family != 'CoreOS'
@ -41,6 +41,14 @@
    group: "{{ key_group }}"
  when: cephx

+- name: ensure ceph-radosgw systemd unit file is present
+  command: chkconfig --add ceph-radosgw
+  args:
+    creates: /var/run/systemd/generator.late/ceph-radosgw.service
+  when:
+    - ansible_os_family == "RedHat"
+    - is_before_infernalis
+
- name: activate rados gateway with upstart
  file:
    path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/{{ item }}
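Expressed as shell, the added task amounts to the following on a pre-Infernalis RHEL host (a sketch; the generator path is the same one used by the creates: guard above):

    chkconfig --add ceph-radosgw
    ls /var/run/systemd/generator.late/ceph-radosgw.service   # present once systemd's sysv generator has produced the unit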
@ -14,6 +14,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
  -e KV_TYPE={{kv_type}} \
  -e KV_IP={{kv_endpoint}} \
  {% endif -%}
+  -v /etc/localtime:/etc/localtime:ro \
  --privileged \
  -e CEPH_DAEMON=RGW \
  --name={{ ansible_hostname }} \
@ -0,0 +1,45 @@
+#Package lines can be commented out with '#'
+#
+#boost-atomic
+#boost-chrono
+#boost-date-time
+#boost-iostreams
+#boost-program
+#boost-random
+#boost-regex
+#boost-system
+#boost-thread
+#bzip2-libs
+#cyrus-sasl-lib
+#expat
+#fcgi
+#fuse-libs
+#glibc
+#hdparm
+#keyutils-libs
+#leveldb
+#libaio
+#libatomic_ops
+#libattr
+#libblkid
+#libcap
+#libcom_err
+#libcurl
+#libgcc
+#libicu
+#libidn
+#libnghttp2
+#libpsl
+#libselinux
+#libssh2
+#libstdc++
+#libunistring
+#nss-softokn-freebl
+#openldap
+#openssl-libs
+#pcre
+#python-nose
+#python-sphinx
+#snappy
+#systemd-libs
+#zlib

@ -0,0 +1,27 @@
+#!/bin/bash -e
+#
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Daniel Lin <danielin@umich.edu>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+
+if test -f /etc/redhat-release ; then
+  PACKAGE_INSTALLER=yum
+elif type apt-get > /dev/null 2>&1 ; then
+  PACKAGE_INSTALLER=apt-get
+else
+  echo "ERROR: Package Installer could not be determined"
+  exit 1
+fi
+
+while read p; do
+  if [[ $p =~ ^#.* ]] ; then
+    continue
+  fi
+  $PACKAGE_INSTALLER install $p -y
+done < $1
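Usage sketch for the installer script above, run on the target machine: the argument is the dependency list file (the rundep file shipped alongside it), with the wanted package lines uncommented first.

    chmod +x rundep_installer.sh
    sudo ./rundep_installer.sh rundep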
@ -8,6 +8,7 @@ osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
+rbd_mirror_vms: 0
client_vms: 0

# Deploy RESTAPI on each of the Monitors

@ -23,6 +24,9 @@ disks: "[ '/dev/sdb', '/dev/sdc' ]"

eth: 'enp0s8'
vagrant_box: centos/atomic-host
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+vagrant_sync_dir: /home/vagrant/sync

# if vagrant fails to attach storage controller, add the storage controller name by:
# VBoxManage storagectl `VBoxManage list vms |grep ceph-ansible_osd0|awk '{print $1}'|tr \" ' '` --name "SATA" --add sata
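A quick way to confirm the synced folder landed where vagrant_sync_dir points, assuming a VM named mon0 exists in this Vagrant environment:

    vagrant ssh mon0 -c 'ls /home/vagrant/sync'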
@ -44,6 +44,9 @@ disks: "[ '/dev/sdb', '/dev/sdc' ]"
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: ubuntu/trusty64
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+vagrant_sync_dir: /home/vagrant/sync
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.