address Ansible linter errors

This addresses all errors reported by the Ansible linter.

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
pull/7476/head
Guillaume Abrioux 2024-02-14 11:14:02 +01:00
parent 7d25a5d565
commit 18da10bb7a
245 changed files with 5490 additions and 4948 deletions
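Most of the churn follows a handful of ansible-lint rules. A minimal before/after sketch of the recurring patterns (illustrative only, not copied from any single file; the rule identifiers in the comments are the usual ansible-lint names for these checks):

# before
- hosts: mons
  become: True
  tasks:
    - import_role:
        name: ceph-defaults
    - name: set ceph install 'In Progress'
      set_stats:
        data:
          status: "In Progress"

# after
- name: Deploy Ceph monitors               # plays and tasks get names (name[play], name[missing])
  hosts: mons
  become: true                             # booleans are lowercased (yaml[truthy])
  tasks:
    - name: Import ceph-defaults role      # task names start with a capital letter (name[casing])
      ansible.builtin.import_role:         # bare module names become fully qualified (fqcn)
        name: ceph-defaults
    - name: Set ceph install 'In Progress'
      ansible.builtin.set_stats:
        data:
          status: "In Progress"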

View File

@ -10,10 +10,7 @@ jobs:
with:
python-version: '3.10'
architecture: x64
- run: pip install -r <(grep ansible tests/requirements.txt) ansible-lint==6.16.0 netaddr
- run: pip install -r <(grep ansible tests/requirements.txt) ansible-lint netaddr
- run: ansible-galaxy install -r requirements.yml
- run: ansible-lint -x 106,204,205,208 -v --force-color ./roles/*/ ./infrastructure-playbooks/*.yml site-container.yml.sample site-container.yml.sample dashboard.yml
- run: ansible-playbook -i ./tests/functional/all_daemons/hosts site.yml.sample --syntax-check --list-tasks -vv
- run: ansible-playbook -i ./tests/functional/all_daemons/hosts site-container.yml.sample --syntax-check --list-tasks -vv
- run: ansible-playbook -i ./tests/functional/all_daemons/hosts dashboard.yml --syntax-check --list-tasks -vv
- run: ansible-playbook -i ./tests/functional/all_daemons/hosts infrastructure-playbooks/*.yml --syntax-check --list-tasks -vv
- run: ansible-lint -x 'yaml[line-length],role-name,run-once' -v --force-color ./roles/*/ ./infrastructure-playbooks/*.yml site-container.yml.sample site.yml.sample dashboard.yml
- run: ansible-playbook -i ./tests/functional/all_daemons/hosts site.yml.sample site-container.yml.sample dashboard.yml infrastructure-playbooks/*.yml --syntax-check --list-tasks -vv
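The lint step now skips rules by name rather than by the numeric IDs that newer ansible-lint releases no longer accept, and the per-playbook syntax checks are folded into a single run. The same exclusions could equally live in a .ansible-lint file at the repository root; a sketch, assuming such a file were used instead of command-line flags:

---
# .ansible-lint (hypothetical; this commit keeps the skip list on the command line)
skip_list:
  - yaml[line-length]
  - role-name
  - run-once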

View File

@ -1,5 +1,6 @@
---
- hosts:
- name: Deploy node_exporter
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
- "{{ mds_group_name|default('mdss') }}"
@ -12,75 +13,91 @@
gather_facts: false
become: true
pre_tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- name: set ceph node exporter install 'In Progress'
- name: Set ceph node exporter install 'In Progress'
run_once: true
set_stats:
ansible.builtin.set_stats:
data:
installer_phase_ceph_node_exporter:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
- name: Import ceph-container-engine
ansible.builtin.import_role:
name: ceph-container-engine
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
tasks_from: registry
when:
- not containerized_deployment | bool
- ceph_docker_registry_auth | bool
- import_role:
- name: Import ceph-node-exporter role
ansible.builtin.import_role:
name: ceph-node-exporter
post_tasks:
- name: set ceph node exporter install 'Complete'
- name: Set ceph node exporter install 'Complete'
run_once: true
set_stats:
ansible.builtin.set_stats:
data:
installer_phase_ceph_node_exporter:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts: "{{ monitoring_group_name | default('monitoring') }}"
- name: Deploy grafana and prometheus
hosts: "{{ monitoring_group_name | default('monitoring') }}"
gather_facts: false
become: true
pre_tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- name: set ceph grafana install 'In Progress'
- name: Set ceph grafana install 'In Progress'
run_once: true
set_stats:
ansible.builtin.set_stats:
data:
installer_phase_ceph_grafana:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
# - ansible.builtin.import_role:
# name: ceph-facts
# tags: ['ceph_update_config']
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: grafana
tags: ['ceph_update_config']
- import_role:
- name: Import ceph-prometheus role
ansible.builtin.import_role:
name: ceph-prometheus
- import_role:
- name: Import ceph-grafana role
ansible.builtin.import_role:
name: ceph-grafana
post_tasks:
- name: set ceph grafana install 'Complete'
- name: Set ceph grafana install 'Complete'
run_once: true
set_stats:
ansible.builtin.set_stats:
data:
installer_phase_ceph_grafana:
status: "Complete"
@ -88,37 +105,44 @@
# using groups[] here otherwise it can't fallback to the mon if there's no mgr group.
# adding an additional | default(omit) in case where no monitors are present (external ceph cluster)
- hosts: "{{ groups[mgr_group_name|default('mgrs')] | default(groups[mon_group_name|default('mons')]) | default(omit) }}"
- name: Deploy dashboard
hosts: "{{ groups['mgrs'] | default(groups['mons']) | default(omit) }}"
gather_facts: false
become: true
pre_tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- name: set ceph dashboard install 'In Progress'
- name: Set ceph dashboard install 'In Progress'
run_once: true
set_stats:
ansible.builtin.set_stats:
data:
installer_phase_ceph_dashboard:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
# - name: Import ceph-facts role
# ansible.builtin.import_role:
# name: ceph-facts
# tags: ['ceph_update_config']
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: grafana
tags: ['ceph_update_config']
- import_role:
- name: Import ceph-dashboard role
ansible.builtin.import_role:
name: ceph-dashboard
post_tasks:
- name: set ceph dashboard install 'Complete'
- name: Set ceph dashboard install 'Complete'
run_once: true
set_stats:
ansible.builtin.set_stats:
data:
installer_phase_ceph_dashboard:
status: "Complete"

View File

@ -74,7 +74,7 @@ dummy:
# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
# with each others.
#configure_firewall: True
#configure_firewall: true
# Open ports on corresponding nodes if firewall is installed on it
#ceph_mon_firewall_zone: public
@ -120,7 +120,7 @@ dummy:
# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
#upgrade_ceph_packages: False
#upgrade_ceph_packages: false
#ceph_use_distro_backports: false # DEBIAN ONLY
#ceph_directories_mode: "0755"
@ -171,7 +171,7 @@ dummy:
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@ -229,7 +229,7 @@ dummy:
# a URL to the .repo file to be installed on the targets. For deb,
# ceph_custom_repo should be the URL to the repo base.
#
#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
#ceph_custom_repo: https://server.domain.com/ceph-custom-repo
@ -238,14 +238,14 @@ dummy:
# Enabled when ceph_repository == 'local'
#
# Path to DESTDIR of the ceph install
#ceph_installation_dir: "/path/to/ceph_installation/"
# ceph_installation_dir: "/path/to/ceph_installation/"
# Whether or not to use installer script rundep_installer.sh
# This script takes in rundep and installs the packages line by line onto the machine
# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
# all runtime dependencies installed
#use_installer: false
# use_installer: false
# Root directory for ceph-ansible
#ansible_dir: "/path/to/ceph-ansible"
# ansible_dir: "/path/to/ceph-ansible"
######################
@ -328,12 +328,12 @@ dummy:
#ip_version: ipv4
#mon_host_v1:
# enabled: True
# enabled: true
# suffix: ':6789'
#mon_host_v2:
# suffix: ':3300'
#enable_ceph_volume_debug: False
#enable_ceph_volume_debug: false
##########
# CEPHFS #
@ -405,7 +405,7 @@ dummy:
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
#common_single_host_mode: true
# common_single_host_mode: true
## Handlers - restarting daemons after a config change
# if for whatever reasons the content of your ceph configuration changes
@ -527,16 +527,16 @@ dummy:
#ceph_docker_image_tag: latest-main
#ceph_docker_registry: quay.io
#ceph_docker_registry_auth: false
#ceph_docker_registry_username:
#ceph_docker_registry_password:
#ceph_docker_http_proxy:
#ceph_docker_https_proxy:
# ceph_docker_registry_username:
# ceph_docker_registry_password:
# ceph_docker_http_proxy:
# ceph_docker_https_proxy:
#ceph_docker_no_proxy: "localhost,127.0.0.1"
## Client only docker image - defaults to {{ ceph_docker_image }}
#ceph_client_docker_image: "{{ ceph_docker_image }}"
#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
#ceph_client_docker_registry: "{{ ceph_docker_registry }}"
#containerized_deployment: False
#containerized_deployment: false
#container_binary:
#timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
@ -563,7 +563,7 @@ dummy:
# name: "images"
# rule_name: "my_replicated_rule"
# application: "rbd"
# pg_autoscale_mode: False
# pg_autoscale_mode: false
# pg_num: 16
# pgp_num: 16
# target_size_ratio: 0.2
@ -613,7 +613,7 @@ dummy:
#############
# DASHBOARD #
#############
#dashboard_enabled: True
#dashboard_enabled: true
# Choose http or https
# For https, you should set dashboard.crt/key and grafana.crt/key
# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
@ -625,7 +625,7 @@ dummy:
#dashboard_admin_user: admin
#dashboard_admin_user_ro: false
# This variable must be set with a strong custom password when dashboard_enabled is True
#dashboard_admin_password: p@ssw0rd
# dashboard_admin_password: p@ssw0rd
# We only need this for SSL (https) connections
#dashboard_crt: ''
#dashboard_key: ''
@ -634,7 +634,7 @@ dummy:
#dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
#dashboard_rgw_api_user_id: ceph-dashboard
#dashboard_rgw_api_admin_resource: ''
#dashboard_rgw_api_no_ssl_verify: False
#dashboard_rgw_api_no_ssl_verify: false
#dashboard_frontend_vip: ''
#dashboard_disabled_features: []
#prometheus_frontend_vip: ''
@ -643,7 +643,7 @@ dummy:
#node_exporter_port: 9100
#grafana_admin_user: admin
# This variable must be set with a strong custom password when dashboard_enabled is True
#grafana_admin_password: admin
# grafana_admin_password: admin
# We only need this for SSL (https) connections
#grafana_crt: ''
#grafana_key: ''
@ -675,7 +675,7 @@ dummy:
#grafana_plugins:
# - vonage-status-panel
# - grafana-piechart-panel
#grafana_allow_embedding: True
#grafana_allow_embedding: true
#grafana_port: 3000
#grafana_network: "{{ public_network }}"
#grafana_conf_overrides: {}
@ -691,7 +691,7 @@ dummy:
#prometheus_conf_overrides: {}
# Uncomment out this variable if you need to customize the retention period for prometheus storage.
# set it to '30d' if you want to retain 30 days of data.
#prometheus_storage_tsdb_retention_time: 15d
# prometheus_storage_tsdb_retention_time: 15d
#alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2"
#alertmanager_container_cpu_period: 100000
#alertmanager_container_cpu_cores: 2
@ -749,11 +749,11 @@ dummy:
#
# Example:
#
#rbd_devices:
# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
# rbd_devices:
# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
#rbd_devices: {}
# client_connections defines the client ACL's to restrict client access to specific LUNs
@ -767,20 +767,19 @@ dummy:
#
# Example:
#
#client_connections:
# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
# client_connections:
# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
#client_connections: {}
#no_log_on_ceph_key_tasks: True
#no_log_on_ceph_key_tasks: true
###############
# DEPRECATION #
###############
######################################################
# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
# *DO NOT* MODIFY THEM #
@ -788,5 +787,5 @@ dummy:
#container_exec_cmd:
#docker: false
#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
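The sample group_vars changes above are mechanical: boolean values in the commented defaults are lowercased for yaml[truthy], and purely illustrative commented-out keys gain a space after '#' to satisfy the YAML comment-formatting check. For example:

# flagged
#configure_firewall: True
#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc

# accepted
#configure_firewall: true
# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc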

View File

@ -45,6 +45,6 @@ dummy:
# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
#keys:
# - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
# - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }
# - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
# - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }

View File

@ -13,13 +13,13 @@ dummy:
# GENERAL #
###########
# Whether or not to generate secure certificate to iSCSI gateway nodes
#generate_crt: False
#generate_crt: false
#iscsi_conf_overrides: {}
#iscsi_pool_name: rbd
#iscsi_pool_size: 3
# iscsi_pool_size: 3
#copy_admin_key: True
#copy_admin_key: true
##################
# RBD-TARGET-API #

View File

@ -43,7 +43,7 @@ dummy:
# ceph_mds_systemd_overrides will override the systemd settings
# for the ceph-mds services.
# For example,to set "PrivateDevices=false" you can specify:
#ceph_mds_systemd_overrides:
# Service:
# PrivateDevices: False
# ceph_mds_systemd_overrides:
# Service:
# PrivateDevices: false

View File

@ -54,7 +54,7 @@ dummy:
# ceph_mgr_systemd_overrides will override the systemd settings
# for the ceph-mgr services.
# For example,to set "PrivateDevices=false" you can specify:
#ceph_mgr_systemd_overrides:
# Service:
# PrivateDevices: False
# ceph_mgr_systemd_overrides:
# Service:
# PrivateDevices: false

View File

@ -64,7 +64,7 @@ dummy:
# ceph_mon_systemd_overrides will override the systemd settings
# for the ceph-mon services.
# For example,to set "PrivateDevices=false" you can specify:
#ceph_mon_systemd_overrides:
# Service:
# PrivateDevices: False
# ceph_mon_systemd_overrides:
# Service:
# PrivateDevices: false

View File

@ -92,8 +92,8 @@ dummy:
#ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
# Note: keys are optional and can be generated, but not on containerized, where
# they must be configered.
#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
###################
@ -106,19 +106,19 @@ dummy:
# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example
#
# Example:
#CACHEINODE {
# #Entries_HWMark = 100000;
#}
# CACHEINODE {
# # Entries_HWMark = 100000;
# }
#
#ganesha_core_param_overrides:
#ganesha_ceph_export_overrides:
#ganesha_rgw_export_overrides:
#ganesha_rgw_section_overrides:
#ganesha_log_overrides:
#ganesha_conf_overrides: |
# CACHEINODE {
# #Entries_HWMark = 100000;
# }
# ganesha_core_param_overrides:
# ganesha_ceph_export_overrides:
# ganesha_rgw_export_overrides:
# ganesha_rgw_section_overrides:
# ganesha_log_overrides:
# ganesha_conf_overrides: |
# CACHEINODE {
# # Entries_HWMark = 100000;
# }
##########
# DOCKER #

View File

@ -33,31 +33,31 @@ dummy:
# All scenario(except 3rd) inherit from the following device declaration
# Note: This scenario uses the ceph-volume lvm batch method to provision OSDs
#devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde
# devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde
#devices: []
# Declare devices to be used as block.db devices
#dedicated_devices:
# - /dev/sdx
# - /dev/sdy
# dedicated_devices:
# - /dev/sdx
# - /dev/sdy
#dedicated_devices: []
# Declare devices to be used as block.wal devices
#bluestore_wal_devices:
# - /dev/nvme0n1
# - /dev/nvme0n2
# bluestore_wal_devices:
# - /dev/nvme0n1
# - /dev/nvme0n2
#bluestore_wal_devices: []
#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# Device discovery is based on the Ansible fact 'ansible_facts["devices"]'
# which reports all the devices on a system. If chosen, all the disks
# found will be passed to ceph-volume lvm batch. You should not be worried on using
@ -68,7 +68,7 @@ dummy:
# Encrypt your OSD device using dmcrypt
# If set to True, no matter which osd_objecstore you use the data will be encrypted
#dmcrypt: False
#dmcrypt: true
# Use ceph-volume to create OSDs from logical volumes.
# lvm_volumes is a list of dictionaries.
@ -177,8 +177,8 @@ dummy:
# NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16
# NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17
# then, the following would run the OSD on the first NUMA node only.
#ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
#ceph_osd_docker_cpuset_mems: "0"
# ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
# ceph_osd_docker_cpuset_mems: "0"
# PREPARE DEVICE
#
@ -199,9 +199,9 @@ dummy:
# ceph_osd_systemd_overrides will override the systemd settings
# for the ceph-osd services.
# For example,to set "PrivateDevices=false" you can specify:
#ceph_osd_systemd_overrides:
# Service:
# PrivateDevices: False
# ceph_osd_systemd_overrides:
# Service:
# PrivateDevices: false
###########

View File

@ -49,7 +49,7 @@ dummy:
# ceph_rbd_mirror_systemd_overrides will override the systemd settings
# for the ceph-rbd-mirror services.
# For example,to set "PrivateDevices=false" you can specify:
#ceph_rbd_mirror_systemd_overrides:
# Service:
# PrivateDevices: False
# ceph_rbd_mirror_systemd_overrides:
# Service:
# PrivateDevices: false

View File

@ -26,10 +26,10 @@ dummy:
# - no-tlsv11
# - no-tls-tickets
#
#virtual_ips:
# - 192.168.238.250
# - 192.168.238.251
# virtual_ips:
# - 192.168.238.250
# - 192.168.238.251
#
#virtual_ip_netmask: 24
#virtual_ip_interface: ens33
# virtual_ip_netmask: 24
# virtual_ip_interface: ens33

View File

@ -45,30 +45,30 @@ dummy:
# If the key doesn't exist it falls back to the default replicated_rule.
# This only works for replicated pool type not erasure.
#rgw_create_pools:
# "{{ rgw_zone }}.rgw.buckets.data":
# pg_num: 64
# type: ec
# ec_profile: myecprofile
# ec_k: 5
# ec_m: 3
# "{{ rgw_zone }}.rgw.buckets.index":
# pg_num: 16
# size: 3
# type: replicated
# "{{ rgw_zone }}.rgw.meta":
# pg_num: 8
# size: 3
# type: replicated
# "{{ rgw_zone }}.rgw.log":
# pg_num: 8
# size: 3
# type: replicated
# "{{ rgw_zone }}.rgw.control":
# pg_num: 8
# size: 3
# type: replicated
# rule_name: foo
# rgw_create_pools:
# "{{ rgw_zone }}.rgw.buckets.data":
# pg_num: 64
# type: ec
# ec_profile: myecprofile
# ec_k: 5
# ec_m: 3
# "{{ rgw_zone }}.rgw.buckets.index":
# pg_num: 16
# size: 3
# type: replicated
# "{{ rgw_zone }}.rgw.meta":
# pg_num: 8
# size: 3
# type: replicated
# "{{ rgw_zone }}.rgw.log":
# pg_num: 8
# size: 3
# type: replicated
# "{{ rgw_zone }}.rgw.control":
# pg_num: 8
# size: 3
# type: replicated
# rule_name: foo
##########
@ -81,8 +81,8 @@ dummy:
# These options can be passed using the 'ceph_rgw_docker_extra_env' variable.
#ceph_rgw_docker_memory_limit: "4096m"
#ceph_rgw_docker_cpu_limit: 8
#ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
#ceph_rgw_docker_cpuset_mems: "0"
# ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
# ceph_rgw_docker_cpuset_mems: "0"
#ceph_rgw_docker_extra_env:
#ceph_config_keys: [] # DON'T TOUCH ME
@ -94,7 +94,7 @@ dummy:
# ceph_rgw_systemd_overrides will override the systemd settings
# for the ceph-rgw services.
# For example,to set "PrivateDevices=false" you can specify:
#ceph_rgw_systemd_overrides:
# Service:
# PrivateDevices: False
# ceph_rgw_systemd_overrides:
# Service:
# PrivateDevices: false

View File

@ -74,7 +74,7 @@ dummy:
# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
# with each others.
#configure_firewall: True
#configure_firewall: true
# Open ports on corresponding nodes if firewall is installed on it
#ceph_mon_firewall_zone: public
@ -120,7 +120,7 @@ dummy:
# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
#upgrade_ceph_packages: False
#upgrade_ceph_packages: false
#ceph_use_distro_backports: false # DEBIAN ONLY
#ceph_directories_mode: "0755"
@ -171,7 +171,7 @@ ceph_repository: rhcs
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@ -229,7 +229,7 @@ ceph_iscsi_config_dev: false
# a URL to the .repo file to be installed on the targets. For deb,
# ceph_custom_repo should be the URL to the repo base.
#
#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
#ceph_custom_repo: https://server.domain.com/ceph-custom-repo
@ -238,14 +238,14 @@ ceph_iscsi_config_dev: false
# Enabled when ceph_repository == 'local'
#
# Path to DESTDIR of the ceph install
#ceph_installation_dir: "/path/to/ceph_installation/"
# ceph_installation_dir: "/path/to/ceph_installation/"
# Whether or not to use installer script rundep_installer.sh
# This script takes in rundep and installs the packages line by line onto the machine
# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
# all runtime dependencies installed
#use_installer: false
# use_installer: false
# Root directory for ceph-ansible
#ansible_dir: "/path/to/ceph-ansible"
# ansible_dir: "/path/to/ceph-ansible"
######################
@ -328,12 +328,12 @@ ceph_iscsi_config_dev: false
#ip_version: ipv4
#mon_host_v1:
# enabled: True
# enabled: true
# suffix: ':6789'
#mon_host_v2:
# suffix: ':3300'
#enable_ceph_volume_debug: False
#enable_ceph_volume_debug: false
##########
# CEPHFS #
@ -405,7 +405,7 @@ ceph_iscsi_config_dev: false
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
#common_single_host_mode: true
# common_single_host_mode: true
## Handlers - restarting daemons after a config change
# if for whatever reasons the content of your ceph configuration changes
@ -527,10 +527,10 @@ ceph_docker_image: "rhceph/rhceph-5-rhel8"
ceph_docker_image_tag: "latest"
ceph_docker_registry: "registry.redhat.io"
ceph_docker_registry_auth: true
#ceph_docker_registry_username:
#ceph_docker_registry_password:
#ceph_docker_http_proxy:
#ceph_docker_https_proxy:
# ceph_docker_registry_username:
# ceph_docker_registry_password:
# ceph_docker_http_proxy:
# ceph_docker_https_proxy:
#ceph_docker_no_proxy: "localhost,127.0.0.1"
## Client only docker image - defaults to {{ ceph_docker_image }}
#ceph_client_docker_image: "{{ ceph_docker_image }}"
@ -563,7 +563,7 @@ containerized_deployment: true
# name: "images"
# rule_name: "my_replicated_rule"
# application: "rbd"
# pg_autoscale_mode: False
# pg_autoscale_mode: false
# pg_num: 16
# pgp_num: 16
# target_size_ratio: 0.2
@ -613,7 +613,7 @@ containerized_deployment: true
#############
# DASHBOARD #
#############
#dashboard_enabled: True
#dashboard_enabled: true
# Choose http or https
# For https, you should set dashboard.crt/key and grafana.crt/key
# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
@ -625,7 +625,7 @@ containerized_deployment: true
#dashboard_admin_user: admin
#dashboard_admin_user_ro: false
# This variable must be set with a strong custom password when dashboard_enabled is True
#dashboard_admin_password: p@ssw0rd
# dashboard_admin_password: p@ssw0rd
# We only need this for SSL (https) connections
#dashboard_crt: ''
#dashboard_key: ''
@ -634,7 +634,7 @@ containerized_deployment: true
#dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
#dashboard_rgw_api_user_id: ceph-dashboard
#dashboard_rgw_api_admin_resource: ''
#dashboard_rgw_api_no_ssl_verify: False
#dashboard_rgw_api_no_ssl_verify: false
#dashboard_frontend_vip: ''
#dashboard_disabled_features: []
#prometheus_frontend_vip: ''
@ -643,7 +643,7 @@ node_exporter_container_image: registry.redhat.io/openshift4/ose-prometheus-node
#node_exporter_port: 9100
#grafana_admin_user: admin
# This variable must be set with a strong custom password when dashboard_enabled is True
#grafana_admin_password: admin
# grafana_admin_password: admin
# We only need this for SSL (https) connections
#grafana_crt: ''
#grafana_key: ''
@ -675,7 +675,7 @@ grafana_container_image: registry.redhat.io/rhceph/rhceph-5-dashboard-rhel8:5
#grafana_plugins:
# - vonage-status-panel
# - grafana-piechart-panel
#grafana_allow_embedding: True
#grafana_allow_embedding: true
#grafana_port: 3000
#grafana_network: "{{ public_network }}"
#grafana_conf_overrides: {}
@ -691,7 +691,7 @@ prometheus_container_image: registry.redhat.io/openshift4/ose-prometheus:v4.6
#prometheus_conf_overrides: {}
# Uncomment out this variable if you need to customize the retention period for prometheus storage.
# set it to '30d' if you want to retain 30 days of data.
#prometheus_storage_tsdb_retention_time: 15d
# prometheus_storage_tsdb_retention_time: 15d
alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alertmanager:v4.6
#alertmanager_container_cpu_period: 100000
#alertmanager_container_cpu_cores: 2
@ -749,11 +749,11 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert
#
# Example:
#
#rbd_devices:
# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
# rbd_devices:
# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
#rbd_devices: {}
# client_connections defines the client ACL's to restrict client access to specific LUNs
@ -767,20 +767,19 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert
#
# Example:
#
#client_connections:
# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
# client_connections:
# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
#client_connections: {}
#no_log_on_ceph_key_tasks: True
#no_log_on_ceph_key_tasks: true
###############
# DEPRECATION #
###############
######################################################
# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
# *DO NOT* MODIFY THEM #
@ -788,5 +787,5 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert
#container_exec_cmd:
#docker: false
#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"

View File

@ -6,26 +6,30 @@
# Ensure that all monitors are present in the mons
# group in your inventory so that the ceph configuration file
# is created correctly for the new OSD(s).
- hosts: mons
- name: Pre-requisites operations for adding new monitor(s)
hosts: mons
gather_facts: false
vars:
delegate_facts_host: true
become: true
pre_tasks:
- import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
- name: Import raw_install_python tasks
ansible.builtin.import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
- name: gather facts
setup:
- name: Gather facts
ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: gather and delegate facts
setup:
- name: Gather and delegate facts
ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
@ -36,52 +40,84 @@
run_once: true
when: delegate_facts_host | bool
tasks:
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
- import_role:
- name: Import ceph-validate role
ansible.builtin.import_role:
name: ceph-validate
- import_role:
- name: Import ceph-infra role
ansible.builtin.import_role:
name: ceph-infra
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-common role
ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- import_role:
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
when: containerized_deployment | bool
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- hosts: mons
- name: Deploy Ceph monitors
hosts: mons
gather_facts: false
become: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-config role
ansible.builtin.import_role:
name: ceph-config
- import_role:
- name: Import ceph-mon role
ansible.builtin.import_role:
name: ceph-mon
- import_role:
- name: Import ceph-crash role
ansible.builtin.import_role:
name: ceph-crash
when: containerized_deployment | bool
# update config files on OSD nodes
- hosts: osds
- name: Update config file on OSD nodes
hosts: osds
gather_facts: true
become: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-config role
ansible.builtin.import_role:
name: ceph-config

View File

@ -19,12 +19,13 @@
# ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=backup -e target_node=mon01
# ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=restore -e target_node=mon01
- hosts: localhost
- name: Backup and restore Ceph files
hosts: localhost
become: true
gather_facts: true
tasks:
- name: exit playbook, if user did not set the source node
fail:
- name: Exit playbook, if user did not set the source node
ansible.builtin.fail:
msg: >
"You must pass the node name: -e target_node=<inventory_name>.
The name must match what is set in your inventory."
@ -32,71 +33,73 @@
- target_node is not defined
or target_node not in groups.get('all', [])
- name: exit playbook, if user did not set the backup directory
fail:
- name: Exit playbook, if user did not set the backup directory
ansible.builtin.fail:
msg: >
"you must pass the backup directory path: -e backup_dir=<backup directory path>"
when: backup_dir is not defined
- name: exit playbook, if user did not set the playbook mode (backup|restore)
fail:
- name: Exit playbook, if user did not set the playbook mode (backup|restore)
ansible.builtin.fail:
msg: >
"you must pass the mode: -e mode=<backup|restore>"
when:
- mode is not defined
or mode not in ['backup', 'restore']
- name: gather facts on source node
setup:
- name: Gather facts on source node
ansible.builtin.setup:
delegate_to: "{{ target_node }}"
delegate_facts: true
- name: backup mode
- name: Backup mode
when: mode == 'backup'
block:
- name: create a temp directory
- name: Create a temp directory
ansible.builtin.tempfile:
state: directory
suffix: ansible-archive-ceph
register: tmp_dir
delegate_to: "{{ target_node }}"
- name: archive files
archive:
- name: Archive files
community.general.archive:
path: "{{ item }}"
dest: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar"
format: tar
mode: "0644"
delegate_to: "{{ target_node }}"
loop:
- /etc/ceph
- /var/lib/ceph
- name: create backup directory
- name: Create backup directory
become: false
file:
ansible.builtin.file:
path: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}"
state: directory
mode: "0755"
- name: backup files
fetch:
- name: Backup files
ansible.builtin.fetch:
src: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar"
dest: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar"
flat: yes
flat: true
loop:
- /etc/ceph
- /var/lib/ceph
delegate_to: "{{ target_node }}"
- name: remove temp directory
file:
- name: Remove temp directory
ansible.builtin.file:
path: "{{ tmp_dir.path }}"
state: absent
delegate_to: "{{ target_node }}"
- name: restore mode
- name: Restore mode
when: mode == 'restore'
block:
- name: unarchive files
- name: Unarchive files
ansible.builtin.unarchive:
src: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar"
dest: "{{ item | dirname }}"

View File

@ -4,7 +4,8 @@
#
# It currently runs on localhost
- hosts: localhost
- name: CephX key management examples
hosts: localhost
gather_facts: false
vars:
cluster: ceph
@ -17,12 +18,12 @@
- client.leseb1
- client.pythonnnn
keys_to_create:
- { name: client.pythonnnn, caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" }
- { name: client.existpassss, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" }
- { name: client.path, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" }
- { name: client.pythonnnn, caps: { mon: "allow rwx", mds: "allow *" }, mode: "0600" }
- { name: client.existpassss, caps: { mon: "allow r", osd: "allow *" }, mode: "0600" }
- { name: client.path, caps: { mon: "allow r", osd: "allow *" }, mode: "0600" }
tasks:
- name: create ceph key(s) module
- name: Create ceph key(s) module
ceph_key:
name: "{{ item.name }}"
caps: "{{ item.caps }}"
@ -31,7 +32,7 @@
containerized: "{{ container_exec_cmd | default(False) }}"
with_items: "{{ keys_to_create }}"
- name: update ceph key(s)
- name: Update ceph key(s)
ceph_key:
name: "{{ item.name }}"
state: update
@ -40,7 +41,7 @@
containerized: "{{ container_exec_cmd | default(False) }}"
with_items: "{{ keys_to_create }}"
- name: delete ceph key(s)
- name: Delete ceph key(s)
ceph_key:
name: "{{ item }}"
state: absent
@ -48,7 +49,7 @@
containerized: "{{ container_exec_cmd | default(False) }}"
with_items: "{{ keys_to_delete }}"
- name: info ceph key(s)
- name: Info ceph key(s)
ceph_key:
name: "{{ item }}"
state: info
@ -58,7 +59,7 @@
ignore_errors: true
with_items: "{{ keys_to_info }}"
- name: list ceph key(s)
- name: List ceph key(s)
ceph_key:
state: list
cluster: "{{ cluster }}"
@ -66,7 +67,7 @@
register: list_keys
ignore_errors: true
- name: fetch_initial_keys
- name: Fetch_initial_keys # noqa: ignore-errors
ceph_key:
state: fetch_initial_keys
cluster: "{{ cluster }}"

File diff suppressed because it is too large.

View File

@ -1,5 +1,5 @@
---
- name: gather facts and prepare system for cephadm
- name: Gather facts and prepare system for cephadm
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
@ -15,23 +15,24 @@
vars:
delegate_facts_host: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: validate if monitor group doesn't exist or empty
fail:
- name: Validate if monitor group doesn't exist or empty
ansible.builtin.fail:
msg: "you must add a [mons] group and add at least one node."
run_once: true
when: groups[mon_group_name] is undefined or groups[mon_group_name] | length == 0
- name: validate if manager group doesn't exist or empty
fail:
- name: Validate if manager group doesn't exist or empty
ansible.builtin.fail:
msg: "you must add a [mgrs] group and add at least one node."
run_once: true
when: groups[mgr_group_name] is undefined or groups[mgr_group_name] | length == 0
- name: validate monitor network configuration
fail:
- name: Validate monitor network configuration
ansible.builtin.fail:
msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided"
when:
- mon_group_name in group_names
@ -39,38 +40,38 @@
- monitor_address_block == 'subnet'
- monitor_interface == 'interface'
- name: validate dashboard configuration
- name: Validate dashboard configuration
when: dashboard_enabled | bool
run_once: true
block:
- name: fail if [monitoring] group doesn't exist or empty
fail:
- name: Fail if [monitoring] group doesn't exist or empty
ansible.builtin.fail:
msg: "you must add a [monitoring] group and add at least one node."
when: groups[monitoring_group_name] is undefined or groups[monitoring_group_name] | length == 0
- name: fail when dashboard_admin_password is not set
fail:
- name: Fail when dashboard_admin_password is not set
ansible.builtin.fail:
msg: "you must set dashboard_admin_password."
when: dashboard_admin_password is undefined
- name: validate container registry credentials
fail:
- name: Validate container registry credentials
ansible.builtin.fail:
msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set'
when:
- ceph_docker_registry_auth | bool
- (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or
(ceph_docker_registry_username | length == 0 or ceph_docker_registry_password | length == 0)
- name: gather facts
setup:
- name: Gather facts
ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when: not delegate_facts_host | bool
- name: gather and delegate facts
setup:
- name: Gather and delegate facts
ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
@ -81,76 +82,82 @@
run_once: true
when: delegate_facts_host | bool
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- name: check if it is atomic host
stat:
- name: Check if it is atomic host
ansible.builtin.stat:
path: /run/ostree-booted
register: stat_ostree
- name: set_fact is_atomic
set_fact:
- name: Set_fact is_atomic
ansible.builtin.set_fact:
is_atomic: "{{ stat_ostree.stat.exists }}"
- import_role:
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
tasks_from: registry.yml
when: ceph_docker_registry_auth | bool
- name: configure repository for installing cephadm
- name: Configure repository for installing cephadm
vars:
ceph_origin: repository
ceph_repository: community
block:
- name: validate repository variables
import_role:
- name: Validate repository variables
ansible.builtin.import_role:
name: ceph-validate
tasks_from: check_repository.yml
- name: configure repository
import_role:
- name: Configure repository
ansible.builtin.import_role:
name: ceph-common
tasks_from: "configure_repository.yml"
- name: install cephadm requirements
package:
- name: Install cephadm requirements
ansible.builtin.package:
name: ['python3', 'lvm2']
register: result
until: result is succeeded
- name: install cephadm
package:
- name: Install cephadm
ansible.builtin.package:
name: cephadm
register: result
until: result is succeeded
- name: set_fact cephadm_cmd
set_fact:
- name: Set_fact cephadm_cmd
ansible.builtin.set_fact:
cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}"
- name: bootstrap the cluster
- name: Bootstrap the cluster
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true
gather_facts: false
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: set_monitor_address.yml
- name: create /etc/ceph directory
file:
- name: Create /etc/ceph directory
ansible.builtin.file:
path: /etc/ceph
state: directory
mode: "0755"
- name: bootstrap the new cluster
- name: Bootstrap the new cluster
cephadm_bootstrap:
mon_ip: "{{ _current_monitor_address }}"
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
@ -164,46 +171,46 @@
ssh_user: "{{ cephadm_ssh_user | default('root') }}"
ssh_config: "{{ cephadm_ssh_config | default(omit) }}"
- name: set default container image in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- name: Set default container image in ceph configuration
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: set container image base in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}"
- name: Set container image base in ceph configuration
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: set dashboard container image in ceph mgr configuration
- name: Set dashboard container image in ceph mgr configuration
when: dashboard_enabled | bool
block:
- name: set alertmanager container image in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}"
- name: Set alertmanager container image in ceph configuration
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: set grafana container image in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}"
- name: Set grafana container image in ceph configuration
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: set node-exporter container image in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}"
- name: Set node-exporter container image in ceph configuration
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: set prometheus container image in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}"
- name: Set prometheus container image in ceph configuration
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: add the other nodes
- name: Add the other nodes
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
@ -217,11 +224,12 @@
become: true
gather_facts: false
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: get the cephadm ssh pub key
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} cephadm get-pub-key"
- name: Get the cephadm ssh pub key
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} cephadm get-pub-key"
changed_when: false
run_once: true
register: cephadm_pubpkey
@ -229,35 +237,35 @@
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: allow cephadm key for {{ cephadm_ssh_user | default('root') }} account
authorized_key:
- name: Allow cephadm key
ansible.posix.authorized_key:
user: "{{ cephadm_ssh_user | default('root') }}"
key: '{{ cephadm_pubpkey.stdout }}'
- name: run cephadm prepare-host
command: cephadm prepare-host
- name: Run cephadm prepare-host
ansible.builtin.command: cephadm prepare-host
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: manage nodes with cephadm - ipv4
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
- name: Manage nodes with cephadm - ipv4
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
when: ip_version == 'ipv4'
- name: manage nodes with cephadm - ipv6
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
- name: Manage nodes with cephadm - ipv6
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
when: ip_version == 'ipv6'
- name: add ceph label for core component
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph"
- name: Add ceph label for core component
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: inventory_hostname in groups.get(mon_group_name, []) or
@ -269,22 +277,23 @@
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: adjust service placement
- name: Adjust service placement
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true
gather_facts: false
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: update the placement of monitor hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mon --placement='label:{{ mon_group_name }}'"
- name: Update the placement of monitor hosts
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mon --placement='label:{{ mon_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: waiting for the monitor to join the quorum...
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} quorum_status --format json"
- name: Waiting for the monitor to join the quorum...
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} quorum_status --format json"
changed_when: false
register: ceph_health_raw
until: (ceph_health_raw.stdout | from_json)["quorum_names"] | length == groups.get(mon_group_name, []) | length
@ -293,83 +302,85 @@
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of manager hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mgr --placement='label:{{ mgr_group_name }}'"
- name: Update the placement of manager hosts
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mgr --placement='label:{{ mgr_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of crash hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'"
- name: Update the placement of crash hosts
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: adjust monitoring service placement
- name: Adjust monitoring service placement
hosts: "{{ monitoring_group_name|default('monitoring') }}"
become: true
gather_facts: false
tasks:
- import_role:
- name: Import ceph-defaults
ansible.builtin.import_role:
name: ceph-defaults
- name: with dashboard enabled
- name: With dashboard enabled
when: dashboard_enabled | bool
delegate_to: '{{ groups[mon_group_name][0] }}'
run_once: true
block:
- name: enable the prometheus module
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} mgr module enable prometheus"
- name: Enable the prometheus module
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} mgr module enable prometheus"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of alertmanager hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='label:{{ monitoring_group_name }}'"
- name: Update the placement of alertmanager hosts
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='label:{{ monitoring_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of grafana hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply grafana --placement='label:{{ monitoring_group_name }}'"
- name: Update the placement of grafana hosts
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply grafana --placement='label:{{ monitoring_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of prometheus hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply prometheus --placement='label:{{ monitoring_group_name }}'"
- name: Update the placement of prometheus hosts
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply prometheus --placement='label:{{ monitoring_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of node-exporter hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'"
- name: Update the placement of node-exporter hosts
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: print information
- name: Print information
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true
gather_facts: false
tasks:
- import_role:
- name: Import ceph-defaults
ansible.builtin.import_role:
name: ceph-defaults
- name: show ceph orchestrator services
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ls --refresh"
- name: Show ceph orchestrator services
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ls --refresh"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: show ceph orchestrator daemons
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ps --refresh"
- name: Show ceph orchestrator daemons
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ps --refresh"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: inform users about cephadm
debug:
- name: Inform users about cephadm
ansible.builtin.debug:
msg: |
This Ceph cluster is now ready to receive more configuration like
adding OSD and MDS daemons, creating pools, or managing keyrings.
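# Purely as an illustration of such next steps (hosts, devices and names below
# are placeholders, not part of this playbook):
#   ceph orch daemon add osd <host>:<device>
#   ceph orch apply mds <fs_name> --placement="<host1> <host2>"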

@ -5,54 +5,58 @@
# It is *not* intended to restart services since we don't want multiple service
# restarts.
- hosts:
- mons
- osds
- mdss
- rgws
- nfss
- rbdmirrors
- clients
- iscsigws
- mgrs
- monitoring
- name: Pre-requisite and facts gathering
hosts:
- mons
- osds
- mdss
- rgws
- nfss
- rbdmirrors
- clients
- iscsigws
- mgrs
- monitoring
gather_facts: false
become: True
become: true
any_errors_fatal: true
vars:
delegate_facts_host: True
delegate_facts_host: true
pre_tasks:
- import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
- name: Import raw_install_python tasks
ansible.builtin.import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
# pre-tasks for the following imports
- name: gather facts
setup:
- name: Gather facts
ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- name: gather and delegate facts
setup:
- name: Gather and delegate facts
ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
delegate_facts: True
delegate_facts: true
with_items: "{{ groups['all'] | difference(groups.get(client_group_name | default('clients'), [])) }}"
run_once: true
when: delegate_facts_host | bool
- hosts:
- name: Migrate to podman
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}"
- "{{ mds_group_name | default('mdss') }}"
@ -65,20 +69,25 @@
gather_facts: false
become: true
tasks:
- name: set_fact docker2podman and container_binary
set_fact:
docker2podman: True
- name: Set_fact docker2podman and container_binary
ansible.builtin.set_fact:
docker2podman: true
container_binary: podman
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- name: install podman
package:
- name: Install podman
ansible.builtin.package:
name: podman
state: present
register: result
@ -86,17 +95,17 @@
tags: with_pkg
when: not is_atomic | bool
- name: check podman presence # noqa : 305
shell: command -v podman
- name: Check podman presence # noqa command-instead-of-shell
ansible.builtin.shell: command -v podman
register: podman_presence
changed_when: false
failed_when: false
- name: pulling images from docker daemon
- name: Pulling images from docker daemon
when: podman_presence.rc == 0
block:
- name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image from docker daemon"
command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- name: Pulling Ceph container image from docker daemon
ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
register: pull_image
until: pull_image.rc == 0
@ -111,8 +120,8 @@
inventory_hostname in groups.get(iscsi_gw_group_name, []) or
inventory_hostname in groups.get(nfs_group_name, [])
- name: "pulling alertmanager/grafana/prometheus images from docker daemon"
command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}"
- name: Pulling alertmanager/grafana/prometheus images from docker daemon
ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}"
changed_when: false
register: pull_image
until: pull_image.rc == 0
@ -126,8 +135,8 @@
- dashboard_enabled | bool
- inventory_hostname in groups.get(monitoring_group_name, [])
- name: "pulling {{ node_exporter_container_image }} image from docker daemon"
command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ node_exporter_container_image }}"
- name: Pulling node_exporter image from docker daemon
ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ node_exporter_container_image }}"
changed_when: false
register: pull_image
until: pull_image.rc == 0
@ -135,47 +144,56 @@
delay: 10
when: dashboard_enabled | bool
- import_role:
- name: Import ceph-mon role
ansible.builtin.import_role:
name: ceph-mon
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mon_group_name, [])
- import_role:
- name: Import ceph-iscsi-gw role
ansible.builtin.import_role:
name: ceph-iscsi-gw
tasks_from: systemd.yml
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- import_role:
- name: Import ceph-mds role
ansible.builtin.import_role:
name: ceph-mds
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mds_group_name, [])
- import_role:
- name: Import ceph-mgr role
ansible.builtin.import_role:
name: ceph-mgr
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mgr_group_name, [])
- import_role:
- name: Import ceph-nfs role
ansible.builtin.import_role:
name: ceph-nfs
tasks_from: systemd.yml
when: inventory_hostname in groups.get(nfs_group_name, [])
- import_role:
- name: Import ceph-osd role
ansible.builtin.import_role:
name: ceph-osd
tasks_from: systemd.yml
when: inventory_hostname in groups.get(osd_group_name, [])
- import_role:
- name: Import ceph-rbd-mirror role
ansible.builtin.import_role:
name: ceph-rbd-mirror
tasks_from: systemd.yml
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
- import_role:
- name: Import ceph-rgw role
ansible.builtin.import_role:
name: ceph-rgw
tasks_from: systemd.yml
when: inventory_hostname in groups.get(rgw_group_name, [])
- import_role:
- name: Import ceph-crash role
ansible.builtin.import_role:
name: ceph-crash
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mon_group_name, []) or
@ -185,28 +203,32 @@
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: dashboard configuration
- name: Dashboard configuration
when: dashboard_enabled | bool
block:
- import_role:
- name: Import ceph-node-exporter role
ansible.builtin.import_role:
name: ceph-node-exporter
tasks_from: systemd.yml
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: grafana.yml
when: inventory_hostname in groups.get(monitoring_group_name, [])
- import_role:
- name: Import ceph-grafana role
ansible.builtin.import_role:
name: ceph-grafana
tasks_from: systemd.yml
when: inventory_hostname in groups.get(monitoring_group_name, [])
- import_role:
- name: Import ceph-prometheus role
ansible.builtin.import_role:
name: ceph-prometheus
tasks_from: systemd.yml
when: inventory_hostname in groups.get(monitoring_group_name, [])
- name: reload systemd daemon
systemd:
daemon_reload: yes
- name: Reload systemd daemon
ansible.builtin.systemd:
daemon_reload: true

@ -1,20 +1,22 @@
- hosts:
- mons
- osds
- mdss
- rgws
- nfss
- rbdmirrors
- clients
- mgrs
- iscsigws
---
- name: Gather ceph logs
hosts:
- mons
- osds
- mdss
- rgws
- nfss
- rbdmirrors
- clients
- mgrs
- iscsigws
gather_facts: false
become: yes
become: true
tasks:
- name: create a temp directory
tempfile:
- name: Create a temp directory
ansible.builtin.tempfile:
state: directory
prefix: ceph_ansible
run_once: true
@ -22,17 +24,17 @@
become: false
delegate_to: localhost
- name: set_fact lookup_ceph_config - lookup keys, conf and logs
find:
- name: Set_fact lookup_ceph_config - lookup keys, conf and logs
ansible.builtin.find:
paths:
- /etc/ceph
- /var/log/ceph
register: ceph_collect
- name: collect ceph logs, config and keys in "{{ localtempfile.path }}" on the machine running ansible
fetch:
- name: Collect ceph logs, config and keys on the machine running ansible
ansible.builtin.fetch:
src: "{{ item.path }}"
dest: "{{ localtempfile.path }}"
fail_on_missing: no
flat: no
fail_on_missing: false
flat: false
with_items: "{{ ceph_collect.files }}"

@ -1,4 +1,5 @@
- name: creates logical volumes for the bucket index or fs journals on a single device.
---
- name: Creates logical volumes for the bucket index or fs journals on a single device.
become: true
hosts: osds
@ -21,78 +22,79 @@
tasks:
- name: include vars of lv_vars.yaml
include_vars:
file: lv_vars.yaml # noqa 505
failed_when: false
- name: Include vars of lv_vars.yaml
ansible.builtin.include_vars:
file: lv_vars.yaml # noqa missing-import
failed_when: false
# ensure nvme_device is set
- name: fail if nvme_device is not defined
fail:
msg: "nvme_device has not been set by the user"
when: nvme_device is undefined or nvme_device == 'dummy'
# ensure nvme_device is set
- name: Fail if nvme_device is not defined
ansible.builtin.fail:
msg: "nvme_device has not been set by the user"
when: nvme_device is undefined or nvme_device == 'dummy'
# need to check if lvm2 is installed
- name: install lvm2
package:
name: lvm2
state: present
register: result
until: result is succeeded
# need to check if lvm2 is installed
- name: Install lvm2
ansible.builtin.package:
name: lvm2
state: present
register: result
until: result is succeeded
# Make entire nvme device a VG
- name: add nvme device as lvm pv
lvg:
force: yes
pvs: "{{ nvme_device }}"
pesize: 4
state: present
vg: "{{ nvme_vg_name }}"
# Make entire nvme device a VG
- name: Add nvme device as lvm pv
community.general.lvg:
force: true
pvs: "{{ nvme_device }}"
pesize: 4
state: present
vg: "{{ nvme_vg_name }}"
- name: create lvs for fs journals for the bucket index on the nvme device
lvol:
lv: "{{ item.journal_name }}"
vg: "{{ nvme_vg_name }}"
size: "{{ journal_size }}"
pvs: "{{ nvme_device }}"
with_items: "{{ nvme_device_lvs }}"
- name: Create lvs for fs journals for the bucket index on the nvme device
community.general.lvol:
lv: "{{ item.journal_name }}"
vg: "{{ nvme_vg_name }}"
size: "{{ journal_size }}"
pvs: "{{ nvme_device }}"
with_items: "{{ nvme_device_lvs }}"
- name: create lvs for fs journals for hdd devices
lvol:
lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ nvme_vg_name }}"
size: "{{ journal_size }}"
with_items: "{{ hdd_devices }}"
- name: Create lvs for fs journals for hdd devices
community.general.lvol:
lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ nvme_vg_name }}"
size: "{{ journal_size }}"
with_items: "{{ hdd_devices }}"
- name: create the lv for data portion of the bucket index on the nvme device
lvol:
lv: "{{ item.lv_name }}"
vg: "{{ nvme_vg_name }}"
size: "{{ item.size }}"
pvs: "{{ nvme_device }}"
with_items: "{{ nvme_device_lvs }}"
- name: Create the lv for data portion of the bucket index on the nvme device
community.general.lvol:
lv: "{{ item.lv_name }}"
vg: "{{ nvme_vg_name }}"
size: "{{ item.size }}"
pvs: "{{ nvme_device }}"
with_items: "{{ nvme_device_lvs }}"
# Make sure all hdd devices have a unique volume group
- name: create vgs for all hdd devices
lvg:
force: yes
pvs: "{{ item }}"
pesize: 4
state: present
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
with_items: "{{ hdd_devices }}"
# Make sure all hdd devices have a unique volume group
- name: Create vgs for all hdd devices
community.general.lvg:
force: true
pvs: "{{ item }}"
pesize: 4
state: present
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
with_items: "{{ hdd_devices }}"
- name: create lvs for the data portion on hdd devices
lvol:
lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
size: "{{ hdd_lv_size }}"
pvs: "{{ item }}"
with_items: "{{ hdd_devices }}"
- name: Create lvs for the data portion on hdd devices
community.general.lvol:
lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
size: "{{ hdd_lv_size }}"
pvs: "{{ item }}"
with_items: "{{ hdd_devices }}"
- name: "write output for osds.yml to {{ logfile_path }}"
become: false
copy:
content: "{{ logfile }}"
dest: "{{ logfile_path }}"
delegate_to: localhost
- name: Write output for osds.yml
become: false
ansible.builtin.copy:
content: "{{ logfile }}"
dest: "{{ logfile_path }}"
mode: preserve
delegate_to: localhost

@ -1,108 +1,109 @@
- name: tear down existing osd filesystems then logical volumes, volume groups, and physical volumes
---
- name: Tear down existing osd filesystems then logical volumes, volume groups, and physical volumes
become: true
hosts: osds
vars_prompt:
- name: ireallymeanit
- name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to tear down the logical volumes?
default: 'no'
private: no
private: false
tasks:
- name: exit playbook, if user did not mean to tear down logical volumes
fail:
msg: >
"Exiting lv-teardown playbook, logical volumes were NOT torn down.
To tear down the logical volumes, either say 'yes' on the prompt
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- name: Exit playbook, if user did not mean to tear down logical volumes
ansible.builtin.fail:
msg: >
"Exiting lv-teardown playbook, logical volumes were NOT torn down.
To tear down the logical volumes, either say 'yes' on the prompt
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- name: include vars of lv_vars.yaml
include_vars:
file: lv_vars.yaml # noqa 505
failed_when: false
- name: Include vars of lv_vars.yaml
ansible.builtin.include_vars:
file: lv_vars.yaml # noqa missing-import
failed_when: false
# need to check if lvm2 is installed
- name: install lvm2
package:
name: lvm2
state: present
register: result
until: result is succeeded
# need to check if lvm2 is installed
- name: Install lvm2
ansible.builtin.package:
name: lvm2
state: present
register: result
until: result is succeeded
# BEGIN TEARDOWN
- name: find any existing osd filesystems
shell: |
set -o pipefail;
grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}'
register: old_osd_filesystems
changed_when: false
- name: Find any existing osd filesystems
ansible.builtin.shell: |
set -o pipefail;
grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}'
register: old_osd_filesystems
changed_when: false
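# The pipeline above lists the mount points of any existing OSD data
# directories, typically lines such as /var/lib/ceph/osd/ceph-0 (cluster name
# and id will vary), which the next task then unmounts one by one.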
- name: tear down any existing osd filesystem
ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items: "{{ old_osd_filesystems.stdout_lines }}"
- name: Tear down any existing osd filesystem
ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items: "{{ old_osd_filesystems.stdout_lines }}"
- name: kill all lvm commands that may have been hung
command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n"
failed_when: false
changed_when: false
- name: Kill all lvm commands that may have been hung
ansible.builtin.command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n"
failed_when: false
changed_when: false
## Logical Vols
- name: tear down existing lv for bucket index
lvol:
lv: "{{ item.lv_name }}"
vg: "{{ nvme_vg_name }}"
state: absent
force: yes
with_items: "{{ nvme_device_lvs }}"
## Logical Vols
- name: Tear down existing lv for bucket index
community.general.lvol:
lv: "{{ item.lv_name }}"
vg: "{{ nvme_vg_name }}"
state: absent
force: true
with_items: "{{ nvme_device_lvs }}"
- name: tear down any existing hdd data lvs
lvol:
lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
state: absent
force: yes
with_items: "{{ hdd_devices }}"
- name: Tear down any existing hdd data lvs
community.general.lvol:
lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
state: absent
force: true
with_items: "{{ hdd_devices }}"
- name: tear down any existing lv of journal for bucket index
lvol:
lv: "{{ item.journal_name }}"
vg: "{{ nvme_vg_name }}"
state: absent
force: yes
with_items: "{{ nvme_device_lvs }}"
- name: Tear down any existing lv of journal for bucket index
community.general.lvol:
lv: "{{ item.journal_name }}"
vg: "{{ nvme_vg_name }}"
state: absent
force: true
with_items: "{{ nvme_device_lvs }}"
- name: tear down any existing lvs of hdd journals
lvol:
lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ nvme_vg_name }}"
state: absent
force: yes
with_items: "{{ hdd_devices }}"
- name: Tear down any existing lvs of hdd journals
community.general.lvol:
lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ nvme_vg_name }}"
state: absent
force: true
with_items: "{{ hdd_devices }}"
## Volume Groups
- name: remove vg on nvme device
lvg:
vg: "{{ nvme_vg_name }}"
state: absent
force: yes
## Volume Groups
- name: Remove vg on nvme device
community.general.lvg:
vg: "{{ nvme_vg_name }}"
state: absent
force: true
- name: remove vg for each hdd device
lvg:
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
state: absent
force: yes
with_items: "{{ hdd_devices }}"
- name: Remove vg for each hdd device
community.general.lvg:
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
state: absent
force: true
with_items: "{{ hdd_devices }}"
## Physical Vols
- name: tear down pv for nvme device
command: "pvremove --force --yes {{ nvme_device }}"
changed_when: false
## Physical Vols
- name: Tear down pv for nvme device
ansible.builtin.command: "pvremove --force --yes {{ nvme_device }}"
changed_when: false
- name: tear down pv for each hdd device
command: "pvremove --force --yes {{ item }}"
changed_when: false
with_items: "{{ hdd_devices }}"
- name: Tear down pv for each hdd device
ansible.builtin.command: "pvremove --force --yes {{ item }}"
changed_when: false
with_items: "{{ hdd_devices }}"

@ -13,17 +13,17 @@
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
- name: confirm whether user really meant to purge the dashboard
- name: Confirm whether user really meant to purge the dashboard
hosts: localhost
gather_facts: false
vars_prompt:
- name: ireallymeanit
- name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to purge the dashboard?
default: 'no'
private: no
private: false
tasks:
- name: exit playbook, if user did not mean to purge dashboard
fail:
- name: Exit playbook, if user did not mean to purge dashboard
ansible.builtin.fail:
msg: >
"Exiting purge-dashboard playbook, dashboard was NOT purged.
To purge the dashboard, either say 'yes' on the prompt or
@ -31,18 +31,18 @@
invoking the playbook"
when: ireallymeanit != 'yes'
- name: import_role ceph-defaults
import_role:
- name: Import_role ceph-defaults
ansible.builtin.import_role:
name: ceph-defaults
- name: check if a legacy grafana-server group exists
import_role:
- name: Check if a legacy grafana-server group exists
ansible.builtin.import_role:
name: ceph-facts
tasks_from: convert_grafana_server_group_name.yml
when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0
- name: gather facts on all hosts
- name: Gather facts on all hosts
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
@ -55,9 +55,11 @@
- "{{ monitoring_group_name | default('monitoring') }}"
become: true
tasks:
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: Gather facts on all Ceph hosts for following reference
ansible.builtin.debug:
msg: "gather facts on all Ceph hosts for following reference"
- name: purge node exporter
- name: Purge node exporter
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
@ -71,58 +73,62 @@
gather_facts: false
become: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- name: disable node_exporter service
service:
- name: Disable node_exporter service
ansible.builtin.service:
name: node_exporter
state: stopped
enabled: no
enabled: false
failed_when: false
- name: remove node_exporter service files
file:
- name: Remove node_exporter service files
ansible.builtin.file:
name: "{{ item }}"
state: absent
loop:
- /etc/systemd/system/node_exporter.service
- /run/node_exporter.service-cid
- name: remove node-exporter image
command: "{{ container_binary }} rmi {{ node_exporter_container_image }}"
- name: Remove node-exporter image
ansible.builtin.command: "{{ container_binary }} rmi {{ node_exporter_container_image }}"
changed_when: false
failed_when: false
- name: purge ceph monitoring
- name: Purge ceph monitoring
hosts: "{{ monitoring_group_name | default('monitoring') }}"
gather_facts: false
become: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- name: stop services
service:
- name: Stop services
ansible.builtin.service:
name: "{{ item }}"
state: stopped
enabled: no
enabled: false
failed_when: false
loop:
- alertmanager
- prometheus
- grafana-server
- name: remove systemd service files
file:
- name: Remove systemd service files
ansible.builtin.file:
name: "{{ item }}"
state: absent
loop:
@ -133,8 +139,8 @@
- /run/prometheus.service-cid
- /run/grafana-server.service-cid
- name: remove ceph dashboard container images
command: "{{ container_binary }} rmi {{ item }}"
- name: Remove ceph dashboard container images
ansible.builtin.command: "{{ container_binary }} rmi {{ item }}"
loop:
- "{{ alertmanager_container_image }}"
- "{{ prometheus_container_image }}"
@ -142,16 +148,16 @@
changed_when: false
failed_when: false
- name: remove ceph-grafana-dashboards package on RedHat or SUSE
package:
- name: Remove ceph-grafana-dashboards package on RedHat or SUSE
ansible.builtin.package:
name: ceph-grafana-dashboards
state: absent
when:
- not containerized_deployment | bool
- ansible_facts['os_family'] in ['RedHat', 'Suse']
- name: remove data
file:
- name: Remove data
ansible.builtin.file:
name: "{{ item }}"
state: absent
loop:
@ -162,7 +168,7 @@
- "{{ prometheus_data_dir }}"
- /var/lib/grafana
- name: purge ceph dashboard
- name: Purge ceph dashboard
hosts: "{{ groups[mgr_group_name] | default(groups[mon_group_name]) | default(omit) }}"
gather_facts: false
become: true
@ -170,14 +176,16 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- name: remove the dashboard admin user
- name: Remove the dashboard admin user
ceph_dashboard_user:
name: "{{ dashboard_admin_user }}"
cluster: "{{ cluster }}"
@ -185,7 +193,7 @@
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: remove radosgw system user
- name: Remove radosgw system user
radosgw_user:
name: "{{ dashboard_rgw_api_user_id }}"
cluster: "{{ cluster }}"
@ -194,7 +202,7 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
when: groups.get(rgw_group_name, []) | length > 0
- name: disable mgr dashboard and prometheus modules
- name: Disable mgr dashboard and prometheus modules
ceph_mgr_module:
name: "{{ item }}"
cluster: "{{ cluster }}"
@ -205,8 +213,8 @@
- dashboard
- prometheus
- name: remove TLS certificate and key files
file:
- name: Remove TLS certificate and key files
ansible.builtin.file:
name: "/etc/ceph/ceph-dashboard.{{ item }}"
state: absent
loop:
@ -214,8 +222,8 @@
- key
when: dashboard_protocol == "https"
- name: remove ceph-mgr-dashboard package
package:
- name: Remove ceph-mgr-dashboard package
ansible.builtin.package:
name: ceph-mgr-dashboard
state: absent
when: not containerized_deployment | bool

@ -1,96 +1,97 @@
---
- name: Confirm removal of the iSCSI gateway configuration
hosts: localhost
vars_prompt:
- name: purge_config
- name: purge_config # noqa: name[casing]
prompt: Which configuration elements should be purged? (all, lio or abort)
default: 'abort'
private: no
private: false
tasks:
- name: Exit playbook if user aborted the purge
fail:
ansible.builtin.fail:
msg: >
"You have aborted the purge of the iSCSI gateway configuration"
when: purge_config == 'abort'
- name: set_fact igw_purge_type
set_fact:
- name: Set_fact igw_purge_type
ansible.builtin.set_fact:
igw_purge_type: "{{ purge_config }}"
- name: stopping the gateways
- name: Stopping the gateways
hosts:
- iscsigws
become: yes
vars:
- igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
become: true
tasks:
- name: stopping and disabling iscsi daemons
service:
- name: Stopping and disabling iscsi daemons
ansible.builtin.service:
name: "{{ item }}"
state: stopped
enabled: no
enabled: false
with_items:
- rbd-target-gw
- rbd-target-api
- tcmu-runner
- name: removing the gateway configuration
- name: Removing the gateway configuration
hosts:
- iscsigws
become: yes
become: true
vars:
- igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
tasks:
- name: igw_purge | deleting configured rbd devices
igw_purge: mode="disks"
- name: Igw_purge | deleting configured rbd devices
igw_purge:
mode: "disks"
when: igw_purge_type == 'all'
run_once: true
- name: igw_purge | purging the gateway configuration
igw_purge: mode="gateway"
- name: Igw_purge | purging the gateway configuration
igw_purge:
mode: "gateway"
run_once: true
- name: restart and enable iscsi daemons
- name: Restart and enable iscsi daemons
when: igw_purge_type == 'lio'
service:
ansible.builtin.service:
name: "{{ item }}"
state: started
enabled: yes
enabled: true
with_items:
- tcmu-runner
- rbd-target-api
- rbd-target-gw
- name: remove the gateways from the ceph dashboard
- name: Remove the gateways from the ceph dashboard
hosts: mons
become: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: iscsi gateways with ceph dashboard
- name: Iscsi gateways with ceph dashboard
when: dashboard_enabled | bool
run_once: true
block:
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- name: set_fact container_exec_cmd
set_fact:
- name: Set_fact container_exec_cmd
ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: get iscsi gateway list
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-list -f json"
- name: Get iscsi gateway list
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-list -f json"
changed_when: false
register: gateways
- name: remove iscsi gateways
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-rm {{ item }}"
- name: Remove iscsi gateways
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-rm {{ item }}"
with_items: '{{ (gateways.stdout | from_json)["gateways"] }}'
changed_when: false
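# For reference, a hedged sketch of the JSON returned by iscsi-gateway-list
# (field names may differ by release):
#   {"gateways": {"<gateway_name>": {"service_url": "..."}}}
# The task above then removes each listed gateway by name.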

@ -14,52 +14,52 @@
# Additionally modify the users list and buckets list to create the
# users and buckets you want
#
- name: add rgw users and buckets
- name: Add rgw users and buckets
connection: local
hosts: localhost
gather_facts: no
gather_facts: false
tasks:
- name: add rgw users and buckets
ceph_add_users_buckets:
rgw_host: '172.20.0.2'
port: 8000
admin_access_key: '8W56BITCSX27CD555Z5B'
admin_secret_key: 'JcrsUNDNPAvnAWHiBmwKOzMNreOIw2kJWAclQQ20'
users:
- username: 'test1'
fullname: 'tester'
email: 'dan1@email.com'
maxbucket: 666
suspend: false
autogenkey: false
accesskey: 'B3AR4Q33L59YV56A9A2F'
secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
userquota: true
usermaxsize: '1000'
usermaxobjects: 3
bucketquota: true
bucketmaxsize: '1000'
bucketmaxobjects: 3
- username: 'test2'
fullname: 'tester'
buckets:
- bucket: 'bucket1'
user: 'test2'
- bucket: 'bucket2'
user: 'test1'
- bucket: 'bucket3'
user: 'test1'
- bucket: 'bucket4'
user: 'test1'
- bucket: 'bucket5'
user: 'test1'
- bucket: 'bucket6'
user: 'test2'
- bucket: 'bucket7'
user: 'test2'
- bucket: 'bucket8'
user: 'test2'
- bucket: 'bucket9'
user: 'test2'
- bucket: 'bucket10'
user: 'test2'
- name: Add rgw users and buckets
ceph_add_users_buckets:
rgw_host: '172.20.0.2'
port: 8000
admin_access_key: '8W56BITCSX27CD555Z5B'
admin_secret_key: 'JcrsUNDNPAvnAWHiBmwKOzMNreOIw2kJWAclQQ20'
users:
- username: 'test1'
fullname: 'tester'
email: 'dan1@email.com'
maxbucket: 666
suspend: false
autogenkey: false
accesskey: 'B3AR4Q33L59YV56A9A2F'
secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
userquota: true
usermaxsize: '1000'
usermaxobjects: 3
bucketquota: true
bucketmaxsize: '1000'
bucketmaxobjects: 3
- username: 'test2'
fullname: 'tester'
buckets:
- bucket: 'bucket1'
user: 'test2'
- bucket: 'bucket2'
user: 'test1'
- bucket: 'bucket3'
user: 'test1'
- bucket: 'bucket4'
user: 'test1'
- bucket: 'bucket5'
user: 'test1'
- bucket: 'bucket6'
user: 'test2'
- bucket: 'bucket7'
user: 'test2'
- bucket: 'bucket8'
user: 'test2'
- bucket: 'bucket9'
user: 'test2'
- bucket: 'bucket10'
user: 'test2'

@ -9,35 +9,41 @@
# ansible-playbook -e ireallymeanit=yes|no shrink-mds.yml
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
- name: gather facts and check the init system
- name: Gather facts and check the init system
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ mds_group_name | default('mdss') }}"
become: true
tasks:
- debug:
- name: Gather facts on all Ceph hosts for following reference
ansible.builtin.debug:
msg: gather facts on all Ceph hosts for following reference
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- name: perform checks, remove mds and print cluster health
- name: Perform checks, remove mds and print cluster health
hosts: mons[0]
become: true
vars_prompt:
- name: ireallymeanit
- name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
private: no
private: false
pre_tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: exit playbook, if no mds was given
- name: Exit playbook, if no mds was given
when: mds_to_kill is not defined
fail:
ansible.builtin.fail:
msg: >
mds_to_kill must be declared.
Exiting shrink-cluster playbook, no MDS was removed. On the command
@ -45,106 +51,109 @@
"-e mds_to_kill=ceph-mds1" argument. You can only remove a single
MDS each time the playbook runs.
- name: exit playbook, if the mds is not part of the inventory
- name: Exit playbook, if the mds is not part of the inventory
when: mds_to_kill not in groups[mds_group_name]
fail:
ansible.builtin.fail:
msg: "It seems that the host given is not part of your inventory,
please make sure it is."
- name: exit playbook, if user did not mean to shrink cluster
- name: Exit playbook, if user did not mean to shrink cluster
when: ireallymeanit != 'yes'
fail:
ansible.builtin.fail:
msg: "Exiting shrink-mds playbook, no mds was removed.
To shrink the cluster, either say 'yes' on the prompt
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
- name: set_fact container_exec_cmd for mon0
set_fact:
- name: Set_fact container_exec_cmd for mon0
ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
- name: Exit playbook, if can not connect to the cluster
ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
changed_when: false
register: ceph_health
until: ceph_health is succeeded
retries: 5
delay: 2
- name: set_fact mds_to_kill_hostname
set_fact:
- name: Set_fact mds_to_kill_hostname
ansible.builtin.set_fact:
mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}"
tasks:
# get rid of this as soon as "systemctl stop ceph-mds@$HOSTNAME" also
# removes the MDS from the FS map.
- name: exit mds when containerized deployment
command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit"
- name: Exit mds when containerized deployment
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit"
changed_when: false
when: containerized_deployment | bool
- name: get ceph status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
- name: Get ceph status
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
register: ceph_status
changed_when: false
- name: set_fact current_max_mds
set_fact:
- name: Set_fact current_max_mds
ansible.builtin.set_fact:
current_max_mds: "{{ (ceph_status.stdout | from_json)['fsmap']['max'] }}"
- name: fail if removing that mds node wouldn't satisfy max_mds anymore
fail:
- name: Fail if removing that mds node wouldn't satisfy max_mds anymore
ansible.builtin.fail:
msg: "Can't remove more mds as it won't satisfy current max_mds setting"
when:
- ((((ceph_status.stdout | from_json)['fsmap']['up'] | int) + ((ceph_status.stdout | from_json)['fsmap']['up:standby'] | int)) - 1) < current_max_mds | int
- (ceph_status.stdout | from_json)['fsmap']['up'] | int > 1
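# A worked example of the guard above (illustrative values): with max_mds=2,
# up=2 and up:standby=0, removing one mds leaves (2 + 0 - 1) = 1 < 2, so the
# playbook fails; with one standby available (up:standby=1) it proceeds.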
- name: stop mds service and verify it
- name: Stop mds service and verify it
block:
- name: stop mds service
service:
- name: Stop mds service
ansible.builtin.service:
name: ceph-mds@{{ mds_to_kill_hostname }}
state: stopped
enabled: no
enabled: false
delegate_to: "{{ mds_to_kill }}"
failed_when: false
- name: ensure that the mds is stopped
command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa 303
- name: Ensure that the mds is stopped
ansible.builtin.command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa command-instead-of-module
register: mds_to_kill_status
failed_when: mds_to_kill_status.rc == 0
delegate_to: "{{ mds_to_kill }}"
retries: 5
delay: 2
changed_when: false
- name: fail if the mds is reported as active or standby
- name: Fail if the mds is reported as active or standby
block:
- name: get new ceph status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
- name: Get new ceph status
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
register: ceph_status
changed_when: false
- name: get active mds nodes list
set_fact:
- name: Get active mds nodes list
ansible.builtin.set_fact:
active_mdss: "{{ active_mdss | default([]) + [item.name] }}"
with_items: "{{ (ceph_status.stdout | from_json)['fsmap']['by_rank'] }}"
- name: get ceph fs dump status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
- name: Get ceph fs dump status
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
register: ceph_fs_status
changed_when: false
- name: create a list of standby mdss
set_fact:
- name: Create a list of standby mdss
ansible.builtin.set_fact:
standby_mdss: "{{ (ceph_fs_status.stdout | from_json)['standbys'] | map(attribute='name') | list }}"
- name: fail if mds just killed is being reported as active or standby
fail:
- name: Fail if mds just killed is being reported as active or standby
ansible.builtin.fail:
msg: "mds node {{ mds_to_kill }} still up and running."
when:
- (mds_to_kill in active_mdss | default([])) or
(mds_to_kill in standby_mdss | default([]))
- name: delete the filesystem when killing last mds
- name: Delete the filesystem when killing last mds
ceph_fs:
name: "{{ cephfs }}"
cluster: "{{ cluster }}"
@ -156,13 +165,13 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- name: purge mds store
file:
- name: Purge mds store
ansible.builtin.file:
path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_to_kill_hostname }}
state: absent
delegate_to: "{{ mds_to_kill }}"
post_tasks:
- name: show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
- name: Show ceph health
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false
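# A possible invocation combining the two variables used above (illustrative,
# inventory options omitted):
#   ansible-playbook shrink-mds.yml -e mds_to_kill=ceph-mds1 -e ireallymeanit=yes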

@ -11,62 +11,66 @@
# automation scripts to avoid interactive prompt.
- name: gather facts and check the init system
- name: Gather facts and check the init system
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ mgr_group_name | default('mgrs') }}"
become: true
tasks:
- debug:
- name: Gather facts on all Ceph hosts for following reference
ansible.builtin.debug:
msg: gather facts on all Ceph hosts for following reference
- name: confirm if user really meant to remove manager from the ceph cluster
- name: Confirm if user really meant to remove manager from the ceph cluster
hosts: mons[0]
become: true
vars_prompt:
- name: ireallymeanit
- name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
private: no
private: false
pre_tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- name: set_fact container_exec_cmd
- name: Set_fact container_exec_cmd
when: containerized_deployment | bool
set_fact:
ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
- name: Exit playbook, if can not connect to the cluster
ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health is succeeded
retries: 5
delay: 2
- name: get total number of mgrs in cluster
- name: Get total number of mgrs in cluster
block:
- name: save mgr dump output
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
- name: Save mgr dump output
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
register: mgr_dump
changed_when: false
- name: get active and standbys mgr list
set_fact:
- name: Get active and standbys mgr list
ansible.builtin.set_fact:
active_mgr: "{{ [mgr_dump.stdout | from_json] | map(attribute='active_name') | list }}"
standbys_mgr: "{{ (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list }}"
- name: exit playbook, if there's no standby manager
fail:
- name: Exit playbook, if there's no standby manager
ansible.builtin.fail:
msg: "You are about to shrink the only manager present in the cluster."
when: standbys_mgr | length | int < 1
- name: exit playbook, if no manager was given
fail:
- name: Exit playbook, if no manager was given
ansible.builtin.fail:
msg: "mgr_to_kill must be declared
Exiting shrink-cluster playbook, no manager was removed.
On the command line when invoking the playbook, you can use
@ -74,46 +78,47 @@
manager each time the playbook runs."
when: mgr_to_kill is not defined
- name: exit playbook, if user did not mean to shrink cluster
fail:
- name: Exit playbook, if user did not mean to shrink cluster
ansible.builtin.fail:
msg: "Exiting shrink-mgr playbook, no manager was removed.
To shrink the cluster, either say 'yes' on the prompt
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- name: set_fact mgr_to_kill_hostname
set_fact:
- name: Set_fact mgr_to_kill_hostname
ansible.builtin.set_fact:
mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}"
- name: exit playbook, if the selected manager is not present in the cluster
fail:
- name: Exit playbook, if the selected manager is not present in the cluster
ansible.builtin.fail:
msg: "It seems that the host given is not present in the cluster."
when:
- mgr_to_kill_hostname not in active_mgr
- mgr_to_kill_hostname not in standbys_mgr
tasks:
- name: stop manager services and verify it
- name: Stop manager services and verify it
block:
- name: stop manager service
service:
- name: Stop manager service
ansible.builtin.service:
name: ceph-mgr@{{ mgr_to_kill_hostname }}
state: stopped
enabled: no
enabled: false
delegate_to: "{{ mgr_to_kill }}"
failed_when: false
- name: ensure that the mgr is stopped
command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa 303
- name: Ensure that the mgr is stopped
ansible.builtin.command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa command-instead-of-module
register: mgr_to_kill_status
failed_when: mgr_to_kill_status.rc == 0
delegate_to: "{{ mgr_to_kill }}"
changed_when: false
retries: 5
delay: 2
- name: fail if the mgr is reported in ceph mgr dump
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
- name: Fail if the mgr is reported in ceph mgr dump
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
register: mgr_dump
changed_when: false
failed_when: mgr_to_kill_hostname in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list)
@ -121,13 +126,13 @@
retries: 12
delay: 10
- name: purge manager store
file:
- name: Purge manager store
ansible.builtin.file:
path: /var/lib/ceph/mgr/{{ cluster }}-{{ mgr_to_kill_hostname }}
state: absent
delegate_to: "{{ mgr_to_kill }}"
post_tasks:
- name: show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
- name: Show ceph health
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false

@ -12,75 +12,79 @@
# automation scripts to avoid interactive prompt.
- name: gather facts and check the init system
- name: Gather facts and check the init system
hosts: "{{ mon_group_name|default('mons') }}"
become: true
tasks:
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: Gather facts on all Ceph hosts for following reference
ansible.builtin.debug:
msg: "gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to remove monitor from the ceph cluster
- name: Confirm whether user really meant to remove monitor from the ceph cluster
hosts: mons[0]
become: true
vars_prompt:
- name: ireallymeanit
- name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
private: no
private: false
vars:
mon_group_name: mons
pre_tasks:
- name: exit playbook, if only one monitor is present in cluster
fail:
- name: Exit playbook, if only one monitor is present in cluster
ansible.builtin.fail:
msg: "You are about to shrink the only monitor present in the cluster.
If you really want to do that, please use the purge-cluster playbook."
when: groups[mon_group_name] | length | int == 1
- name: exit playbook, if no monitor was given
fail:
- name: Exit playbook, if no monitor was given
ansible.builtin.fail:
msg: "mon_to_kill must be declared
Exiting shrink-cluster playbook, no monitor was removed.
On the command line when invoking the playbook, you can use
-e mon_to_kill=ceph-mon01 argument. You can only remove a single monitor each time the playbook runs."
when: mon_to_kill is not defined
- name: exit playbook, if the monitor is not part of the inventory
fail:
- name: Exit playbook, if the monitor is not part of the inventory
ansible.builtin.fail:
msg: "It seems that the host given is not part of your inventory, please make sure it is."
when: mon_to_kill not in groups[mon_group_name]
- name: exit playbook, if user did not mean to shrink cluster
fail:
- name: Exit playbook, if user did not mean to shrink cluster
ansible.builtin.fail:
msg: "Exiting shrink-mon playbook, no monitor was removed.
To shrink the cluster, either say 'yes' on the prompt
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
tasks:
- name: pick a monitor different than the one we want to remove
set_fact:
- name: Pick a monitor different than the one we want to remove
ansible.builtin.set_fact:
mon_host: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"
when: item != mon_to_kill
- name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
set_fact:
- name: Set container_exec_cmd fact
ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
- name: Exit playbook, if can not connect to the cluster
ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health.stdout.find("HEALTH") > -1
@ -88,33 +92,33 @@
retries: 5
delay: 2
- name: set_fact mon_to_kill_hostname
set_fact:
- name: Set_fact mon_to_kill_hostname
ansible.builtin.set_fact:
mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}"
- name: stop monitor service(s)
service:
- name: Stop monitor service(s)
ansible.builtin.service:
name: ceph-mon@{{ mon_to_kill_hostname }}
state: stopped
enabled: no
enabled: false
delegate_to: "{{ mon_to_kill }}"
failed_when: false
- name: purge monitor store
file:
- name: Purge monitor store
ansible.builtin.file:
path: /var/lib/ceph/mon/{{ cluster }}-{{ mon_to_kill_hostname }}
state: absent
delegate_to: "{{ mon_to_kill }}"
- name: remove monitor from the quorum
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
- name: Remove monitor from the quorum
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
changed_when: false
failed_when: false
delegate_to: "{{ mon_host }}"
post_tasks:
- name: verify the monitor is out of the cluster
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
- name: Verify the monitor is out of the cluster
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
delegate_to: "{{ mon_host }}"
changed_when: false
failed_when: false
@ -123,25 +127,25 @@
retries: 2
delay: 10
- name: please remove the monitor from your ceph configuration file
debug:
msg: "The monitor has been successfully removed from the cluster.
Please remove the monitor entry from the rest of your ceph configuration files, cluster wide."
- name: Please remove the monitor from your ceph configuration file
ansible.builtin.debug:
msg: "The monitor has been successfully removed from the cluster.
Please remove the monitor entry from the rest of your ceph configuration files, cluster wide."
run_once: true
when: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names']
- name: fail if monitor is still part of the cluster
fail:
msg: "Monitor appears to still be part of the cluster, please check what happened."
- name: Fail if monitor is still part of the cluster
ansible.builtin.fail:
msg: "Monitor appears to still be part of the cluster, please check what happened."
run_once: true
when: mon_to_kill_hostname in (result.stdout | from_json)['quorum_names']
- name: show ceph health
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
- name: Show ceph health
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
delegate_to: "{{ mon_host }}"
changed_when: false
- name: show ceph mon status
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
- name: Show ceph mon status
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
delegate_to: "{{ mon_host }}"
changed_when: false

@ -11,102 +11,101 @@
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
- name: gather facts and check the init system
- name: Gather facts and check the init system
hosts:
- mons
- osds
become: True
tasks:
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to remove osd(s) from the cluster
hosts: mons[0]
become: true
tasks:
- name: Gather facts on all Ceph hosts for following reference
ansible.builtin.debug:
msg: "gather facts on all Ceph hosts for following reference"
- name: Confirm whether user really meant to remove osd(s) from the cluster
hosts: mons[0]
become: true
vars_prompt:
- name: ireallymeanit
- name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
private: no
private: false
vars:
mon_group_name: mons
osd_group_name: osds
pre_tasks:
- name: exit playbook, if user did not mean to shrink cluster
fail:
- name: Exit playbook, if user did not mean to shrink cluster
ansible.builtin.fail:
msg: "Exiting shrink-osd playbook, no osd(s) was/were removed..
To shrink the cluster, either say 'yes' on the prompt or
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- name: exit playbook, if no osd(s) was/were given
fail:
- name: Exit playbook, if no osd(s) was/were given
ansible.builtin.fail:
msg: "osd_to_kill must be declared
Exiting shrink-osd playbook, no OSD(s) was/were removed.
On the command line when invoking the playbook, you can use
-e osd_to_kill=0,1,2,3 argument."
when: osd_to_kill is not defined
- name: check the osd ids passed have the correct format
fail:
- name: Check the osd ids passed have the correct format
ansible.builtin.fail:
msg: "The id {{ item }} has wrong format, please pass the number only"
with_items: "{{ osd_to_kill.split(',') }}"
when: not item is regex("^\d+$")
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
post_tasks:
- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
- name: Set_fact container_exec_cmd build docker exec command (containerized)
ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
- name: Exit playbook, if can not connect to the cluster
ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health.stdout.find("HEALTH") > -1
retries: 5
delay: 2
- name: find the host(s) where the osd(s) is/are running on
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
- name: Find the host(s) where the osd(s) is/are running on
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
changed_when: false
with_items: "{{ osd_to_kill.split(',') }}"
register: find_osd_hosts
- name: set_fact osd_hosts
set_fact:
osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item ] ] }}"
- name: Set_fact osd_hosts
ansible.builtin.set_fact:
osd_hosts: "{{ osd_hosts | default([]) + [[(item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item]] }}"
with_items: "{{ find_osd_hosts.results }}"
- name: set_fact _osd_hosts
set_fact:
- name: Set_fact _osd_hosts
ansible.builtin.set_fact:
_osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2, item.3 ] ] }}"
with_nested:
- "{{ groups.get(osd_group_name) }}"
- "{{ osd_hosts }}"
when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
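# Based on the expressions above, each _osd_hosts entry is a triple of
# [inventory hostname, osd fsid, osd id]; later tasks rely on this ordering,
# e.g. item.0 as the delegate host and item.2 as the OSD id.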
- name: set_fact host_list
set_fact:
- name: Set_fact host_list
ansible.builtin.set_fact:
host_list: "{{ host_list | default([]) | union([item.0]) }}"
loop: "{{ _osd_hosts }}"
- name: get ceph-volume lvm list data
- name: Get ceph-volume lvm list data
ceph_volume:
cluster: "{{ cluster }}"
action: list
@ -117,12 +116,12 @@
delegate_to: "{{ item }}"
loop: "{{ host_list }}"
- name: set_fact _lvm_list
set_fact:
- name: Set_fact _lvm_list
ansible.builtin.set_fact:
_lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}"
with_items: "{{ _lvm_list_data.results }}"
- name: refresh /etc/ceph/osd files non containerized_deployment
- name: Refresh /etc/ceph/osd files non containerized_deployment
ceph_volume_simple_scan:
cluster: "{{ cluster }}"
force: true
@ -130,8 +129,8 @@
loop: "{{ host_list }}"
when: not containerized_deployment | bool
- name: get osd unit status
systemd:
- name: Get osd unit status
ansible.builtin.systemd:
name: ceph-osd@{{ item.2 }}
register: osd_status
delegate_to: "{{ item.0 }}"
@ -139,8 +138,8 @@
when:
- containerized_deployment | bool
- name: refresh /etc/ceph/osd files containerized_deployment
command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
- name: Refresh /etc/ceph/osd files containerized_deployment
ansible.builtin.command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
changed_when: false
delegate_to: "{{ item.0 }}"
loop: "{{ _osd_hosts }}"
@ -149,10 +148,13 @@
- item.2 not in _lvm_list.keys()
- osd_status.results[0].status.ActiveState == 'active'
- name: refresh /etc/ceph/osd files containerized_deployment when OSD container is down
- name: Refresh /etc/ceph/osd files containerized_deployment when OSD container is down
when:
- containerized_deployment | bool
- osd_status.results[0].status.ActiveState != 'active'
block:
- name: create tmp osd folder
file:
- name: Create tmp osd folder
ansible.builtin.file:
path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
state: directory
mode: '0755'
@ -160,8 +162,8 @@
when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}"
- name: activate OSD
command: |
- name: Activate OSD
ansible.builtin.command: |
{{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1
-v /dev:/dev -v /etc/localtime:/etc/localtime:ro
-v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared
@ -179,8 +181,8 @@
when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}"
- name: simple scan
command: |
- name: Simple scan
ansible.builtin.command: |
{{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1
-v /dev:/dev -v /etc/localtime:/etc/localtime:ro
-v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared
@ -198,28 +200,24 @@
when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}"
- name: umount OSD temp folder
mount:
- name: Umount OSD temp folder
ansible.posix.mount:
path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
state: unmounted
delegate_to: "{{ item.0 }}"
when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}"
- name: remove OSD temp folder
file:
- name: Remove OSD temp folder
ansible.builtin.file:
path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
state: absent
delegate_to: "{{ item.0 }}"
when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}"
when:
- containerized_deployment | bool
- osd_status.results[0].status.ActiveState != 'active'
- name: find /etc/ceph/osd files
find:
- name: Find /etc/ceph/osd files
ansible.builtin.find:
paths: /etc/ceph/osd
pattern: "{{ item.2 }}-*"
register: ceph_osd_data
@ -227,8 +225,8 @@
loop: "{{ _osd_hosts }}"
when: item.2 not in _lvm_list.keys()
- name: slurp ceph osd files content
slurp:
- name: Slurp ceph osd files content
ansible.builtin.slurp:
src: "{{ item['files'][0]['path'] }}"
delegate_to: "{{ item.item.0 }}"
register: ceph_osd_files_content
@ -237,13 +235,13 @@
- item.skipped is undefined
- item.matched > 0
- name: set_fact ceph_osd_files_json
set_fact:
- name: Set_fact ceph_osd_files_json
ansible.builtin.set_fact:
ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({ item.item.item.2: item.content | b64decode | from_json}) }}"
with_items: "{{ ceph_osd_files_content.results }}"
when: item.skipped is undefined
- name: mark osd(s) out of the cluster
- name: Mark osd(s) out of the cluster
ceph_osd:
ids: "{{ osd_to_kill.split(',') }}"
cluster: "{{ cluster }}"
@ -253,15 +251,15 @@
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
run_once: true
- name: stop osd(s) service
service:
- name: Stop osd(s) service
ansible.builtin.service:
name: ceph-osd@{{ item.2 }}
state: stopped
enabled: no
enabled: false
loop: "{{ _osd_hosts }}"
delegate_to: "{{ item.0 }}"
- name: umount osd lockbox
- name: Umount osd lockbox
ansible.posix.mount:
path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}"
state: absent
@ -273,7 +271,7 @@
- ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
- ceph_osd_data_json[item.2]['data']['uuid'] is defined
- name: umount osd data
- name: Umount osd data
ansible.posix.mount:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
state: absent
@ -281,36 +279,38 @@
delegate_to: "{{ item.0 }}"
when: not containerized_deployment | bool
- name: get parent device for data partition
command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}"
- name: Get parent device for data partition
ansible.builtin.command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}"
register: parent_device_data_part
loop: "{{ _osd_hosts }}"
delegate_to: "{{ item.0 }}"
changed_when: false
when:
- item.2 not in _lvm_list.keys()
- ceph_osd_data_json[item.2]['data']['path'] is defined
- name: add pkname information in ceph_osd_data_json
set_fact:
ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout }}, recursive=True) }}"
- name: Add pkname information in ceph_osd_data_json
ansible.builtin.set_fact:
ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout}}, recursive=True) }}"
loop: "{{ parent_device_data_part.results }}"
when: item.skipped is undefined
- name: close dmcrypt close on devices if needed
command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}"
- name: Close dmcrypt on devices if needed
ansible.builtin.command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}"
with_nested:
- "{{ _osd_hosts }}"
- [ 'block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt' ]
- ['block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt']
delegate_to: "{{ item.0 }}"
failed_when: false
register: result
until: result is succeeded
changed_when: false
when:
- item.2 not in _lvm_list.keys()
- ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
- ceph_osd_data_json[item.2][item.3] is defined
- name: use ceph-volume lvm zap to destroy all partitions
- name: Use ceph-volume lvm zap to destroy all partitions
ceph_volume:
cluster: "{{ cluster }}"
action: zap
@ -321,7 +321,7 @@
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_nested:
- "{{ _osd_hosts }}"
- [ 'block', 'block.db', 'block.wal', 'journal', 'data' ]
- ['block', 'block.db', 'block.wal', 'journal', 'data']
delegate_to: "{{ item.0 }}"
failed_when: false
register: result
@ -329,7 +329,7 @@
- item.2 not in _lvm_list.keys()
- ceph_osd_data_json[item.2][item.3] is defined
- name: zap osd devices
- name: Zap osd devices
ceph_volume:
action: "zap"
osd_fsid: "{{ item.1 }}"
@ -341,7 +341,7 @@
loop: "{{ _osd_hosts }}"
when: item.2 in _lvm_list.keys()
- name: ensure osds are marked down
- name: Ensure osds are marked down
ceph_osd:
ids: "{{ osd_to_kill.split(',') }}"
cluster: "{{ cluster }}"
@ -352,7 +352,7 @@
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: purge osd(s) from the cluster
- name: Purge osd(s) from the cluster
ceph_osd:
ids: "{{ item }}"
cluster: "{{ cluster }}"
@ -363,17 +363,17 @@
run_once: true
with_items: "{{ osd_to_kill.split(',') }}"
- name: remove osd data dir
file:
- name: Remove osd data dir
ansible.builtin.file:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
state: absent
loop: "{{ _osd_hosts }}"
delegate_to: "{{ item.0 }}"
- name: show ceph health
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
- name: Show ceph health
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
changed_when: false
- name: show ceph osd tree
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
- name: Show ceph osd tree
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
changed_when: false


@ -11,34 +11,37 @@
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
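# A minimal invocation sketch (the playbook file name and host value are illustrative
# assumptions, not part of this change):
#   ansible-playbook shrink-rbdmirror.yml -e rbdmirror_to_kill=<rbd-mirror-hostname> -e ireallymeanit=yes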
- name: gather facts and check the init system
- name: Gather facts and check the init system
hosts:
- mons
- rbdmirrors
become: true
tasks:
- debug:
- name: Gather facts on MONs and RBD mirrors
ansible.builtin.debug:
msg: gather facts on MONs and RBD mirrors
- name: confirm whether user really meant to remove rbd mirror from the ceph
- name: Confirm whether user really meant to remove rbd mirror from the ceph
cluster
hosts: mons[0]
become: true
vars_prompt:
- name: ireallymeanit
- name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
private: no
private: false
pre_tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- name: exit playbook, if no rbdmirror was given
fail:
- name: Exit playbook, if no rbdmirror was given
ansible.builtin.fail:
msg: "rbdmirror_to_kill must be declared
Exiting shrink-rbdmirror playbook, no RBD mirror was removed.
On the command line when invoking the playbook, you can use
@ -46,68 +49,68 @@
single rbd mirror each time the playbook runs."
when: rbdmirror_to_kill is not defined
- name: exit playbook, if the rbdmirror is not part of the inventory
fail:
- name: Exit playbook, if the rbdmirror is not part of the inventory
ansible.builtin.fail:
msg: >
It seems that the host given is not part of your inventory,
please make sure it is.
when: rbdmirror_to_kill not in groups[rbdmirror_group_name]
- name: exit playbook, if user did not mean to shrink cluster
fail:
- name: Exit playbook, if user did not mean to shrink cluster
ansible.builtin.fail:
msg: "Exiting shrink-rbdmirror playbook, no rbd-mirror was removed.
To shrink the cluster, either say 'yes' on the prompt or
use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- name: set_fact container_exec_cmd for mon0
- name: Set_fact container_exec_cmd for mon0
when: containerized_deployment | bool
set_fact:
ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
- name: Exit playbook, if can not connect to the cluster
ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
register: ceph_health
changed_when: false
until: ceph_health is succeeded
retries: 5
delay: 2
- name: set_fact rbdmirror_to_kill_hostname
set_fact:
- name: Set_fact rbdmirror_to_kill_hostname
ansible.builtin.set_fact:
rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_facts']['hostname'] }}"
- name: set_fact rbdmirror_gids
set_fact:
rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [ item ] }}"
- name: Set_fact rbdmirror_gids
ansible.builtin.set_fact:
rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [item] }}"
with_items: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list }}"
when: item != 'summary'
- name: set_fact rbdmirror_to_kill_gid
set_fact:
- name: Set_fact rbdmirror_to_kill_gid
ansible.builtin.set_fact:
rbdmirror_to_kill_gid: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['gid'] }}"
with_items: "{{ rbdmirror_gids }}"
when: (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['metadata']['id'] == rbdmirror_to_kill_hostname
tasks:
- name: stop rbdmirror service
service:
- name: Stop rbdmirror service
ansible.builtin.service:
name: ceph-rbd-mirror@rbd-mirror.{{ rbdmirror_to_kill_hostname }}
state: stopped
enabled: no
enabled: false
delegate_to: "{{ rbdmirror_to_kill }}"
failed_when: false
- name: purge related directories
file:
- name: Purge related directories
ansible.builtin.file:
path: /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}-{{ rbdmirror_to_kill_hostname }}
state: absent
delegate_to: "{{ rbdmirror_to_kill }}"
post_tasks:
- name: get servicemap details
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
- name: Get servicemap details
ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
register: ceph_health
failed_when:
- "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
@ -115,10 +118,11 @@
until:
- "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
- rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list
changed_when: false
when: rbdmirror_to_kill_gid is defined
retries: 12
delay: 10
- name: show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
- name: Show ceph health
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false


@ -11,19 +11,19 @@
# automation scripts to avoid interactive prompt.
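# A minimal invocation sketch (values are illustrative only; the rgw instance name follows the
# $(hostname).rgw$(instance number) format checked later in this playbook):
#   ansible-playbook shrink-rgw.yml -e rgw_to_kill=<hostname>.rgw0 -e ireallymeanit=yes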
- name: confirm whether user really meant to remove rgw from the ceph cluster
- name: Confirm whether user really meant to remove rgw from the ceph cluster
hosts: localhost
become: false
gather_facts: false
vars_prompt:
- name: ireallymeanit
- name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
private: no
private: false
tasks:
- name: exit playbook, if no rgw was given
- name: Exit playbook, if no rgw was given
when: rgw_to_kill is not defined or rgw_to_kill | length == 0
fail:
ansible.builtin.fail:
msg: >
rgw_to_kill must be declared.
Exiting shrink-rgw playbook, no RGW was removed. On the command
@ -31,82 +31,85 @@
"-e rgw_to_kill=ceph.rgw0 argument". You can only remove a single
RGW each time the playbook runs.
- name: exit playbook, if user did not mean to shrink cluster
- name: Exit playbook, if user did not mean to shrink cluster
when: ireallymeanit != 'yes'
fail:
ansible.builtin.fail:
msg: >
Exiting shrink-rgw playbook, no RGW was removed. To shrink the
cluster, either say 'yes' on the prompt or use
'-e ireallymeanit=yes' on the command line when invoking the playbook
- name: gather facts and mons and rgws
- name: Gather facts on mons and rgws
hosts:
- "{{ mon_group_name | default('mons') }}[0]"
- "{{ rgw_group_name | default('rgws') }}"
become: true
gather_facts: false
tasks:
- name: gather facts
setup:
- name: Gather facts
ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
- hosts: mons[0]
- name: Shrink rgw service
hosts: mons[0]
become: true
gather_facts: false
pre_tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- name: set_fact container_exec_cmd for mon0
set_fact:
- name: Set_fact container_exec_cmd for mon0
ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
- name: Exit playbook, if can not connect to the cluster
ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health is succeeded
retries: 5
delay: 2
- name: get rgw instances
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
- name: Get rgw instances
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
register: rgw_instances
changed_when: false
- name: exit playbook, if the rgw_to_kill doesn't exist
- name: Exit playbook, if the rgw_to_kill doesn't exist
when: rgw_to_kill not in (rgw_instances.stdout | from_json).services.rgw.daemons.keys() | list
fail:
ansible.builtin.fail:
msg: >
It seems that the rgw instance given is not part of the ceph cluster. Please
make sure it is.
The rgw instance format is $(hostname).rgw$(instance number).
tasks:
- name: get rgw host running the rgw instance to kill
set_fact:
- name: Get rgw host running the rgw instance to kill
ansible.builtin.set_fact:
rgw_host: '{{ item }}'
with_items: '{{ groups[rgw_group_name] }}'
when: hostvars[item]['ansible_facts']['hostname'] == rgw_to_kill.split('.')[0]
- name: stop rgw service
service:
- name: Stop rgw service
ansible.builtin.service:
name: ceph-radosgw@rgw.{{ rgw_to_kill }}
state: stopped
enabled: no
enabled: false
delegate_to: "{{ rgw_host }}"
failed_when: false
- name: ensure that the rgw is stopped
command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}" # noqa 303
- name: Ensure that the rgw is stopped
ansible.builtin.command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}" # noqa command-instead-of-module
register: rgw_to_kill_status
failed_when: rgw_to_kill_status.rc == 0
changed_when: false
@ -114,8 +117,8 @@
retries: 5
delay: 2
- name: exit if rgw_to_kill is reported in ceph status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
- name: Exit if rgw_to_kill is reported in ceph status
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
register: ceph_status
changed_when: false
failed_when:
@ -127,12 +130,12 @@
retries: 3
delay: 3
- name: purge directories related to rgw
file:
- name: Purge directories related to rgw
ansible.builtin.file:
path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_to_kill }}
state: absent
delegate_to: "{{ rgw_host }}"
post_tasks:
- name: show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
- name: Show ceph health
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false


@ -5,26 +5,23 @@
# Usage:
# ansible-playbook storage-inventory.yml
- name: gather facts and check the init system
hosts: "{{ osd_group_name|default('osds') }}"
- name: Gather facts and check the init system
hosts: osds
become: true
tasks:
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: query each host for storage device inventory
hosts: "{{ osd_group_name|default('osds') }}"
- name: Gather facts on all Ceph hosts
ansible.builtin.debug:
msg: "gather facts on all Ceph hosts for following reference"
- name: Query each host for storage device inventory
hosts: osds
become: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: list storage inventory
- name: List storage inventory
ceph_volume:
action: "inventory"
environment:


@ -1,29 +1,30 @@
---
# This playbook switches from non-containerized to containerized Ceph daemons
- name: confirm whether user really meant to switch from non-containerized to containerized ceph daemons
- name: Confirm whether user really meant to switch from non-containerized to containerized ceph daemons
hosts: localhost
gather_facts: false
any_errors_fatal: true
vars_prompt:
- name: ireallymeanit
- name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to switch from non-containerized to containerized ceph daemons?
default: 'no'
private: no
private: false
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: fail when less than three monitors
fail:
- name: Fail when less than three monitors
ansible.builtin.fail:
msg: "This playbook requires at least three monitors."
when: groups[mon_group_name] | length | int < 3
- name: exit playbook, if user did not mean to switch from non-containerized to containerized daemons?
fail:
- name: Exit playbook, if user did not mean to switch from non-containerized to containerized daemons
ansible.builtin.fail:
msg: >
"Exiting switch-from-non-containerized-to-containerized-ceph-daemons.yml playbook,
cluster did not switch from non-containerized to containerized ceph daemons.
@ -33,7 +34,7 @@
when: ireallymeanit != 'yes'
- name: gather facts
- name: Gather facts
hosts:
- "{{ mon_group_name|default('mons') }}"
@ -47,52 +48,57 @@
become: true
vars:
delegate_facts_host: True
delegate_facts_host: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: gather and delegate facts
setup:
- name: Gather and delegate facts
ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
delegate_facts: True
delegate_facts: true
with_items: "{{ groups['all'] | difference(groups.get(client_group_name, [])) }}"
run_once: true
when: delegate_facts_host | bool
tags: always
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
- import_role:
- name: Import ceph-validate role
ansible.builtin.import_role:
name: ceph-validate
- name: switching from non-containerized to containerized ceph mon
- name: Switching from non-containerized to containerized ceph mon
vars:
containerized_deployment: true
switch_to_containers: True
mon_group_name: mons
switch_to_containers: true
mon_group_name: mons
hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: true
pre_tasks:
- name: select a running monitor
set_fact: mon_host={{ item }}
- name: Select a running monitor
ansible.builtin.set_fact:
mon_host: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"
when: item != inventory_hostname
- name: stop non-containerized ceph mon
service:
- name: Stop non-containerized ceph mon
ansible.builtin.service:
name: "ceph-mon@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
enabled: false
- name: remove old systemd unit files
file:
- name: Remove old systemd unit files
ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
@ -101,61 +107,67 @@
- /lib/systemd/system/ceph-mon@.service
- /lib/systemd/system/ceph-mon.target
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
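# For contrast, a module-based equivalent of the find/chown task below would look roughly like
# this sketch (illustrative only, not part of the play); it is slower because the file module
# stats and checks every path it manages:
#   - name: Set proper ownership on ceph directories
#     ansible.builtin.file:
#       path: "{{ item }}"
#       state: directory
#       owner: "{{ ceph_uid }}"
#       group: "{{ ceph_uid }}"
#       recurse: true
#     loop:
#       - /var/lib/ceph/mon
#       - /etc/ceph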
- name: set proper ownership on ceph directories
command: "find /var/lib/ceph/mon /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
- name: Set proper ownership on ceph directories
ansible.builtin.command: "find /var/lib/ceph/mon /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
- name: check for existing old leveldb file extension (ldb)
shell: stat /var/lib/ceph/mon/*/store.db/*.ldb
- name: Check for existing old leveldb file extension (ldb)
ansible.builtin.shell: stat /var/lib/ceph/mon/*/store.db/*.ldb
changed_when: false
failed_when: false
register: ldb_files
- name: rename leveldb extension from ldb to sst
shell: rename -v .ldb .sst /var/lib/ceph/mon/*/store.db/*.ldb
- name: Rename leveldb extension from ldb to sst
ansible.builtin.shell: rename -v .ldb .sst /var/lib/ceph/mon/*/store.db/*.ldb
changed_when: false
failed_when: false
when: ldb_files.rc == 0
- name: copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common
command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
- name: Copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common
ansible.builtin.command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
args:
creates: /etc/ceph/{{ cluster }}.mon.keyring
changed_when: false
failed_when: false
tasks:
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
- import_role:
- name: Import ceph-mon role
ansible.builtin.import_role:
name: ceph-mon
post_tasks:
- name: waiting for the monitor to join the quorum...
command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json"
- name: Waiting for the monitor to join the quorum...
ansible.builtin.command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json"
register: ceph_health_raw
until: ansible_facts['hostname'] in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
changed_when: false
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
- name: switching from non-containerized to containerized ceph mgr
- name: Switching from non-containerized to containerized ceph mgr
hosts: "{{ mgr_group_name|default('mgrs') }}"
@ -169,15 +181,15 @@
# failed_when: false is here because if we're
# working with a jewel cluster then ceph mgr
# will not exist
- name: stop non-containerized ceph mgr(s)
service:
- name: Stop non-containerized ceph mgr(s)
ansible.builtin.service:
name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
enabled: false
failed_when: false
- name: remove old systemd unit files
file:
- name: Remove old systemd unit files
ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
@ -186,66 +198,75 @@
- /lib/systemd/system/ceph-mgr@.service
- /lib/systemd/system/ceph-mgr.target
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- name: set proper ownership on ceph directories
command: "find /var/lib/ceph/mgr /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
- name: Set proper ownership on ceph directories
ansible.builtin.command: "find /var/lib/ceph/mgr /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
tasks:
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
- import_role:
- name: Import ceph-mgr role
ansible.builtin.import_role:
name: ceph-mgr
- name: set osd flags
- name: Set osd flags
hosts: "{{ mon_group_name | default('mons') }}[0]"
become: True
become: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- name: get pool list
command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
- name: Get pool list
ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
register: pool_list
changed_when: false
check_mode: false
- name: get balancer module status
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
- name: Get balancer module status
ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status_switch
changed_when: false
check_mode: false
- name: set_fact pools_pgautoscaler_mode
set_fact:
- name: Set_fact pools_pgautoscaler_mode
ansible.builtin.set_fact:
pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
- name: disable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
- name: Disable balancer
ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
changed_when: false
when: (balancer_status_switch.stdout | from_json)['active'] | bool
- name: disable pg autoscale on pools
- name: Disable pg autoscale on pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
@ -258,7 +279,7 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- name: set osd flags
- name: Set osd flags
ceph_osd_flag:
name: "{{ item }}"
cluster: "{{ cluster }}"
@ -270,12 +291,12 @@
- nodeep-scrub
- name: switching from non-containerized to containerized ceph osd
- name: Switching from non-containerized to containerized ceph osd
vars:
containerized_deployment: true
osd_group_name: osds
switch_to_containers: True
switch_to_containers: true
hosts: "{{ osd_group_name|default('osds') }}"
@ -283,11 +304,12 @@
become: true
pre_tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: collect running osds
shell: |
- name: Collect running osds
ansible.builtin.shell: |
set -o pipefail;
systemctl list-units | grep -E "loaded * active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-volume'
register: running_osds
@ -295,28 +317,28 @@
failed_when: false
# systemd module does not support --runtime option
- name: disable ceph-osd@.service runtime-enabled
command: "systemctl disable --runtime {{ item }}" # noqa 303
- name: Disable ceph-osd@.service runtime-enabled
ansible.builtin.command: "systemctl disable --runtime {{ item }}" # noqa command-instead-of-module
changed_when: false
failed_when: false
with_items: "{{ running_osds.stdout_lines | default([]) }}"
when: item.startswith('ceph-osd@')
- name: stop/disable/mask non-containerized ceph osd(s) (if any)
systemd:
- name: Stop/disable/mask non-containerized ceph osd(s) (if any)
ansible.builtin.systemd:
name: "{{ item }}"
state: stopped
enabled: no
enabled: false
with_items: "{{ running_osds.stdout_lines | default([]) }}"
when: running_osds != []
- name: disable ceph.target
systemd:
- name: Disable ceph.target
ansible.builtin.systemd:
name: ceph.target
enabled: no
enabled: false
- name: remove old ceph-osd systemd units
file:
- name: Remove old ceph-osd systemd units
ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
@ -327,44 +349,45 @@
- /lib/systemd/system/ceph-osd@.service
- /lib/systemd/system/ceph-volume@.service
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- name: set proper ownership on ceph directories
command: "find /var/lib/ceph/osd /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
- name: Set proper ownership on ceph directories
ansible.builtin.command: "find /var/lib/ceph/osd /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
- name: check for existing old leveldb file extension (ldb)
shell: stat /var/lib/ceph/osd/*/current/omap/*.ldb
- name: Check for existing old leveldb file extension (ldb)
ansible.builtin.shell: stat /var/lib/ceph/osd/*/current/omap/*.ldb
changed_when: false
failed_when: false
register: ldb_files
- name: rename leveldb extension from ldb to sst
shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb
- name: Rename leveldb extension from ldb to sst
ansible.builtin.shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb
changed_when: false
failed_when: false
when: ldb_files.rc == 0
- name: check if containerized osds are already running
command: >
- name: Check if containerized osds are already running
ansible.builtin.command: >
{{ container_binary }} ps -q --filter='name=ceph-osd'
changed_when: false
failed_when: false
register: osd_running
- name: get osd directories
command: >
- name: Get osd directories
ansible.builtin.command: >
find /var/lib/ceph/osd {% if dmcrypt | bool %}/var/lib/ceph/osd-lockbox{% endif %} -maxdepth 1 -mindepth 1 -type d
register: osd_dirs
changed_when: false
failed_when: false
- name: unmount all the osd directories
command: >
- name: Unmount all the osd directories
ansible.builtin.command: >
umount {{ item }}
changed_when: false
failed_when: false
@ -372,21 +395,25 @@
when: osd_running.rc != 0 or osd_running.stdout_lines | length == 0
tasks:
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
- import_role:
- name: Import ceph-osd role
ansible.builtin.import_role:
name: ceph-osd
post_tasks:
- name: container - waiting for clean pgs...
command: >
- name: Container - waiting for clean pgs...
ansible.builtin.command: >
{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
register: ceph_health_post
until: >
@ -399,17 +426,20 @@
changed_when: false
- name: unset osd flags
- name: Unset osd flags
hosts: "{{ mon_group_name | default('mons') }}[0]"
become: True
become: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- name: re-enable pg autoscale on pools
- name: Re-enable pg autoscale on pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
@ -422,7 +452,7 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- name: unset osd flags
- name: Unset osd flags
ceph_osd_flag:
name: "{{ item }}"
cluster: "{{ cluster }}"
@ -434,13 +464,13 @@
- noout
- nodeep-scrub
- name: re-enable balancer
command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
- name: Re-enable balancer
ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
changed_when: false
when: (balancer_status_switch.stdout | from_json)['active'] | bool
- name: switching from non-containerized to containerized ceph mds
- name: Switching from non-containerized to containerized ceph mds
hosts: "{{ mds_group_name|default('mdss') }}"
@ -452,14 +482,14 @@
become: true
pre_tasks:
- name: stop non-containerized ceph mds(s)
service:
- name: Stop non-containerized ceph mds(s)
ansible.builtin.service:
name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
enabled: false
- name: remove old systemd unit files
file:
- name: Remove old systemd unit files
ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
@ -468,34 +498,40 @@
- /lib/systemd/system/ceph-mds@.service
- /lib/systemd/system/ceph-mds.target
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- name: set proper ownership on ceph directories
command: "find /var/lib/ceph/mds /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
- name: Set proper ownership on ceph directories
ansible.builtin.command: "find /var/lib/ceph/mds /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
tasks:
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
- import_role:
name: ceph-mds
- name: Import ceph-mds role
ansible.builtin.import_role:
name: ceph-mds
- name: switching from non-containerized to containerized ceph rgw
- name: Switching from non-containerized to containerized ceph rgw
hosts: "{{ rgw_group_name|default('rgws') }}"
@ -506,33 +542,36 @@
serial: 1
become: true
pre_tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
- import_role:
- name: Import ceph-config role
ansible.builtin.import_role:
name: ceph-config
tasks_from: rgw_systemd_environment_file.yml
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- name: set proper ownership on ceph directories
command: "find /var/lib/ceph/radosgw /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
- name: Set proper ownership on ceph directories
ansible.builtin.command: "find /var/lib/ceph/radosgw /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
tasks:
- name: stop non-containerized ceph rgw(s)
service:
- name: Stop non-containerized ceph rgw(s)
ansible.builtin.service:
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
enabled: false
with_items: "{{ rgw_instances }}"
- name: remove old systemd unit files
file:
- name: Remove old systemd unit files
ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
@ -541,20 +580,24 @@
- /lib/systemd/system/ceph-radosgw@.service
- /lib/systemd/system/ceph-radosgw.target
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
- import_role:
- name: Import ceph-rgw role
ansible.builtin.import_role:
name: ceph-rgw
- name: switching from non-containerized to containerized ceph rbd-mirror
- name: Switching from non-containerized to containerized ceph rbd-mirror
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
@ -565,21 +608,21 @@
serial: 1
become: true
pre_tasks:
- name: check for ceph rbd mirror services
command: systemctl show --no-pager --property=Id ceph-rbd-mirror@* # noqa 303
- name: Check for ceph rbd mirror services
ansible.builtin.command: systemctl show --no-pager --property=Id ceph-rbd-mirror@* # noqa: command-instead-of-module
changed_when: false
register: rbdmirror_services
- name: stop non-containerized ceph rbd mirror(s)
service:
- name: Stop non-containerized ceph rbd mirror(s) # noqa: ignore-errors
ansible.builtin.service:
name: "{{ item.split('=')[1] }}"
state: stopped
enabled: no
enabled: false
ignore_errors: true
loop: "{{ rbdmirror_services.stdout_lines }}"
- name: remove old systemd unit files
file:
- name: Remove old systemd unit files
ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
@ -588,34 +631,40 @@
- /lib/systemd/system/ceph-rbd-mirror@.service
- /lib/systemd/system/ceph-rbd-mirror.target
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- name: set proper ownership on ceph directories
command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
- name: Set proper ownership on ceph directories
ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
tasks:
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
- import_role:
- name: Import ceph-rbd-mirror role
ansible.builtin.import_role:
name: ceph-rbd-mirror
- name: switching from non-containerized to containerized ceph nfs
- name: Switching from non-containerized to containerized ceph nfs
hosts: "{{ nfs_group_name|default('nfss') }}"
@ -630,40 +679,46 @@
# failed_when: false is here because if we're
# working with a jewel cluster then ceph nfs
# will not exist
- name: stop non-containerized ceph nfs(s)
service:
- name: Stop non-containerized ceph nfs(s)
ansible.builtin.service:
name: nfs-ganesha
state: stopped
enabled: no
enabled: false
failed_when: false
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- name: set proper ownership on ceph directories
command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
- name: Set proper ownership on ceph directories
ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
tasks:
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
- import_role:
- name: Import ceph-nfs role
ansible.builtin.import_role:
name: ceph-nfs
- name: switching from non-containerized to containerized iscsigws
- name: Switching from non-containerized to containerized iscsigws
hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
vars:
containerized_deployment: true
@ -671,21 +726,22 @@
become: true
serial: 1
pre_tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: stop iscsigw services
service:
- name: Stop iscsigw services
ansible.builtin.service:
name: "{{ item }}"
state: stopped
enabled: no
enabled: false
with_items:
- tcmu-runner
- rbd-target-gw
- rbd-target-api
- name: remove old systemd unit files
file:
- name: Remove old systemd unit files
ansible.builtin.file:
path: "/usr/lib/systemd/system/{{ item }}.service"
state: absent
with_items:
@ -693,29 +749,34 @@
- rbd-target-gw
- rbd-target-api
tasks:
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- name: set proper ownership on ceph directories
command: "find /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
- name: Set proper ownership on ceph directories
ansible.builtin.command: "find /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
- import_role:
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
- import_role:
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
- import_role:
- name: Import ceph-iscsi-gw role
ansible.builtin.import_role:
name: ceph-iscsi-gw
- name: switching from non-containerized to containerized ceph-crash
- name: Switching from non-containerized to containerized ceph-crash
hosts:
- "{{ mon_group_name | default('mons') }}"
@ -729,26 +790,30 @@
containerized_deployment: true
become: true
tasks:
- name: stop non-containerized ceph-crash
service:
- name: Stop non-containerized ceph-crash
ansible.builtin.service:
name: ceph-crash
state: stopped
enabled: no
enabled: false
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- import_role:
- name: Import ceph-crash role
ansible.builtin.import_role:
name: ceph-crash
- name: final task
- name: Final task
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ mgr_group_name|default('mgrs') }}"
@ -759,11 +824,12 @@
containerized_deployment: true
become: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- name: set proper ownership on ceph directories
command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
- name: Set proper ownership on ceph directories
ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false


@ -11,45 +11,51 @@
# 4. Run the playbook called: `take-over-existing-cluster.yml` like this `ansible-playbook take-over-existing-cluster.yml`.
# 5. Eventually run Ceph Ansible to validate everything by doing: `ansible-playbook site.yml`.
- hosts: mons
become: True
tasks:
- import_role:
name: ceph-defaults
- import_role:
name: ceph-fetch-keys
- hosts:
- mons
- osds
- mdss
- rgws
- nfss
- rbdmirrors
- clients
- mgrs
- iscsi-gw
- name: Fetch keys
hosts: mons
become: true
tasks:
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: Import ceph-fetch-keys role
ansible.builtin.import_role:
name: ceph-fetch-keys
- name: Take over existing cluster
hosts:
- mons
- osds
- mdss
- rgws
- nfss
- rbdmirrors
- clients
- mgrs
- iscsi-gw
become: true
tasks:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
post_tasks:
- name: get the name of the existing ceph cluster
shell: |
- name: Get the name of the existing ceph cluster
ansible.builtin.shell: |
set -o pipefail;
basename $(grep --exclude '*.bak' -R fsid /etc/ceph/ | egrep -o '^[^.]*' | head -n 1)
changed_when: false
register: cluster_name
- name: "stat {{ cluster_name.stdout }}.conf"
stat:
- name: Run stat module on Ceph configuration file
ansible.builtin.stat:
path: "/etc/ceph/{{ cluster_name.stdout }}.conf"
register: ceph_conf_stat
# Creates a backup of original ceph conf file in 'cluster_name-YYYYMMDDTHHMMSS.conf.bak' format
- name: "make a backup of original {{ cluster_name.stdout }}.conf"
copy:
- name: Make a backup of original Ceph configuration file
ansible.builtin.copy:
src: "/etc/ceph/{{ cluster_name.stdout }}.conf"
dest: "/etc/ceph/{{ cluster_name.stdout }}-{{ ansible_date_time.iso8601_basic_short }}.conf.bak"
remote_src: true
@ -57,7 +63,7 @@
group: "{{ ceph_conf_stat.stat.gr_name }}"
mode: "{{ ceph_conf_stat.stat.mode }}"
- name: generate ceph configuration file
- name: Generate ceph configuration file
openstack.config_template.config_template:
src: "roles/ceph-config/templates/ceph.conf.j2"
dest: "/etc/ceph/{{ cluster_name.stdout }}.conf"


@ -8,16 +8,16 @@
# the operation won't last for too long.
- hosts: <your_host>
gather_facts: False
gather_facts: false
tasks:
- name: Set the noout flag
command: ceph osd set noout
ansible.builtin.command: ceph osd set noout
delegate_to: <your_monitor>
- name: Turn off the server
command: poweroff
ansible.builtin.command: poweroff
- name: Wait for the server to go down
local_action:
@ -35,5 +35,5 @@
timeout: 3600
- name: Unset the noout flag
command: ceph osd unset noout
ansible.builtin.command: ceph osd unset noout
delegate_to: <your_monitor>


@ -10,7 +10,7 @@
- hosts: mons
serial: 1
sudo: True
sudo: true
vars:
backup_dir: /tmp/
@ -18,13 +18,13 @@
tasks:
- name: Check if the node has be migrated already
stat: >
ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
register: migration_completed
failed_when: false
- name: Check for failed run
stat: >
ansible.builtin.stat: >
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: mon_archive_leftover
@ -32,36 +32,36 @@
when: migration_completed.stat.exists == False and mon_archive_leftover.stat.exists == True
- name: Compress the store as much as possible
command: ceph tell mon.{{ ansible_facts['hostname'] }} compact
ansible.builtin.command: ceph tell mon.{{ ansible_facts['hostname'] }} compact
when: migration_completed.stat.exists == False
- name: Check if sysvinit
stat: >
ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
register: monsysvinit
changed_when: False
changed_when: false
- name: Check if upstart
stat: >
ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
register: monupstart
changed_when: False
changed_when: false
- name: Check if init does what it is supposed to do (Sysvinit)
shell: >
ansible.builtin.shell: >
ps faux|grep -sq [c]eph-mon && service ceph status mon >> /dev/null
register: ceph_status_sysvinit
changed_when: False
changed_when: false
# can't complete the condition since the previous task never ran...
- fail: msg="Something is terribly wrong here, sysvinit is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!"
when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True
- name: Check if init does what it is supposed to do (upstart)
shell: >
ansible.builtin.shell: >
ps faux|grep -sq [c]eph-mon && status ceph-mon-all >> /dev/null
register: ceph_status_upstart
changed_when: False
changed_when: false
- fail: msg="Something is terribly wrong here, upstart is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!"
when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True
@ -124,7 +124,7 @@
# NOTE (leseb): should we convert upstart to sysvinit here already?
- name: Archive monitor stores
shell: >
ansible.builtin.shell: >
tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
creates={{ ansible_facts['hostname'] }}.tar
@ -138,7 +138,7 @@
when: migration_completed.stat.exists == False
- name: Reboot the server
command: reboot
ansible.builtin.command: reboot
when: migration_completed.stat.exists == False
- name: Wait for the server to come up
@ -154,16 +154,16 @@
when: migration_completed.stat.exists == False
- name: Check if sysvinit
stat: >
ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
register: monsysvinit
changed_when: False
changed_when: false
- name: Check if upstart
stat: >
ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
register: monupstart
changed_when: False
changed_when: false
- name: Make sure the monitor is stopped (Upstart)
service: >
@ -190,13 +190,13 @@
when: migration_completed.stat.exists == False
- name: Copy keys and configs
shell: >
ansible.builtin.shell: >
cp etc/ceph/* /etc/ceph/
chdir=/var/lib/ceph/
when: migration_completed.stat.exists == False
- name: Configure RHEL7 for sysvinit
shell: find -L /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
ansible.builtin.shell: find -L /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
when: migration_completed.stat.exists == False
# NOTE (leseb): at this point the upstart and sysvinit checks are not necessary
@ -217,7 +217,7 @@
when: migration_completed.stat.exists == False
- name: Waiting for the monitor to join the quorum...
shell: >
ansible.builtin.shell: >
ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }}
register: result
until: result.rc == 0
@ -238,20 +238,20 @@
- hosts: osds
serial: 1
sudo: True
sudo: true
vars:
backup_dir: /tmp/
tasks:
- name: Check if the node has be migrated already
stat: >
ansible.builtin.stat: >
path=/var/lib/ceph/migration_completed
register: migration_completed
failed_when: false
- name: Check for failed run
stat: >
ansible.builtin.stat: >
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: osd_archive_leftover
@ -259,44 +259,44 @@
when: migration_completed.stat.exists == False and osd_archive_leftover.stat.exists == True
- name: Check if init does what it is supposed to do (Sysvinit)
shell: >
ansible.builtin.shell: >
ps faux|grep -sq [c]eph-osd && service ceph status osd >> /dev/null
register: ceph_status_sysvinit
changed_when: False
changed_when: false
# can't complete the condition since the previous task never ran...
- fail: msg="Something is terribly wrong here, sysvinit is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!"
when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True
- name: Check if init does what it is supposed to do (upstart)
shell: >
ansible.builtin.shell: >
ps faux|grep -sq [c]eph-osd && initctl list|egrep -sq "ceph-osd \(ceph/.\) start/running, process [0-9][0-9][0-9][0-9]"
register: ceph_status_upstart
changed_when: False
changed_when: false
- fail: msg="Something is terribly wrong here, upstart is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!"
when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True
- name: Set the noout flag
command: ceph osd set noout
ansible.builtin.command: ceph osd set noout
delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name][0] }}"
when: migration_completed.stat.exists == False
- name: Check if sysvinit
shell: stat /var/lib/ceph/osd/ceph-*/sysvinit
ansible.builtin.shell: stat /var/lib/ceph/osd/ceph-*/sysvinit
register: osdsysvinit
failed_when: false
changed_when: False
changed_when: false
- name: Check if upstart
shell: stat /var/lib/ceph/osd/ceph-*/upstart
ansible.builtin.shell: stat /var/lib/ceph/osd/ceph-*/upstart
register: osdupstart
failed_when: false
changed_when: False
changed_when: false
- name: Archive ceph configs
shell: >
ansible.builtin.shell: >
tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
creates={{ ansible_facts['hostname'] }}.tar
@ -321,7 +321,7 @@
when: migration_completed.stat.exists == False
- name: Collect OSD ports
shell: netstat -tlpn | awk -F ":" '/ceph-osd/ { sub (" .*", "", $2); print $2 }' | uniq
ansible.builtin.shell: netstat -tlpn | awk -F ":" '/ceph-osd/ { sub (" .*", "", $2); print $2 }' | uniq
register: osd_ports
when: migration_completed.stat.exists == False
@ -349,11 +349,11 @@
when: migration_completed.stat.exists == False
- name: Configure RHEL with sysvinit
shell: find -L /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
ansible.builtin.shell: find -L /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
when: migration_completed.stat.exists == False
- name: Reboot the server
command: reboot
ansible.builtin.command: reboot
when: migration_completed.stat.exists == False
- name: Wait for the server to come up
@ -379,7 +379,7 @@
when: migration_completed.stat.exists == False
- name: Copy keys and configs
shell: >
ansible.builtin.shell: >
cp etc/ceph/* /etc/ceph/
chdir=/var/lib/ceph/
when: migration_completed.stat.exists == False
@ -405,7 +405,7 @@
# - "{{ osd_ports.stdout_lines }}"
- name: Waiting for clean PGs...
shell: >
ansible.builtin.shell: >
test "[""$(ceph -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(ceph -s -f json | python -c 'import sys, json; print([ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"])')"
register: result
until: result.rc == 0
@ -425,27 +425,27 @@
when: migration_completed.stat.exists == False
- name: Unset the noout flag
command: ceph osd unset noout
ansible.builtin.command: ceph osd unset noout
delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name][0] }}"
when: migration_completed.stat.exists == False
- hosts: rgws
serial: 1
sudo: True
sudo: true
vars:
backup_dir: /tmp/
tasks:
- name: Check if the node has be migrated already
stat: >
ansible.builtin.stat: >
path=/var/lib/ceph/radosgw/migration_completed
register: migration_completed
failed_when: false
- name: Check for failed run
stat: >
ansible.builtin.stat: >
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: rgw_archive_leftover
@ -453,7 +453,7 @@
when: migration_completed.stat.exists == False and rgw_archive_leftover.stat.exists == True
- name: Archive rados gateway configs
shell: >
ansible.builtin.shell: >
tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
creates={{ ansible_facts['hostname'] }}.tar
@ -494,7 +494,7 @@
when: migration_completed.stat.exists == False
- name: Reboot the server
command: reboot
ansible.builtin.command: reboot
when: migration_completed.stat.exists == False
- name: Wait for the server to come up
@ -520,7 +520,7 @@
when: migration_completed.stat.exists == False
- name: Copy keys and configs
shell: >
ansible.builtin.shell: >
{{ item }}
chdir=/var/lib/ceph/
with_items: cp etc/ceph/* /etc/ceph/

View File

@ -31,7 +31,7 @@
tasks:
- name: load a variable file for devices partition
- name: Load a variable file for devices partition
include_vars: "{{ item }}"
with_first_found:
- files:
@ -39,24 +39,24 @@
- "host_vars/default.yml"
skip: true
- name: exit playbook, if devices not defined
fail:
- name: Exit playbook, if devices not defined
ansible.builtin.fail:
msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml"
when: devices is not defined
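For reference, the loops below assume a nested devices layout roughly like the following hypothetical host_vars/default.yml (device names, sizes and indexes are made up):

devices:
  - device_name: sdb        # the zap task prefixes /dev/ itself
    partitions:
      - index: 1
        size: 30G
        type: data
      - index: 2
        size: 5G
        type: journal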
- name: install sgdisk(gdisk)
package:
- name: Install sgdisk(gdisk)
ansible.builtin.package:
name: gdisk
state: present
register: result
until: result is succeeded
- name: erase all previous partitions(dangerous!!!)
shell: sgdisk --zap-all -- /dev/{{item.device_name}}
- name: Erase all previous partitions(dangerous!!!)
ansible.builtin.shell: sgdisk --zap-all -- /dev/{{item.device_name}}
with_items: "{{ devices }}"
- name: make osd partitions
shell: >
- name: Make osd partitions
ansible.builtin.shell: >
sgdisk --new={{item.1.index}}:0:+{{item.1.size}} "--change-name={{item.1.index}}:ceph {{item.1.type}}"
"--typecode={{item.1.index}}:{% if item.1.type=='data' %}{{data_typecode}}{% else %}{{journal_typecode}}{% endif %}"
--mbrtogpt -- /dev/{{item.0.device_name}}
@ -74,8 +74,8 @@
group: 64045
when: ansible_facts['os_family'] == "Debian"
- name: change partitions ownership
file:
- name: Change partitions ownership
ansible.builtin.file:
path: "/dev/{{item.0.device_name}}{{item.1.index}}"
owner: "{{ owner | default('root')}}"
group: "{{ group | default('disk')}}"
@ -85,8 +85,8 @@
when:
item.0.device_name | match('/dev/([hsv]d[a-z]{1,2}){1,2}$')
- name: change partitions ownership
file:
- name: Change partitions ownership
ansible.builtin.file:
path: "/dev/{{item.0.device_name}}p{{item.1.index}}"
owner: "{{ owner | default('root')}}"
group: "{{ group | default('disk')}}"

View File

@ -37,69 +37,69 @@
serial: 1
tasks:
- name: get osd(s) if directory stat
stat:
- name: Get osd(s) if directory stat
ansible.builtin.stat:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
register: osds_dir_stat
with_subelements:
- "{{ osds_journal_devices }}"
- partitions
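The with_subelements loops in this play expect an osds_journal_devices structure along these lines (a hypothetical example, values are illustrative):

osds_journal_devices:
  - device_name: /dev/sdf
    partitions:
      - index: 1
        size: 10G
        osd_id: 0
      - index: 2
        size: 10G
        osd_id: 1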
- name: exit playbook osd(s) is not on this host
fail:
- name: Exit playbook osd(s) is not on this host
ansible.builtin.fail:
msg: exit playbook osd(s) is not on this host
with_items:
osds_dir_stat.results
when: osds_dir_stat is defined and item.stat.exists == false
- name: install sgdisk(gdisk)
package:
- name: Install sgdisk(gdisk)
ansible.builtin.package:
name: gdisk
state: present
register: result
until: result is succeeded
when: osds_journal_devices is defined
- name: generate uuid for osds journal
command: uuidgen
- name: Generate uuid for osds journal
ansible.builtin.command: uuidgen
register: osds
with_subelements:
- "{{ osds_journal_devices }}"
- partitions
- name: make osd partitions on ssd
shell: >
- name: Make osd partitions on ssd
ansible.builtin.shell: >
sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal"
--typecode={{ item.item[1].index }}:{{ journal_typecode }}
--partition-guid={{ item.item[1].index }}:{{ item.stdout }}
--mbrtogpt -- {{ item.item[0].device_name }}
with_items: "{{ osds.results }}"
- name: stop osd(s) service
service:
- name: Stop osd(s) service
ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: stopped
with_items: "{{ osds.results }}"
- name: flush osd(s) journal
command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }}
- name: Flush osd(s) journal
ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }}
with_items: "{{ osds.results }}"
when: osds_journal_devices is defined
- name: update osd(s) journal soft link
command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal
- name: Update osd(s) journal soft link
ansible.builtin.command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal
with_items: "{{ osds.results }}"
- name: update osd(s) journal uuid
command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid
- name: Update osd(s) journal uuid
ansible.builtin.command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid
with_items: "{{ osds.results }}"
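Note that the command module performs no shell redirection, so the '>' above is passed to echo as a literal argument rather than rewriting journal_uuid; a redirection-free sketch (not what this play does) would be:

- name: Update osd(s) journal uuid
  ansible.builtin.copy:
    dest: "/var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid"
    content: "{{ item.stdout }}\n"
    mode: "0644"
  with_items: "{{ osds.results }}"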
- name: initialize osd(s) new journal
command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
- name: Initialize osd(s) new journal
ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
with_items: "{{ osds.results }}"
- name: start osd(s) service
service:
- name: Start osd(s) service
ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: started
with_items: "{{ osds.results }}"

View File

@ -1,11 +1,11 @@
---
# Nukes a multisite config
- hosts: rgws
become: True
become: true
tasks:
- include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml
handlers:
# Ansible 2.1.0 bug will ignore included handlers without this
- name: import_tasks roles/ceph-rgw/handlers/main.yml
- name: Import_tasks roles/ceph-rgw/handlers/main.yml
import_tasks: roles/ceph-rgw/handlers/main.yml

View File

@ -40,11 +40,11 @@
# automation scripts to avoid interactive prompt.
- hosts: localhost
gather_facts: no
gather_facts: false
vars_prompt:
- name: target_host
- name: target_host # noqa: name[casing]
prompt: please enter the target hostname on which to recover osds after ssd journal failure
private: no
private: false
tasks:
- add_host:
name: "{{ target_host }}"
@ -59,16 +59,16 @@
- fail: msg="please define dev_ssds variable"
when: dev_ssds|length <= 0
- name: get osd(s) if directory stat
stat:
- name: Get osd(s) if directory stat
ansible.builtin.stat:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
register: osds_dir_stat
with_subelements:
- "{{ dev_ssds }}"
- partitions
- name: exit playbook osd(s) is not on this host
fail:
- name: Exit playbook osd(s) is not on this host
ansible.builtin.fail:
msg: exit playbook osd(s) is not on this host
with_items:
osds_dir_stat.results
@ -76,40 +76,40 @@
- osds_dir_stat is defined | bool
- item.stat.exists == false
- name: install sgdisk(gdisk)
package:
- name: Install sgdisk(gdisk)
ansible.builtin.package:
name: gdisk
state: present
register: result
until: result is succeeded
- name: get osd(s) journal uuid
command: cat "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
- name: Get osd(s) journal uuid
ansible.builtin.command: cat "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
register: osds_uuid
with_subelements:
- "{{ dev_ssds }}"
- partitions
- name: make partitions on new ssd
shell: >
- name: Make partitions on new ssd
ansible.builtin.shell: >
sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal"
--typecode={{ item.item[1].index }}:{{ journal_typecode }}
--partition-guid={{ item.item[1].index }}:{{ item.stdout }}
--mbrtogpt -- {{ item.item[0].device_name }}
with_items: "{{ osds_uuid.results }}"
- name: stop osd(s) service
service:
- name: Stop osd(s) service
ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: stopped
with_items: "{{ osds_uuid.results }}"
- name: reinitialize osd(s) journal in new ssd
command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
- name: Reinitialize osd(s) journal in new ssd
ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
with_items: "{{ osds_uuid.results }}"
- name: start osd(s) service
service:
- name: Start osd(s) service
ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: started
with_items: "{{ osds_uuid.results }}"

View File

@ -15,38 +15,38 @@
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
- name: gather facts and check the init system
- name: Gather facts and check the init system
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
become: True
become: true
tasks:
- debug: msg="gather facts on all Ceph hosts for following reference"
- ansible.builtin.debug: msg="gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to replace osd(s)
- name: Confirm whether user really meant to replace osd(s)
hosts: localhost
become: true
vars_prompt:
- name: ireallymeanit
- name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to replace the osd(s)?
default: 'no'
private: no
private: false
vars:
mon_group_name: mons
osd_group_name: osds
pre_tasks:
- name: exit playbook, if user did not mean to replace the osd(s)
fail:
- name: Exit playbook, if user did not mean to replace the osd(s)
ansible.builtin.fail:
msg: "Exiting replace-osd playbook, no osd(s) was/were replaced..
To replace the osd(s), either say 'yes' on the prompt or
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- name: exit playbook, if no osd(s) was/were given
fail:
- name: Exit playbook, if no osd(s) was/were given
ansible.builtin.fail:
msg: "osd_to_replace must be declared
Exiting replace-osd playbook, no OSD(s) was/were replaced.
On the command line when invoking the playbook, you can use
@ -54,36 +54,36 @@
when: osd_to_replace is not defined
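Both safeguards can be satisfied non-interactively; assuming the playbook lives at infrastructure-playbooks/replace-osd.yml (path shown for illustration only), an invocation could look like:

ansible-playbook infrastructure-playbooks/replace-osd.yml -e ireallymeanit=yes -e osd_to_replace=0,3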
tasks:
- import_role:
- ansible.builtin.import_role:
name: ceph-defaults
post_tasks:
- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
- name: Set_fact container_exec_cmd build docker exec command (containerized)
ansible.builtin.set_fact:
container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
- name: Exit playbook, if can not connect to the cluster
ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
until: ceph_health.stdout.find("HEALTH") > -1
delegate_to: "{{ groups[mon_group_name][0] }}"
retries: 5
delay: 2
- name: find the host(s) where the osd(s) is/are running on
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
- name: Find the host(s) where the osd(s) is/are running on
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
with_items: "{{ osd_to_replace.split(',') }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
register: find_osd_hosts
- name: set_fact osd_hosts
set_fact:
- name: Set_fact osd_hosts
ansible.builtin.set_fact:
osd_hosts: "{{ osd_hosts | default([]) + [ (item.stdout | from_json).crush_location.host ] }}"
with_items: "{{ find_osd_hosts.results }}"
- name: check if ceph admin key exists on the osd nodes
stat:
- name: Check if ceph admin key exists on the osd nodes
ansible.builtin.stat:
path: "/etc/ceph/{{ cluster }}.client.admin.keyring"
register: ceph_admin_key
with_items: "{{ osd_hosts }}"
@ -91,8 +91,8 @@
failed_when: false
when: not containerized_deployment | bool
- name: fail when admin key is not present
fail:
- name: Fail when admin key is not present
ansible.builtin.fail:
msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
with_items: "{{ ceph_admin_key.results }}"
when:
@ -100,8 +100,8 @@
- item.stat.exists == false
# NOTE(leseb): using '>' is the only way I could have the command working
- name: find osd device based on the id
shell: >
- name: Find osd device based on the id
ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
list | awk -v pattern=osd.{{ item.1 }} '$0 ~ pattern {print $1}'
@ -112,8 +112,8 @@
delegate_to: "{{ item.0 }}"
when: containerized_deployment | bool
- name: zapping osd(s) - container
shell: >
- name: Zapping osd(s) - container
ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
zap {{ item.1 }}
@ -124,8 +124,8 @@
delegate_to: "{{ item.0 }}"
when: containerized_deployment | bool
- name: zapping osd(s) - non container
command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}
- name: Zapping osd(s) - non container
ansible.builtin.command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}
run_once: true
with_together:
- "{{ osd_hosts }}"
@ -133,8 +133,8 @@
delegate_to: "{{ item.0 }}"
when: not containerized_deployment | bool
- name: destroying osd(s)
command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap
- name: Destroying osd(s)
ansible.builtin.command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap
run_once: true
with_together:
- "{{ osd_hosts }}"
@ -142,8 +142,8 @@
delegate_to: "{{ item.0 }}"
when: not containerized_deployment | bool
- name: replace osd(s) - prepare - non container
command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)
- name: Replace osd(s) - prepare - non container
ansible.builtin.command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)
run_once: true
delegate_to: "{{ item.0 }}"
with_together:
@ -151,8 +151,8 @@
- "{{ osd_to_replace_disks.results }}"
- "{{ osd_to_replace.split(',') }}"
- name: replace osd(s) - prepare - container
shell: >
- name: Replace osd(s) - prepare - container
ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
prepare {{ item.1 }}
@ -162,16 +162,16 @@
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
- name: replace osd(s) - activate - non container
command: ceph-disk activate {{ item.1 }}1
- name: Replace osd(s) - activate - non container
ansible.builtin.command: ceph-disk activate {{ item.1 }}1
run_once: true
delegate_to: "{{ item.0 }}"
with_together:
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
- name: replace osd(s) - activate - container
shell: >
- name: Replace osd(s) - activate - container
ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
activate {{ item.1 }}1
@ -181,10 +181,10 @@
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
- name: show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
- name: Show ceph health
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: show ceph osd tree
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
- name: Show ceph osd tree
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
delegate_to: "{{ groups[mon_group_name][0] }}"

View File

@ -37,5 +37,5 @@ pools:
# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
keys:
- { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
- { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }
- { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
- { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }

View File

@ -4,11 +4,11 @@ galaxy_info:
author: Sébastien Han
description: Installs A Ceph Client
license: Apache
min_ansible_version: 2.7
min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- 7
- 'all'
galaxy_tags:
- system
dependencies: []

View File

@ -1,13 +1,13 @@
---
- name: set_fact delegated_node
set_fact:
- name: Set_fact delegated_node
ansible.builtin.set_fact:
delegated_node: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else inventory_hostname }}"
- name: set_fact admin_key_presence
set_fact:
- name: Set_fact admin_key_presence
ansible.builtin.set_fact:
admin_key_presence: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}"
- name: create cephx key(s)
- name: Create cephx key(s)
ceph_key:
name: "{{ item.name }}"
caps: "{{ item.caps }}"
@ -30,8 +30,8 @@
- inventory_hostname == groups.get('_filtered_clients') | first
no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: slurp client cephx key(s)
slurp:
- name: Slurp client cephx key(s)
ansible.builtin.slurp:
src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
with_items: "{{ keys }}"
register: slurp_client_keys
@ -42,16 +42,17 @@
- inventory_hostname == groups.get('_filtered_clients') | first
no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: pool related tasks
- name: Pool related tasks
when:
- admin_key_presence | bool
- inventory_hostname == groups.get('_filtered_clients', []) | first
block:
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: get_def_crush_rule_name.yml
- name: create ceph pool(s)
- name: Create ceph pool(s)
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
@ -72,8 +73,8 @@
changed_when: false
delegate_to: "{{ delegated_node }}"
- name: get client cephx keys
copy:
- name: Get client cephx keys
ansible.builtin.copy:
dest: "{{ item.source }}"
content: "{{ item.content | b64decode }}"
mode: "{{ item.item.get('mode', '0600') }}"
@ -82,4 +83,3 @@
with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}"
when: not item.get('skipped', False)
no_log: "{{ no_log_on_ceph_key_tasks }}"

View File

@ -1,10 +1,10 @@
---
- name: include pre_requisite.yml
include_tasks: pre_requisite.yml
- name: Include pre_requisite.yml
ansible.builtin.include_tasks: pre_requisite.yml
when: groups.get(mon_group_name, []) | length > 0
- name: include create_users_keys.yml
include_tasks: create_users_keys.yml
- name: Include create_users_keys.yml
ansible.builtin.include_tasks: create_users_keys.yml
when:
- user_config | bool
- not rolling_update | default(False) | bool

View File

@ -1,7 +1,10 @@
---
- name: copy ceph admin keyring
- name: Copy ceph admin keyring
when:
- cephx | bool
- copy_admin_key | bool
block:
- name: get keys from monitors
- name: Get keys from monitors
ceph_key:
name: client.admin
cluster: "{{ cluster }}"
@ -15,14 +18,11 @@
run_once: true
no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: copy ceph key(s) if needed
copy:
- name: Copy ceph key(s) if needed
ansible.builtin.copy:
dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
content: "{{ _admin_key.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
no_log: "{{ no_log_on_ceph_key_tasks }}"
when:
- cephx | bool
- copy_admin_key | bool

View File

@ -4,11 +4,11 @@ galaxy_info:
author: Sébastien Han
description: Installs Ceph
license: Apache
min_ansible_version: 2.7
min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- 7
- 'all'
galaxy_tags:
- system
dependencies: []

View File

@ -1,11 +1,12 @@
---
- name: configure cluster name
lineinfile:
- name: Configure cluster name
ansible.builtin.lineinfile:
dest: /etc/sysconfig/ceph
insertafter: EOF
create: yes
create: true
line: "CLUSTER={{ cluster }}"
regexp: "^CLUSTER="
mode: "0644"
when: ansible_facts['os_family'] in ["RedHat", "Suse"]
# NOTE(leseb): we are performing the following check
@ -18,32 +19,34 @@
# - Jewel from latest Canonical 16.04 distro
# - All previous versions from Canonical
# - Infernalis from ceph.com
- name: debian based systems - configure cluster name
- name: Debian based systems - configure cluster name
when: ansible_facts['os_family'] == "Debian"
block:
- name: check /etc/default/ceph exist
stat:
- name: Check /etc/default/ceph exist
ansible.builtin.stat:
path: /etc/default/ceph
register: etc_default_ceph
check_mode: no
check_mode: false
- name: configure cluster name
- name: Configure cluster name
when: etc_default_ceph.stat.exists
block:
- name: when /etc/default/ceph is not dir
lineinfile:
- name: When /etc/default/ceph is not dir
ansible.builtin.lineinfile:
dest: /etc/default/ceph
insertafter: EOF
create: yes
create: true
regexp: "^CLUSTER="
line: "CLUSTER={{ cluster }}"
mode: "0644"
when: not etc_default_ceph.stat.isdir
- name: when /etc/default/ceph is dir
lineinfile:
- name: When /etc/default/ceph is dir
ansible.builtin.lineinfile:
dest: /etc/default/ceph/ceph
insertafter: EOF
create: yes
create: true
regexp: "^CLUSTER="
line: "CLUSTER={{ cluster }}"
mode: "0644"
when: etc_default_ceph.stat.isdir

View File

@ -1,34 +1,36 @@
---
- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for debian
lineinfile:
- name: Configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for debian
ansible.builtin.lineinfile:
dest: "{{ etc_default_ceph.stat.isdir | ternary('/etc/default/ceph/ceph', '/etc/default/ceph') }}"
insertafter: EOF
create: yes
create: true
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
mode: "0644"
when:
- ansible_facts['os_family'] == 'Debian'
- etc_default_ceph.stat.exists
notify:
- restart ceph mons
- restart ceph mgrs
- restart ceph osds
- restart ceph mdss
- restart ceph rgws
- restart ceph rbdmirrors
- Restart ceph mons
- Restart ceph mgrs
- Restart ceph osds
- Restart ceph mdss
- Restart ceph rgws
- Restart ceph rbdmirrors
- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat
lineinfile:
- name: Configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat
ansible.builtin.lineinfile:
dest: "/etc/sysconfig/ceph"
insertafter: EOF
create: yes
create: true
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
mode: "0644"
when: ansible_facts['os_family'] == 'RedHat'
notify:
- restart ceph mons
- restart ceph mgrs
- restart ceph osds
- restart ceph mdss
- restart ceph rgws
- restart ceph rbdmirrors
- Restart ceph mons
- Restart ceph mgrs
- Restart ceph osds
- Restart ceph mdss
- Restart ceph rgws
- Restart ceph rbdmirrors
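Both variants manage the same key in the respective environment file; with an illustrative cache size of 134217728 bytes (128 MiB), the resulting line in /etc/sysconfig/ceph, or in the corresponding /etc/default/ceph file on Debian, reads:

TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728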

View File

@ -1,32 +1,32 @@
---
- name: config repository for Red Hat based OS
- name: Config repository for Red Hat based OS
when: ansible_facts['os_family'] == 'RedHat'
block:
- name: include installs/configure_redhat_repository_installation.yml
include_tasks: installs/configure_redhat_repository_installation.yml
- name: Include installs/configure_redhat_repository_installation.yml
ansible.builtin.include_tasks: installs/configure_redhat_repository_installation.yml
when: ceph_origin == 'repository'
- name: include installs/configure_redhat_local_installation.yml
include_tasks: installs/configure_redhat_local_installation.yml
- name: Include installs/configure_redhat_local_installation.yml
ansible.builtin.include_tasks: installs/configure_redhat_local_installation.yml
when: ceph_origin == 'local'
- name: config repository for Debian based OS
- name: Config repository for Debian based OS
when: ansible_facts['os_family'] == 'Debian'
tags: package-install
block:
- name: include installs/configure_debian_repository_installation.yml
include_tasks: installs/configure_debian_repository_installation.yml
- name: Include installs/configure_debian_repository_installation.yml
ansible.builtin.include_tasks: installs/configure_debian_repository_installation.yml
when: ceph_origin == 'repository'
- name: update apt cache if cache_valid_time has expired
apt:
update_cache: yes
- name: Update apt cache if cache_valid_time has expired
ansible.builtin.apt:
update_cache: true
cache_valid_time: 3600
register: result
until: result is succeeded
tags: package-install
- name: include installs/configure_suse_repository_installation.yml
include_tasks: installs/configure_suse_repository_installation.yml
- name: Include installs/configure_suse_repository_installation.yml
ansible.builtin.include_tasks: installs/configure_suse_repository_installation.yml
when:
- ansible_facts['os_family'] == 'Suse'
- ceph_origin == 'repository'

View File

@ -1,6 +1,6 @@
---
- name: create rbd client directory
file:
- name: Create rbd client directory
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ rbd_client_directory_owner }}"

View File

@ -1,16 +1,16 @@
---
- name: include debian_community_repository.yml
include_tasks: debian_community_repository.yml
- name: Include debian_community_repository.yml
ansible.builtin.include_tasks: debian_community_repository.yml
when: ceph_repository == 'community'
- name: include debian_dev_repository.yml
include_tasks: debian_dev_repository.yml
- name: Include debian_dev_repository.yml
ansible.builtin.include_tasks: debian_dev_repository.yml
when: ceph_repository == 'dev'
- name: include debian_custom_repository.yml
include_tasks: debian_custom_repository.yml
- name: Include debian_custom_repository.yml
ansible.builtin.include_tasks: debian_custom_repository.yml
when: ceph_repository == 'custom'
- name: include debian_uca_repository.yml
include_tasks: debian_uca_repository.yml
- name: Include debian_uca_repository.yml
ansible.builtin.include_tasks: debian_uca_repository.yml
when: ceph_repository == 'uca'

View File

@ -1,43 +1,45 @@
---
- name: make sure /tmp exists
file:
- name: Make sure /tmp exists
ansible.builtin.file:
path: /tmp
state: directory
mode: "0755"
when: use_installer | bool
- name: use mktemp to create name for rundep
tempfile:
- name: Use mktemp to create name for rundep
ansible.builtin.tempfile:
path: /tmp
prefix: rundep.
register: rundep_location
when: use_installer | bool
- name: copy rundep
copy:
- name: Copy rundep
ansible.builtin.copy:
src: "{{ ansible_dir }}/rundep"
dest: "{{ rundep_location.path }}"
mode: preserve
when: use_installer | bool
- name: install ceph dependencies
script: "{{ ansible_dir }}/rundep_installer.sh {{ rundep_location.path }}"
- name: Install ceph dependencies
ansible.builtin.script: "{{ ansible_dir }}/rundep_installer.sh {{ rundep_location.path }}"
when: use_installer | bool
- name: ensure rsync is installed
package:
- name: Ensure rsync is installed
ansible.builtin.package:
name: rsync
state: present
register: result
until: result is succeeded
- name: synchronize ceph install
synchronize:
- name: Synchronize ceph install
ansible.posix.synchronize:
src: "{{ ceph_installation_dir }}/"
dest: "/"
- name: create user group ceph
group:
- name: Create user group ceph
ansible.builtin.group:
name: 'ceph'
- name: create user ceph
user:
- name: Create user ceph
ansible.builtin.user:
name: 'ceph'

View File

@ -1,22 +1,22 @@
---
- name: include redhat_community_repository.yml
include_tasks: redhat_community_repository.yml
- name: Include redhat_community_repository.yml
ansible.builtin.include_tasks: redhat_community_repository.yml
when: ceph_repository == 'community'
- name: include redhat_rhcs_repository.yml
include_tasks: redhat_rhcs_repository.yml
- name: Include redhat_rhcs_repository.yml
ansible.builtin.include_tasks: redhat_rhcs_repository.yml
when: ceph_repository == 'rhcs'
- name: include redhat_dev_repository.yml
include_tasks: redhat_dev_repository.yml
- name: Include redhat_dev_repository.yml
ansible.builtin.include_tasks: redhat_dev_repository.yml
when: ceph_repository == 'dev'
- name: include redhat_custom_repository.yml
include_tasks: redhat_custom_repository.yml
- name: Include redhat_custom_repository.yml
ansible.builtin.include_tasks: redhat_custom_repository.yml
when: ceph_repository == 'custom'
# Remove yum caches so yum doesn't get confused if we are reinstalling a different ceph version
- name: purge yum cache
command: yum clean all #noqa: [303]
- name: Purge yum cache
ansible.builtin.command: yum clean all # noqa: [303]
changed_when: false
when: ansible_facts['pkg_mgr'] == 'yum'

View File

@ -1,4 +1,4 @@
---
- name: include suse_obs_repository.yml
include_tasks: suse_obs_repository.yml
- name: Include suse_obs_repository.yml
ansible.builtin.include_tasks: suse_obs_repository.yml
when: ceph_repository == 'obs'

View File

@ -1,20 +1,20 @@
---
- name: install dependencies for apt modules
package:
- name: Install dependencies for apt modules
ansible.builtin.package:
name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common']
update_cache: yes
update_cache: true
register: result
until: result is succeeded
- name: configure debian ceph community repository stable key
apt_key:
data: "{{ lookup('file', role_path+'/files/cephstable.asc') }}"
- name: Configure debian ceph community repository stable key
ansible.builtin.apt_key:
data: "{{ lookup('file', role_path + '/files/cephstable.asc') }}"
state: present
register: result
until: result is succeeded
- name: configure debian ceph stable community repository
apt_repository:
- name: Configure debian ceph stable community repository
ansible.builtin.apt_repository:
repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
state: present
update_cache: yes
update_cache: true

View File

@ -1,14 +1,14 @@
---
- name: configure debian custom apt key
apt_key:
- name: Configure debian custom apt key
ansible.builtin.apt_key:
url: "{{ ceph_custom_key }}"
state: present
register: result
until: result is succeeded
when: ceph_custom_key is defined
- name: configure debian custom repository
apt_repository:
- name: Configure debian custom repository
ansible.builtin.apt_repository:
repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main"
state: present
update_cache: yes
update_cache: true

View File

@ -1,12 +1,12 @@
---
- name: fetch ceph debian development repository
uri:
- name: Fetch ceph debian development repository
ansible.builtin.uri:
url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo?arch={{ ansible_facts['architecture'] }}"
return_content: yes
return_content: true
register: ceph_dev_deb_repo
- name: configure ceph debian development repository
apt_repository:
- name: Configure ceph debian development repository
ansible.builtin.apt_repository:
repo: "{{ ceph_dev_deb_repo.content }}"
state: present
update_cache: yes
update_cache: true

View File

@ -1,12 +1,12 @@
---
- name: add ubuntu cloud archive key package
package:
- name: Add ubuntu cloud archive key package
ansible.builtin.package:
name: ubuntu-cloud-keyring
register: result
until: result is succeeded
- name: add ubuntu cloud archive repository
apt_repository:
- name: Add ubuntu cloud archive repository
ansible.builtin.apt_repository:
repo: "deb {{ ceph_stable_repo_uca }} {{ ceph_stable_release_uca }} main"
state: present
update_cache: yes
update_cache: true

View File

@ -1,9 +1,9 @@
---
- name: install ceph for debian
apt:
- name: Install ceph for debian
ansible.builtin.apt:
name: "{{ debian_ceph_pkgs | unique }}"
update_cache: no
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
update_cache: false
state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
register: result
until: result is succeeded
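The default_release expression resolves to at most one of its two branches; with illustrative values it behaves as follows:

ceph_origin=repository, ceph_repository=uca        -> default_release = "{{ ceph_stable_release_uca }}"
ceph_origin=distro, ceph_use_distro_backports=true -> default_release = "<distribution_release>-backports" (e.g. focal-backports)
any other combination                              -> default_release = "" (apt picks its normal candidate)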

View File

@ -1,7 +1,7 @@
---
- name: install red hat storage ceph packages for debian
apt:
- name: Install red hat storage ceph packages for debian
ansible.builtin.apt:
pkg: "{{ debian_ceph_pkgs | unique }}"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded

View File

@ -1,6 +1,6 @@
---
- name: install ceph bundle
swupd:
- name: Install ceph bundle
community.general.swupd:
name: storage-cluster
state: present
register: result

View File

@ -1,20 +1,20 @@
- name: install dependencies
apt:
- name: Install dependencies
ansible.builtin.apt:
name: "{{ debian_package_dependencies }}"
state: present
update_cache: yes
update_cache: true
cache_valid_time: 3600
register: result
until: result is succeeded
- name: include install_debian_packages.yml
include_tasks: install_debian_packages.yml
- name: Include install_debian_packages.yml
ansible.builtin.include_tasks: install_debian_packages.yml
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository != 'rhcs'
- name: include install_debian_rhcs_packages.yml
include_tasks: install_debian_rhcs_packages.yml
- name: Include install_debian_rhcs_packages.yml
ansible.builtin.include_tasks: install_debian_rhcs_packages.yml
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository == 'rhcs'

View File

@ -1,23 +1,23 @@
---
- name: install redhat dependencies
package:
- name: Install redhat dependencies
ansible.builtin.package:
name: "{{ redhat_package_dependencies }}"
state: present
register: result
until: result is succeeded
when: ansible_facts['distribution'] == 'RedHat'
- name: install centos dependencies
yum:
- name: Install centos dependencies
ansible.builtin.yum:
name: "{{ centos_package_dependencies }}"
state: present
register: result
until: result is succeeded
when: ansible_facts['distribution'] == 'CentOS'
- name: install redhat ceph packages
package:
- name: Install redhat ceph packages
ansible.builtin.package:
name: "{{ redhat_ceph_pkgs | unique }}"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded

View File

@ -1,14 +1,14 @@
---
- name: install SUSE/openSUSE dependencies
package:
- name: Install SUSE/openSUSE dependencies
ansible.builtin.package:
name: "{{ suse_package_dependencies }}"
state: present
register: result
until: result is succeeded
- name: install SUSE/openSUSE ceph packages
package:
- name: Install SUSE/openSUSE ceph packages
ansible.builtin.package:
name: "{{ suse_ceph_pkgs | unique }}"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded

View File

@ -1,6 +1,6 @@
---
- name: enable red hat storage tools repository
rhsm_repository:
- name: Enable red hat storage tools repository
community.general.rhsm_repository:
name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
when:
- mon_group_name in group_names

View File

@ -1,24 +1,24 @@
---
- name: install yum plugin priorities
package:
- name: Install yum plugin priorities
ansible.builtin.package:
name: yum-plugin-priorities
register: result
until: result is succeeded
tags: with_pkg
when: ansible_facts['distribution_major_version'] | int == 7
- name: configure red hat ceph community repository stable key
rpm_key:
- name: Configure red hat ceph community repository stable key
ansible.builtin.rpm_key:
key: "{{ ceph_stable_key }}"
state: present
register: result
until: result is succeeded
- name: configure red hat ceph stable community repository
yum_repository:
- name: Configure red hat ceph stable community repository
ansible.builtin.yum_repository:
name: ceph_stable
description: Ceph Stable $basearch repo
gpgcheck: yes
gpgcheck: true
state: present
gpgkey: "{{ ceph_stable_key }}"
baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch"
@ -27,11 +27,11 @@
register: result
until: result is succeeded
- name: configure red hat ceph stable noarch community repository
yum_repository:
- name: Configure red hat ceph stable noarch community repository
ansible.builtin.yum_repository:
name: ceph_stable_noarch
description: Ceph Stable noarch repo
gpgcheck: yes
gpgcheck: true
state: present
gpgkey: "{{ ceph_stable_key }}"
baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch"

View File

@ -1,15 +1,16 @@
---
- name: configure red hat custom rpm key
rpm_key:
- name: Configure red hat custom rpm key
ansible.builtin.rpm_key:
key: "{{ ceph_custom_key }}"
state: present
register: result
until: result is succeeded
when: ceph_custom_key is defined
- name: configure red hat custom repository
get_url:
- name: Configure red hat custom repository
ansible.builtin.get_url:
url: "{{ ceph_custom_repo }}"
dest: /etc/yum.repos.d
owner: root
group: root
mode: "0644"

View File

@ -1,21 +1,22 @@
---
- name: fetch ceph red hat development repository
uri:
- name: Fetch ceph red hat development repository
ansible.builtin.uri:
# Use the centos repo since we don't currently have a dedicated red hat repo
url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/centos/{{ ansible_facts['distribution_major_version'] }}/repo?arch={{ ansible_facts['architecture'] }}"
return_content: yes
return_content: true
register: ceph_dev_yum_repo
- name: configure ceph red hat development repository
copy:
- name: Configure ceph red hat development repository
ansible.builtin.copy:
content: "{{ ceph_dev_yum_repo.content }}"
dest: /etc/yum.repos.d/ceph-dev.repo
owner: root
group: root
backup: yes
mode: "0644"
backup: true
- name: remove ceph_stable repositories
yum_repository:
- name: Remove ceph_stable repositories
ansible.builtin.yum_repository:
name: '{{ item }}'
file: ceph_stable
state: absent

View File

@ -1,3 +1,3 @@
---
- name: include prerequisite_rhcs_cdn_install.yml
include_tasks: prerequisite_rhcs_cdn_install.yml
- name: Include prerequisite_rhcs_cdn_install.yml
ansible.builtin.include_tasks: prerequisite_rhcs_cdn_install.yml

View File

@ -1,8 +1,8 @@
---
- name: configure openSUSE ceph OBS repository
zypper_repository:
- name: Configure openSUSE ceph OBS repository
community.general.zypper_repository:
name: "OBS:filesystems:ceph:{{ ceph_release }}"
state: present
repo: "{{ ceph_obs_repo }}"
auto_import_keys: yes
autorefresh: yes
auto_import_keys: true
autorefresh: true

View File

@ -1,70 +1,70 @@
---
- name: include configure_repository.yml
include_tasks: configure_repository.yml
- name: Include configure_repository.yml
ansible.builtin.include_tasks: configure_repository.yml
tags: package-configure
- name: include installs/install_redhat_packages.yml
include_tasks: installs/install_redhat_packages.yml
- name: Include installs/install_redhat_packages.yml
ansible.builtin.include_tasks: installs/install_redhat_packages.yml
when:
- ansible_facts['os_family'] == 'RedHat'
- (ceph_origin == 'repository' or ceph_origin == 'distro')
tags: package-install
- name: include installs/install_suse_packages.yml
include_tasks: installs/install_suse_packages.yml
- name: Include installs/install_suse_packages.yml
ansible.builtin.include_tasks: installs/install_suse_packages.yml
when: ansible_facts['os_family'] == 'Suse'
tags: package-install
- name: include installs/install_on_debian.yml
include_tasks: installs/install_on_debian.yml
- name: Include installs/install_on_debian.yml
ansible.builtin.include_tasks: installs/install_on_debian.yml
tags: package-install
when: ansible_facts['os_family'] == 'Debian'
- name: include_tasks installs/install_on_clear.yml
include_tasks: installs/install_on_clear.yml
- name: Include_tasks installs/install_on_clear.yml
ansible.builtin.include_tasks: installs/install_on_clear.yml
when: ansible_facts['os_family'] == 'ClearLinux'
tags: package-install
- name: get ceph version
command: ceph --version
- name: Get ceph version
ansible.builtin.command: ceph --version
changed_when: false
check_mode: no
check_mode: false
register: ceph_version
- name: set_fact ceph_version
set_fact:
- name: Set_fact ceph_version
ansible.builtin.set_fact:
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
- name: include release-rhcs.yml
include_tasks: release-rhcs.yml
- name: Include release-rhcs.yml
ansible.builtin.include_tasks: release-rhcs.yml
when: ceph_repository in ['rhcs', 'dev']
or
ceph_origin == 'distro'
tags: always
- name: set_fact ceph_release - override ceph_release with ceph_stable_release
set_fact:
- name: Set_fact ceph_release - override ceph_release with ceph_stable_release
ansible.builtin.set_fact:
ceph_release: "{{ ceph_stable_release }}"
when:
- ceph_origin == 'repository'
- ceph_repository not in ['dev', 'rhcs', 'custom']
tags: always
- name: include create_rbd_client_dir.yml
include_tasks: create_rbd_client_dir.yml
- name: Include create_rbd_client_dir.yml
ansible.builtin.include_tasks: create_rbd_client_dir.yml
- name: include configure_cluster_name.yml
include_tasks: configure_cluster_name.yml
- name: Include configure_cluster_name.yml
ansible.builtin.include_tasks: configure_cluster_name.yml
- name: include configure_memory_allocator.yml
include_tasks: configure_memory_allocator.yml
- name: Include configure_memory_allocator.yml
ansible.builtin.include_tasks: configure_memory_allocator.yml
when:
- (ceph_tcmalloc_max_total_thread_cache | int) > 0
- (ceph_origin == 'repository' or ceph_origin == 'distro')
- name: include selinux.yml
include_tasks: selinux.yml
- name: Include selinux.yml
ansible.builtin.include_tasks: selinux.yml
when:
- ansible_facts['os_family'] == 'RedHat'
- inventory_hostname in groups.get(nfs_group_name, [])

View File

@ -1,45 +1,45 @@
---
- name: set_fact ceph_release jewel
set_fact:
- name: Set_fact ceph_release jewel
ansible.builtin.set_fact:
ceph_release: jewel
when: ceph_version.split('.')[0] is version('10', '==')
- name: set_fact ceph_release kraken
set_fact:
- name: Set_fact ceph_release kraken
ansible.builtin.set_fact:
ceph_release: kraken
when: ceph_version.split('.')[0] is version('11', '==')
- name: set_fact ceph_release luminous
set_fact:
- name: Set_fact ceph_release luminous
ansible.builtin.set_fact:
ceph_release: luminous
when: ceph_version.split('.')[0] is version('12', '==')
- name: set_fact ceph_release mimic
set_fact:
- name: Set_fact ceph_release mimic
ansible.builtin.set_fact:
ceph_release: mimic
when: ceph_version.split('.')[0] is version('13', '==')
- name: set_fact ceph_release nautilus
set_fact:
- name: Set_fact ceph_release nautilus
ansible.builtin.set_fact:
ceph_release: nautilus
when: ceph_version.split('.')[0] is version('14', '==')
- name: set_fact ceph_release octopus
set_fact:
- name: Set_fact ceph_release octopus
ansible.builtin.set_fact:
ceph_release: octopus
when: ceph_version.split('.')[0] is version('15', '==')
- name: set_fact ceph_release pacific
set_fact:
- name: Set_fact ceph_release pacific
ansible.builtin.set_fact:
ceph_release: pacific
when: ceph_version.split('.')[0] is version('16', '==')
- name: set_fact ceph_release quincy
set_fact:
- name: Set_fact ceph_release quincy
ansible.builtin.set_fact:
ceph_release: quincy
when: ceph_version.split('.')[0] is version('17', '==')
- name: set_fact ceph_release reef
set_fact:
- name: Set_fact ceph_release reef
ansible.builtin.set_fact:
ceph_release: reef
when: ceph_version.split('.')[0] is version('18', '==')
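Taken together these nine tasks form a lookup table from the major version to the release name; an equivalent single-task sketch (shown only for comparison, not what this change does) could be:

- name: Set_fact ceph_release from the major version
  ansible.builtin.set_fact:
    ceph_release: "{{ {'10': 'jewel', '11': 'kraken', '12': 'luminous', '13': 'mimic',
                       '14': 'nautilus', '15': 'octopus', '16': 'pacific',
                       '17': 'quincy', '18': 'reef'}[ceph_version.split('.')[0]] }}"

One behavioural difference worth noting: the dict lookup would fail on an unknown major version, whereas the per-version tasks simply leave ceph_release untouched.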

View File

@ -1,17 +1,17 @@
---
- name: if selinux is not disabled
- name: If selinux is not disabled
when: ansible_facts['selinux']['status'] == 'enabled'
block:
- name: install policycoreutils-python
package:
- name: Install policycoreutils-python
ansible.builtin.package:
name: policycoreutils-python
state: present
register: result
until: result is succeeded
when: ansible_facts['distribution_major_version'] == '7'
- name: install python3-policycoreutils on RHEL 8
package:
- name: Install python3-policycoreutils on RHEL 8
ansible.builtin.package:
name: python3-policycoreutils
state: present
register: result

View File

@ -4,11 +4,11 @@ galaxy_info:
author: Guillaume Abrioux
description: Handles ceph-ansible initial configuration
license: Apache
min_ansible_version: 2.7
min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- 7
- 'all'
galaxy_tags:
- system
dependencies: []

View File

@ -1,11 +1,11 @@
---
- name: create ceph initial directories
file:
- name: Create ceph initial directories
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}"
mode: 0755
mode: "0755"
loop:
- /etc/ceph
- /var/lib/ceph/

View File

@ -1,30 +1,33 @@
---
- name: include create_ceph_initial_dirs.yml
include_tasks: create_ceph_initial_dirs.yml
- name: Include create_ceph_initial_dirs.yml
ansible.builtin.include_tasks: create_ceph_initial_dirs.yml
when: containerized_deployment | bool
- name: include_tasks rgw_systemd_environment_file.yml
include_tasks: rgw_systemd_environment_file.yml
- name: Include_tasks rgw_systemd_environment_file.yml
ansible.builtin.include_tasks: rgw_systemd_environment_file.yml
when: inventory_hostname in groups.get(rgw_group_name, [])
- name: config file operations related to OSDs
- name: Config file operations related to OSDs
when:
- inventory_hostname in groups.get(osd_group_name, [])
# the rolling_update.yml playbook sets num_osds to the number of currently
# running osds
- not rolling_update | bool
block:
- name: reset num_osds
set_fact:
- name: Reset num_osds
ansible.builtin.set_fact:
num_osds: 0
- name: count number of osds for lvm scenario
set_fact:
- name: Count number of osds for lvm scenario
ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + (lvm_volumes | length | int) }}"
when: lvm_volumes | default([]) | length > 0
- block:
- name: look up for ceph-volume rejected devices
- name: Ceph-volume pre-requisites tasks
when:
- devices | default([]) | length > 0
block:
- name: Look up for ceph-volume rejected devices
ceph_volume:
cluster: "{{ cluster }}"
action: "inventory"
@ -35,17 +38,17 @@
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
PYTHONIOENCODING: utf-8
- name: set_fact rejected_devices
set_fact:
- name: Set_fact rejected_devices
ansible.builtin.set_fact:
_rejected_devices: "{{ _rejected_devices | default([]) + [item.path] }}"
with_items: "{{ rejected_devices.stdout | default('{}') | from_json }}"
when: "'Used by ceph-disk' in item.rejected_reasons"
- name: set_fact _devices
set_fact:
- name: Set_fact _devices
ansible.builtin.set_fact:
_devices: "{{ devices | difference(_rejected_devices | default([])) }}"
- name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
- name: Run 'ceph-volume lvm batch --report' to see how many osds are to be created
ceph_volume:
cluster: "{{ cluster }}"
objectstore: "{{ osd_objectstore }}"
@ -62,23 +65,21 @@
PYTHONIOENCODING: utf-8
when: _devices | default([]) | length > 0
- name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' (legacy report)
set_fact:
- name: Set_fact num_osds from the output of 'ceph-volume lvm batch --report' (legacy report)
ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json).osds | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}"
when:
- (lvm_batch_report.stdout | default('{}') | from_json) is mapping
- (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool
- name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' (new report)
set_fact:
- name: Set_fact num_osds from the output of 'ceph-volume lvm batch --report' (new report)
ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json) | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}"
when:
- (lvm_batch_report.stdout | default('{}') | from_json) is not mapping
- (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool
when:
- devices | default([]) | length > 0
- name: run 'ceph-volume lvm list' to see how many osds have already been created
- name: Run 'ceph-volume lvm list' to see how many osds have already been created
ceph_volume:
action: "list"
register: lvm_list
@ -89,31 +90,31 @@
PYTHONIOENCODING: utf-8
changed_when: false
- name: set_fact num_osds (add existing osds)
set_fact:
- name: Set_fact num_osds (add existing osds)
ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + (lvm_list.stdout | default('{}') | from_json | dict2items | map(attribute='value') | flatten | map(attribute='devices') | sum(start=[]) | difference(lvm_volumes | default([]) | map(attribute='data')) | length | int) }}"
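To make the filter chain concrete, consider a hypothetical (abridged) 'ceph-volume lvm list --format json' report such as:

{
  "0": [{"devices": ["/dev/sdb"], "type": "block"}],
  "1": [{"devices": ["/dev/sdc"], "type": "block"}]
}

dict2items | map(attribute='value') | flatten | map(attribute='devices') | sum(start=[]) yields ['/dev/sdb', '/dev/sdc']; the difference() against the lvm_volumes data paths then drops any device already counted, so num_osds would grow by 2 in this example.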
- name: set osd related config facts
- name: Set osd related config facts
when: inventory_hostname in groups.get(osd_group_name, [])
block:
- name: set_fact _osd_memory_target, override from ceph_conf_overrides
set_fact:
- name: Set_fact _osd_memory_target, override from ceph_conf_overrides
ansible.builtin.set_fact:
_osd_memory_target: "{{ item }}"
loop:
- "{{ ceph_conf_overrides.get('osd', {}).get('osd memory target', '') }}"
- "{{ ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') }}"
when: item
- name: set_fact _osd_memory_target
set_fact:
- name: Set_fact _osd_memory_target
ansible.builtin.set_fact:
_osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
when:
- _osd_memory_target is undefined
- num_osds | default(0) | int > 0
- ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > (osd_memory_target | float)
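As a worked example with made-up numbers: on a node reporting memtotal_mb=65536, with safety_factor=0.7 and num_osds=8, the expression evaluates to

65536 * 1048576 * 0.7 / 8 = 6012954214 bytes (about 5.6 GiB per OSD)

and the fact is only set when that figure exceeds the configured osd_memory_target.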
- name: create ceph conf directory
file:
- name: Create ceph conf directory
ansible.builtin.file:
path: "/etc/ceph"
state: directory
owner: "ceph"
@ -121,13 +122,13 @@
mode: "{{ ceph_directories_mode }}"
when: not containerized_deployment | bool
- name: import_role ceph-facts
import_role:
- name: Import_role ceph-facts
ansible.builtin.import_role:
name: ceph-facts
tasks_from: set_radosgw_address.yml
when: inventory_hostname in groups.get(rgw_group_name, [])
- name: "generate {{ cluster }}.conf configuration file"
- name: Generate Ceph file
openstack.config_template.config_template:
src: "ceph.conf.j2"
dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
@ -136,10 +137,10 @@
mode: "0644"
config_type: ini
notify:
- restart ceph mons
- restart ceph osds
- restart ceph mdss
- restart ceph rgws
- restart ceph mgrs
- restart ceph rbdmirrors
- restart ceph rbd-target-api-gw
- Restart ceph mons
- Restart ceph osds
- Restart ceph mdss
- Restart ceph rgws
- Restart ceph mgrs
- Restart ceph rbdmirrors
- Restart ceph rbd-target-api-gw

View File

@ -1,6 +1,6 @@
---
- name: create rados gateway instance directories
file:
- name: Create rados gateway instance directories
ansible.builtin.file:
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@ -8,8 +8,8 @@
mode: "{{ ceph_directories_mode | default('0755') }}"
with_items: "{{ rgw_instances }}"
- name: generate environment file
copy:
- name: Generate environment file
ansible.builtin.copy:
dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
owner: "root"
group: "root"

View File

@ -4,11 +4,11 @@ galaxy_info:
author: Sébastien Han
description: Installs Ceph
license: Apache
min_ansible_version: 2.7
min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- 7
- 'all'
galaxy_tags:
- system
dependencies: []

View File

@ -1,6 +1,6 @@
---
- name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- name: Pulling Ceph container image
ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
register: docker_image
until: docker_image.rc == 0
@ -12,8 +12,8 @@
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
NO_PROXY: "{{ ceph_docker_no_proxy }}"
- name: "pulling alertmanager/prometheus/grafana container images"
command: "{{ timeout_command }} {{ container_binary }} pull {{ item }}"
- name: Pulling alertmanager/prometheus/grafana container images
ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ item }}"
changed_when: false
register: monitoring_images
until: monitoring_images.rc == 0
@ -31,8 +31,8 @@
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
NO_PROXY: "{{ ceph_docker_no_proxy }}"
- name: "pulling node-exporter container image"
command: "{{ timeout_command }} {{ container_binary }} pull {{ node_exporter_container_image }}"
- name: Pulling node-exporter container image
ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ node_exporter_container_image }}"
changed_when: false
register: node_exporter_image
until: node_exporter_image.rc == 0
@ -54,27 +54,29 @@
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
NO_PROXY: "{{ ceph_docker_no_proxy }}"
- name: export local ceph dev image
command: >
- name: Export local ceph dev image
ansible.builtin.command: >
{{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
"{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
delegate_to: localhost
changed_when: false
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
run_once: true
- name: copy ceph dev image file
copy:
- name: Copy ceph dev image file
ansible.builtin.copy:
src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
mode: "0644"
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
- name: load ceph dev image
command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- name: Load ceph dev image
ansible.builtin.command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
changed_when: false
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
- name: remove tmp ceph dev image file
file:
- name: Remove tmp ceph dev image file
ansible.builtin.file:
name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
state: absent
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)

View File

@ -1,38 +1,39 @@
---
- name: generate systemd ceph-mon target file
copy:
- name: Generate systemd ceph-mon target file
ansible.builtin.copy:
src: ceph.target
dest: /etc/systemd/system/ceph.target
mode: "0644"
- name: enable ceph.target
service:
- name: Enable ceph.target
ansible.builtin.service:
name: ceph.target
enabled: yes
daemon_reload: yes
enabled: true
daemon_reload: true
- name: include prerequisites.yml
include_tasks: prerequisites.yml
- name: Include prerequisites.yml
ansible.builtin.include_tasks: prerequisites.yml
- name: include registry.yml
include_tasks: registry.yml
- name: Include registry.yml
ansible.builtin.include_tasks: registry.yml
when: ceph_docker_registry_auth | bool
- name: include fetch_image.yml
include_tasks: fetch_image.yml
- name: Include fetch_image.yml
ansible.builtin.include_tasks: fetch_image.yml
tags: fetch_container_image
- name: get ceph version
command: >
- name: Get ceph version
ansible.builtin.command: >
{{ container_binary }} run --rm --net=host --entrypoint /usr/bin/ceph
{{ ceph_client_docker_registry }}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }}
--version
changed_when: false
check_mode: no
check_mode: false
register: ceph_version
- name: set_fact ceph_version ceph_version.stdout.split
set_fact:
- name: Set_fact ceph_version ceph_version.stdout.split
ansible.builtin.set_fact:
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
- name: include release.yml
include_tasks: release.yml
- name: Include release.yml
ansible.builtin.include_tasks: release.yml

View File

@ -1,50 +1,50 @@
---
- name: lvmetad tasks related
- name: Lvmetad tasks related
when:
- inventory_hostname in groups.get(osd_group_name, [])
- lvmetad_disabled | default(False) | bool
- ansible_facts['os_family'] == 'RedHat'
- ansible_facts['distribution_major_version'] | int == 7
block:
- name: stop lvmetad
service:
- name: Stop lvmetad
ansible.builtin.service:
name: lvm2-lvmetad
state: stopped
- name: disable and mask lvmetad service
service:
- name: Disable and mask lvmetad service
ansible.builtin.systemd:
name: lvm2-lvmetad
enabled: no
masked: yes
enabled: false
masked: true
- name: remove ceph udev rules
file:
- name: Remove ceph udev rules
ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /usr/lib/udev/rules.d/95-ceph-osd.rules
- /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules
- name: ensure tmpfiles.d is present
lineinfile:
- name: Ensure tmpfiles.d is present
ansible.builtin.lineinfile:
path: /etc/tmpfiles.d/ceph-common.conf
line: "d /run/ceph 0770 root root -"
owner: root
group: root
mode: 0644
mode: "0644"
state: present
create: yes
create: true
- name: restore certificates selinux context
- name: Restore certificates selinux context
when:
- ansible_facts['os_family'] == 'RedHat'
- inventory_hostname in groups.get(mon_group_name, [])
or inventory_hostname in groups.get(rgw_group_name, [])
command: /usr/sbin/restorecon -RF /etc/pki/ca-trust/extracted
ansible.builtin.command: /usr/sbin/restorecon -RF /etc/pki/ca-trust/extracted
changed_when: false
- name: install python3 on osd nodes
package:
- name: Install python3 on osd nodes
ansible.builtin.package:
name: python3
state: present
when:

View File

@ -1,9 +1,9 @@
---
- name: container registry authentication
command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry }}'
- name: Container registry authentication
ansible.builtin.command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry }}'
args:
stdin: '{{ ceph_docker_registry_password }}'
stdin_add_newline: no
stdin_add_newline: false
changed_when: false
environment:
HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"
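The login task above reads the registry password from stdin and is only included when ceph_docker_registry_auth is enabled. A minimal sketch of the matching group_vars, assuming a placeholder account (real values are site-specific and are better kept in Ansible Vault):

  ceph_docker_registry: quay.io
  ceph_docker_registry_auth: true
  ceph_docker_registry_username: myregistryuser       # placeholder account name
  ceph_docker_registry_password: 'mySecretPassword'   # placeholder, store in a vault in practice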

View File

@ -1,45 +1,45 @@
---
- name: set_fact ceph_release jewel
set_fact:
- name: Set_fact ceph_release jewel
ansible.builtin.set_fact:
ceph_release: jewel
when: ceph_version.split('.')[0] is version('10', '==')
- name: set_fact ceph_release kraken
set_fact:
- name: Set_fact ceph_release kraken
ansible.builtin.set_fact:
ceph_release: kraken
when: ceph_version.split('.')[0] is version('11', '==')
- name: set_fact ceph_release luminous
set_fact:
- name: Set_fact ceph_release luminous
ansible.builtin.set_fact:
ceph_release: luminous
when: ceph_version.split('.')[0] is version('12', '==')
- name: set_fact ceph_release mimic
set_fact:
- name: Set_fact ceph_release mimic
ansible.builtin.set_fact:
ceph_release: mimic
when: ceph_version.split('.')[0] is version('13', '==')
- name: set_fact ceph_release nautilus
set_fact:
- name: Set_fact ceph_release nautilus
ansible.builtin.set_fact:
ceph_release: nautilus
when: ceph_version.split('.')[0] is version('14', '==')
- name: set_fact ceph_release octopus
set_fact:
- name: Set_fact ceph_release octopus
ansible.builtin.set_fact:
ceph_release: octopus
when: ceph_version.split('.')[0] is version('15', '==')
- name: set_fact ceph_release pacific
set_fact:
- name: Set_fact ceph_release pacific
ansible.builtin.set_fact:
ceph_release: pacific
when: ceph_version.split('.')[0] is version('16', '==')
- name: set_fact ceph_release quincy
set_fact:
- name: Set_fact ceph_release quincy
ansible.builtin.set_fact:
ceph_release: quincy
when: ceph_version.split('.')[0] is version('17', '==')
- name: set_fact ceph_release reef
set_fact:
- name: Set_fact ceph_release reef
ansible.builtin.set_fact:
ceph_release: reef
when: ceph_version.split('.')[0] is version('18', '==')
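The nine version-gated Set_fact tasks above implement a simple major-version to codename lookup. Purely as an illustrative alternative, not part of this change, the same mapping could be expressed with one task and a helper dictionary; _release_map below is a hypothetical variable, not one defined by the role:

  - name: Set_fact ceph_release from major version (illustrative sketch only)
    ansible.builtin.set_fact:
      ceph_release: "{{ _release_map[ceph_version.split('.')[0]] | default('unknown') }}"
    vars:
      _release_map:   # hypothetical helper mapping covering the most recent releases listed above
        '14': nautilus
        '15': octopus
        '16': pacific
        '17': quincy
        '18': reef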

View File

@ -4,14 +4,14 @@ galaxy_info:
author: Guillaume Abrioux
description: Handles container installation prerequisites
license: Apache
min_ansible_version: 2.7
min_ansible_version: '2.7'
platforms:
- name: Ubuntu
versions:
- xenial
- name: EL
versions:
- 7
- 'all'
galaxy_tags:
- system
dependencies: []

View File

@ -1,4 +1,4 @@
---
- name: include pre_requisites/prerequisites.yml
include_tasks: pre_requisites/prerequisites.yml
- name: Include pre_requisites/prerequisites.yml
ansible.builtin.include_tasks: pre_requisites/prerequisites.yml
when: not is_atomic | bool

View File

@ -1,31 +1,31 @@
---
- name: uninstall old docker versions
package:
- name: Uninstall old docker versions
ansible.builtin.package:
name: ['docker', 'docker-engine', 'docker.io', 'containerd', 'runc']
state: absent
when: container_package_name == 'docker-ce'
- name: allow apt to use a repository over https (debian)
package:
- name: Allow apt to use a repository over https (debian)
ansible.builtin.package:
name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common']
update_cache: yes
update_cache: true
register: result
until: result is succeeded
- name: add docker's gpg key
apt_key:
- name: Add docker's gpg key
ansible.builtin.apt_key:
url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg"
register: result
until: result is succeeded
when: container_package_name == 'docker-ce'
- name: add docker repository
apt_repository:
- name: Add docker repository
ansible.builtin.apt_repository:
repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable"
when: container_package_name == 'docker-ce'
- name: add podman ppa repository
apt_repository:
- name: Add podman ppa repository
ansible.builtin.apt_repository:
repo: "ppa:projectatomic/ppa"
when:
- container_package_name == 'podman'

View File

@ -1,54 +1,55 @@
---
- name: include specific variables
include_vars: "{{ item }}"
- name: Include specific variables
ansible.builtin.include_vars: "{{ item }}"
with_first_found:
- "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml"
- "{{ ansible_facts['os_family'] }}.yml"
when: container_package_name is undefined and container_service_name is undefined
- name: debian based systems tasks
include_tasks: debian_prerequisites.yml
- name: Debian based systems tasks
ansible.builtin.include_tasks: debian_prerequisites.yml
when:
- ansible_facts['os_family'] == 'Debian'
tags: with_pkg
- name: install container packages
package:
- name: Install container packages
ansible.builtin.package:
name: '{{ container_package_name }}'
update_cache: true
register: result
until: result is succeeded
tags: with_pkg
- name: install lvm2 package
package:
- name: Install lvm2 package
ansible.builtin.package:
name: lvm2
register: result
until: result is succeeded
tags: with_pkg
when: inventory_hostname in groups.get(osd_group_name, [])
- name: extra configuration for docker
- name: Extra configuration for docker
when: container_service_name == 'docker'
block:
- name: create the systemd docker override directory
file:
- name: Create the systemd docker override directory
ansible.builtin.file:
path: /etc/systemd/system/docker.service.d
state: directory
mode: "0755"
when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined
- name: create the systemd docker override file
template:
- name: Create the systemd docker override file
ansible.builtin.template:
src: docker-proxy.conf.j2
dest: /etc/systemd/system/docker.service.d/proxy.conf
mode: 0600
mode: "0600"
owner: root
group: root
register: proxy_created
when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined
- name: remove docker proxy configuration
file:
- name: Remove docker proxy configuration
ansible.builtin.file:
path: /etc/systemd/system/docker.service.d/proxy.conf
state: absent
register: proxy_removed
@ -60,17 +61,17 @@
# have an immediate effect and not wait the end of the play.
# using flush_handlers via the meta action plugin isn't enough too because
# it flushes all handlers and not only the one notified in this role.
- name: restart docker
systemd:
- name: Restart docker
ansible.builtin.systemd:
name: "{{ container_service_name }}"
state: restarted
daemon_reload: yes
daemon_reload: true
when: proxy_created.changed | bool or proxy_removed.changed | bool
- name: start container service
service:
- name: Start container service
ansible.builtin.service:
name: '{{ container_service_name }}'
state: started
enabled: yes
enabled: true
tags:
with_pkg
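The docker proxy override tasks above are driven entirely by the optional ceph_docker_http_proxy and ceph_docker_https_proxy variables. A minimal sketch of how they might be set in group_vars, assuming a placeholder proxy endpoint:

  ceph_docker_http_proxy: http://proxy.example.com:3128    # placeholder proxy URL
  ceph_docker_https_proxy: http://proxy.example.com:3128   # placeholder proxy URL
  ceph_docker_no_proxy: "localhost,127.0.0.1"              # default value from ceph-defaults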

View File

@ -4,12 +4,11 @@ galaxy_info:
author: Guillaume Abrioux
description: Deploy ceph-crash
license: Apache
min_ansible_version: 2.7
min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- 7
- 8
- 'all'
galaxy_tags:
- system
dependencies: []

View File

@ -1,8 +1,8 @@
---
- name: create and copy client.crash keyring
- name: Create and copy client.crash keyring
when: cephx | bool
block:
- name: create client.crash keyring
- name: Create client.crash keyring
ceph_key:
name: "client.crash"
caps:
@ -10,7 +10,7 @@
mgr: 'allow profile crash'
cluster: "{{ cluster }}"
dest: "{{ ceph_conf_key_directory }}"
import_key: True
import_key: true
mode: "{{ ceph_keyring_permissions }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@ -18,10 +18,10 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
run_once: True
run_once: true
no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: get keys from monitors
- name: Get keys from monitors
ceph_key:
name: client.crash
cluster: "{{ cluster }}"
@ -35,8 +35,8 @@
run_once: true
no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: copy ceph key(s) if needed
copy:
- name: Copy ceph key(s) if needed
ansible.builtin.copy:
dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.client.crash.keyring"
content: "{{ _crash_keys.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@ -44,24 +44,24 @@
mode: "{{ ceph_keyring_permissions }}"
no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: start ceph-crash daemon
- name: Start ceph-crash daemon
when: containerized_deployment | bool
block:
- name: create /var/lib/ceph/crash/posted
file:
- name: Create /var/lib/ceph/crash/posted
ansible.builtin.file:
path: /var/lib/ceph/crash/posted
state: directory
mode: '0755'
owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}"
- name: include_tasks systemd.yml
include_tasks: systemd.yml
- name: Include_tasks systemd.yml
ansible.builtin.include_tasks: systemd.yml
- name: start the ceph-crash service
systemd:
- name: Start the ceph-crash service
ansible.builtin.systemd:
name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
state: started
enabled: yes
masked: no
daemon_reload: yes
enabled: true
masked: false
daemon_reload: true

View File

@ -1,9 +1,9 @@
---
- name: generate systemd unit file for ceph-crash container
template:
- name: Generate systemd unit file for ceph-crash container
ansible.builtin.template:
src: "{{ role_path }}/templates/ceph-crash.service.j2"
dest: /etc/systemd/system/ceph-crash@.service
owner: "root"
group: "root"
mode: "0644"
notify: restart ceph crash
notify: Restart ceph crash

View File

@ -4,11 +4,11 @@ galaxy_info:
author: Boris Ranto
description: Configures Ceph Dashboard
license: Apache
min_ansible_version: 2.4
min_ansible_version: '2.4'
platforms:
- name: EL
versions:
- 7
- 'all'
galaxy_tags:
- system
dependencies: []

View File

@ -1,36 +1,38 @@
---
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
delegate_to: "{{ groups[mon_group_name][0] }}"
delegate_facts: true
- name: set_fact container_exec_cmd
set_fact:
- name: Set_fact container_exec_cmd
ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: set_fact container_run_cmd
set_fact:
- name: Set_fact container_run_cmd
ansible.builtin.set_fact:
ceph_cmd: "{{ hostvars[groups[mon_group_name][0]]['container_binary'] + ' run --interactive --net=host --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
- name: get current mgr backend - ipv4
set_fact:
- name: Get current mgr backend - ipv4
ansible.builtin.set_fact:
dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(dashboard_network.split(',')) | first }}"
when: ip_version == 'ipv4'
loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}"
delegate_to: "{{ item }}"
delegate_facts: True
delegate_facts: true
- name: get current mgr backend - ipv6
set_fact:
- name: Get current mgr backend - ipv6
ansible.builtin.set_fact:
dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(dashboard_network.split(',')) | last }}"
when: ip_version == 'ipv6'
loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}"
delegate_to: "{{ item }}"
delegate_facts: True
delegate_facts: true
- include_role:
- name: Include ceph-facts role
ansible.builtin.include_role:
name: ceph-facts
tasks_from: set_radosgw_address.yml
loop: "{{ groups.get(rgw_group_name, []) }}"
@ -39,100 +41,103 @@
loop_var: ceph_dashboard_call_item
when: inventory_hostname in groups.get(rgw_group_name, [])
- name: disable SSL for dashboard
- name: Disable SSL for dashboard
when: dashboard_protocol == "http"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
block:
- name: get SSL status for dashboard
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get mgr mgr/dashboard/ssl"
changed_when: false
register: current_ssl_for_dashboard
- name: Get SSL status for dashboard
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get mgr mgr/dashboard/ssl"
changed_when: false
register: current_ssl_for_dashboard
- name: disable SSL for dashboard
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl false"
when: current_ssl_for_dashboard.stdout == "true"
- name: Disable SSL for dashboard
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl false"
changed_when: false
when: current_ssl_for_dashboard.stdout == "true"
- name: with SSL for dashboard
- name: With SSL for dashboard
when: dashboard_protocol == "https"
block:
- name: enable SSL for dashboard
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl true"
- name: Enable SSL for dashboard
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl true"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
- name: copy dashboard SSL certificate file
copy:
- name: Copy dashboard SSL certificate file
ansible.builtin.copy:
src: "{{ dashboard_crt }}"
dest: "/etc/ceph/ceph-dashboard.crt"
owner: root
group: root
mode: 0440
mode: "0440"
remote_src: "{{ dashboard_tls_external | bool }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: dashboard_crt | length > 0
- name: copy dashboard SSL certificate key
copy:
- name: Copy dashboard SSL certificate key
ansible.builtin.copy:
src: "{{ dashboard_key }}"
dest: "/etc/ceph/ceph-dashboard.key"
owner: root
group: root
mode: 0440
mode: "0440"
remote_src: "{{ dashboard_tls_external | bool }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: dashboard_key | length > 0
- name: generate and copy self-signed certificate
- name: Generate and copy self-signed certificate
when: dashboard_key | length == 0 or dashboard_crt | length == 0
run_once: true
block:
- name: set_fact subj_alt_names
set_fact:
- name: Set_fact subj_alt_names
ansible.builtin.set_fact:
subj_alt_names: >
{% for host in groups[mgr_group_name] | default(groups[mon_group_name]) -%}
DNS:{{ hostvars[host]['ansible_facts']['hostname'] }},DNS:{{ hostvars[host]['ansible_facts']['fqdn'] }},IP:{{ hostvars[host]['dashboard_server_addr'] }}{% if not loop.last %},{% endif %}
{%- endfor -%}
{% for host in groups[mgr_group_name] | default(groups[mon_group_name]) -%} DNS:{{ hostvars[host]['ansible_facts']['hostname'] }},DNS:{{ hostvars[host]['ansible_facts']['fqdn'] }},IP:{{ hostvars[host]['dashboard_server_addr'] }}{% if not loop.last %},{% endif %}{%- endfor -%}
- name: create tempfile for openssl certificate and key generation
tempfile:
- name: Create tempfile for openssl certificate and key generation
ansible.builtin.tempfile:
state: file
register: openssl_config_file
- name: copy the openssl configuration file
copy:
- name: Copy the openssl configuration file
ansible.builtin.copy:
src: "{{ '/etc/pki/tls/openssl.cnf' if ansible_facts['os_family'] == 'RedHat' else '/etc/ssl/openssl.cnf' }}"
dest: '{{ openssl_config_file.path }}'
remote_src: true
mode: "0644"
- name: add subjectAltName to the openssl configuration
ini_file:
- name: Add subjectAltName to the openssl configuration
community.general.ini_file:
path: '{{ openssl_config_file.path }}'
section: v3_ca
option: subjectAltName
value: '{{ subj_alt_names | trim }}'
mode: "0644"
- name: generate a Self Signed OpenSSL certificate for dashboard
shell: |
- name: Generate a Self Signed OpenSSL certificate for dashboard
ansible.builtin.shell: |
test -f /etc/ceph/ceph-dashboard.key -a -f /etc/ceph/ceph-dashboard.crt || \
openssl req -new -nodes -x509 -subj '/O=IT/CN={{ dashboard_certificate_cn }}/' -config {{ openssl_config_file.path }} -days 3650 -keyout /etc/ceph/ceph-dashboard.key -out /etc/ceph/ceph-dashboard.crt -extensions v3_ca
changed_when: false
- name: remove the openssl tempfile
file:
- name: Remove the openssl tempfile
ansible.builtin.file:
path: '{{ openssl_config_file.path }}'
state: absent
- name: slurp self-signed generated certificate for dashboard
slurp:
- name: Slurp self-signed generated certificate for dashboard
ansible.builtin.slurp:
src: "/etc/ceph/{{ item }}"
run_once: True
run_once: true
with_items:
- 'ceph-dashboard.key'
- 'ceph-dashboard.crt'
register: slurp_self_signed_crt
- name: copy self-signed generated certificate on mons
copy:
- name: Copy self-signed generated certificate on mons
ansible.builtin.copy:
dest: "{{ item.0.source }}"
content: "{{ item.0.content | b64decode }}"
owner: "{{ ceph_uid }}"
@ -143,39 +148,39 @@
- "{{ slurp_self_signed_crt.results }}"
- "{{ groups[mon_group_name] }}"
- name: import dashboard certificate file
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt"
- name: Import dashboard certificate file
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: import dashboard certificate key
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/key -i /etc/ceph/ceph-dashboard.key"
- name: Import dashboard certificate key
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/key -i /etc/ceph/ceph-dashboard.key"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: "set the dashboard port ({{ dashboard_port }})"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/server_port {{ dashboard_port }}"
- name: Set the dashboard port
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/server_port {{ dashboard_port }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: "set the dashboard SSL port ({{ dashboard_port }})"
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl_server_port {{ dashboard_port }}"
- name: Set the dashboard SSL port
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl_server_port {{ dashboard_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
failed_when: false # Do not fail if the option does not exist, it only exists post-14.2.0
- name: config the current dashboard backend
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[item]['ansible_facts']['hostname'] }}/server_addr {{ hostvars[item]['dashboard_server_addr'] }}"
- name: Config the current dashboard backend
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[item]['ansible_facts']['hostname'] }}/server_addr {{ hostvars[item]['dashboard_server_addr'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
run_once: true
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
- name: disable mgr dashboard module (restart)
- name: Disable mgr dashboard module (restart)
ceph_mgr_module:
name: dashboard
cluster: "{{ cluster }}"
@ -186,7 +191,7 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: enable mgr dashboard module (restart)
- name: Enable mgr dashboard module (restart)
ceph_mgr_module:
name: dashboard
cluster: "{{ cluster }}"
@ -197,7 +202,7 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: create dashboard admin user
- name: Create dashboard admin user
ceph_dashboard_user:
name: "{{ dashboard_admin_user }}"
cluster: "{{ cluster }}"
@ -209,30 +214,30 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- name: disable unused dashboard features
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard feature disable {{ item }}"
- name: Disable unused dashboard features
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard feature disable {{ item }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
with_items: "{{ dashboard_disabled_features }}"
- name: set grafana api user
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-username {{ grafana_admin_user }}"
- name: Set grafana api user
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-username {{ grafana_admin_user }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
- name: set grafana api password
command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-grafana-api-password -i -"
- name: Set grafana api password
ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-grafana-api-password -i -"
args:
stdin: "{{ grafana_admin_password }}"
stdin_add_newline: no
stdin_add_newline: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
- name: disable ssl verification for grafana
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-ssl-verify False"
- name: Disable ssl verification for grafana
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-ssl-verify False"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
@ -240,101 +245,102 @@
- dashboard_protocol == "https"
- dashboard_grafana_api_no_ssl_verify | bool
- name: set alertmanager host
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host http://{{ grafana_server_addrs | first }}:{{ alertmanager_port }}"
- name: Set alertmanager host
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host http://{{ grafana_server_addrs | first }}:{{ alertmanager_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
- name: set prometheus host
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host http://{{ grafana_server_addrs | first }}:{{ prometheus_port }}"
- name: Set prometheus host
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host http://{{ grafana_server_addrs | first }}:{{ prometheus_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
- include_tasks: configure_grafana_layouts.yml
- name: Include grafana layout tasks
ansible.builtin.include_tasks: configure_grafana_layouts.yml
with_items: '{{ grafana_server_addrs }}'
vars:
grafana_server_addr: '{{ item }}'
- name: config monitoring api url vip
- name: Config monitoring api url vip
run_once: true
block:
- name: config grafana api url vip
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_frontend_vip }}:{{ grafana_port }}"
- name: Config grafana api url vip
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_frontend_vip }}:{{ grafana_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: dashboard_frontend_vip is defined and dashboard_frontend_vip | length > 0
- name: config alertmanager api url
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ alertmanager_frontend_vip }}:{{ alertmanager_port }}"
- name: Config alertmanager api url
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ alertmanager_frontend_vip }}:{{ alertmanager_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: alertmanager_frontend_vip is defined and alertmanager_frontend_vip | length > 0
- name: config prometheus api url
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host {{ dashboard_protocol }}://{{ prometheus_frontend_vip }}:{{ prometheus_port }}"
- name: Config prometheus api url
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host {{ dashboard_protocol }}://{{ prometheus_frontend_vip }}:{{ prometheus_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: prometheus_frontend_vip is defined and prometheus_frontend_vip | length > 0
- name: dashboard object gateway management frontend
- name: Dashboard object gateway management frontend
when: groups.get(rgw_group_name, []) | length > 0
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
block:
- name: set the rgw credentials
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-credentials"
- name: Set the rgw credentials
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-credentials"
changed_when: false
register: result
until: result is succeeded
retries: 5
- name: set the rgw admin resource
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}"
- name: Set the rgw admin resource
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}"
changed_when: false
when: dashboard_rgw_api_admin_resource | length > 0
- name: disable ssl verification for rgw
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-ssl-verify False"
- name: Disable ssl verification for rgw
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-ssl-verify False"
changed_when: false
when:
- dashboard_rgw_api_no_ssl_verify | bool
- radosgw_frontend_ssl_certificate | length > 0
- name: dashboard iscsi management
- name: Dashboard iscsi management
when: groups.get(iscsi_gw_group_name, []) | length > 0
run_once: true
block:
- name: disable iscsi api ssl verification
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-iscsi-api-ssl-verification false"
- name: Disable iscsi api ssl verification
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-iscsi-api-ssl-verification false"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- api_secure | default(false) | bool
- generate_crt | default(false) | bool
- name: add iscsi gateways - ipv4
command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
- name: Add iscsi gateways - ipv4
ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
args:
stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(igw_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
stdin_add_newline: no
stdin_add_newline: false
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ groups[iscsi_gw_group_name] }}"
when: ip_version == 'ipv4'
- name: add iscsi gateways - ipv6
command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
- name: Add iscsi gateways - ipv6
ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
args:
stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(igw_network.split(',')) | last | ansible.utils.ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
stdin_add_newline: no
stdin_add_newline: false
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ groups[iscsi_gw_group_name] }}"
when: ip_version == 'ipv6'
- name: disable mgr dashboard module (restart)
- name: Disable mgr dashboard module (restart)
ceph_mgr_module:
name: dashboard
cluster: "{{ cluster }}"
@ -345,7 +351,7 @@
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: enable mgr dashboard module (restart)
- name: Enable mgr dashboard module (restart)
ceph_mgr_module:
name: dashboard
cluster: "{{ cluster }}"
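The "Config monitoring api url vip" block earlier in this file only fires when the corresponding *_frontend_vip variables are non-empty; they default to empty strings in ceph-defaults. A minimal override sketch, with a placeholder virtual IP:

  dashboard_frontend_vip: '192.0.2.10'       # placeholder VIP fronting grafana
  alertmanager_frontend_vip: '192.0.2.10'    # placeholder VIP fronting alertmanager
  prometheus_frontend_vip: '192.0.2.10'      # placeholder VIP fronting prometheus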

View File

@ -1,12 +1,12 @@
---
- name: set grafana url
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}"
- name: Set grafana url
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
- name: inject grafana dashboard layouts
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update"
- name: Inject grafana dashboard layouts
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false

View File

@ -1,8 +1,8 @@
---
- name: include configure_dashboard.yml
include_tasks: configure_dashboard.yml
- name: Include configure_dashboard.yml
ansible.builtin.include_tasks: configure_dashboard.yml
- name: print dashboard URL
debug:
- name: Print dashboard URL
ansible.builtin.debug:
msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
run_once: true

View File

@ -66,7 +66,7 @@ adopt_label_group_names:
# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
# with each other.
configure_firewall: True
configure_firewall: true
# Open ports on corresponding nodes if firewall is installed on it
ceph_mon_firewall_zone: public
@ -112,7 +112,7 @@ ntp_daemon_type: chronyd
# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
upgrade_ceph_packages: False
upgrade_ceph_packages: false
ceph_use_distro_backports: false # DEBIAN ONLY
ceph_directories_mode: "0755"
@ -163,7 +163,7 @@ libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubun
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@ -221,7 +221,7 @@ ceph_iscsi_config_dev: true # special repo for deploying iSCSI gateways
# a URL to the .repo file to be installed on the targets. For deb,
# ceph_custom_repo should be the URL to the repo base.
#
#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
ceph_custom_repo: https://server.domain.com/ceph-custom-repo
@ -230,14 +230,14 @@ ceph_custom_repo: https://server.domain.com/ceph-custom-repo
# Enabled when ceph_repository == 'local'
#
# Path to DESTDIR of the ceph install
#ceph_installation_dir: "/path/to/ceph_installation/"
# ceph_installation_dir: "/path/to/ceph_installation/"
# Whether or not to use installer script rundep_installer.sh
# This script takes in rundep and installs the packages line by line onto the machine
# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
# all runtime dependencies installed
#use_installer: false
# use_installer: false
# Root directory for ceph-ansible
#ansible_dir: "/path/to/ceph-ansible"
# ansible_dir: "/path/to/ceph-ansible"
######################
@ -320,12 +320,12 @@ monitor_address_block: subnet
ip_version: ipv4
mon_host_v1:
enabled: True
enabled: true
suffix: ':6789'
mon_host_v2:
suffix: ':3300'
enable_ceph_volume_debug: False
enable_ceph_volume_debug: false
##########
# CEPHFS #
@ -397,7 +397,7 @@ email_address: foo@bar.com
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
#common_single_host_mode: true
# common_single_host_mode: true
## Handlers - restarting daemons after a config change
# if for whatever reasons the content of your ceph configuration changes
@ -519,16 +519,16 @@ ceph_docker_image: "ceph/daemon-base"
ceph_docker_image_tag: latest-main
ceph_docker_registry: quay.io
ceph_docker_registry_auth: false
#ceph_docker_registry_username:
#ceph_docker_registry_password:
#ceph_docker_http_proxy:
#ceph_docker_https_proxy:
# ceph_docker_registry_username:
# ceph_docker_registry_password:
# ceph_docker_http_proxy:
# ceph_docker_https_proxy:
ceph_docker_no_proxy: "localhost,127.0.0.1"
## Client only docker image - defaults to {{ ceph_docker_image }}
ceph_client_docker_image: "{{ ceph_docker_image }}"
ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
ceph_client_docker_registry: "{{ ceph_docker_registry }}"
containerized_deployment: False
containerized_deployment: false
container_binary:
timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
@ -555,7 +555,7 @@ openstack_config: false
# name: "images"
# rule_name: "my_replicated_rule"
# application: "rbd"
# pg_autoscale_mode: False
# pg_autoscale_mode: false
# pg_num: 16
# pgp_num: 16
# target_size_ratio: 0.2
@ -605,7 +605,7 @@ openstack_keys:
#############
# DASHBOARD #
#############
dashboard_enabled: True
dashboard_enabled: true
# Choose http or https
# For https, you should set dashboard.crt/key and grafana.crt/key
# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
@ -617,7 +617,7 @@ dashboard_network: "{{ public_network }}"
dashboard_admin_user: admin
dashboard_admin_user_ro: false
# This variable must be set with a strong custom password when dashboard_enabled is True
#dashboard_admin_password: p@ssw0rd
# dashboard_admin_password: p@ssw0rd
# We only need this for SSL (https) connections
dashboard_crt: ''
dashboard_key: ''
@ -626,7 +626,7 @@ dashboard_tls_external: false
dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
dashboard_rgw_api_user_id: ceph-dashboard
dashboard_rgw_api_admin_resource: ''
dashboard_rgw_api_no_ssl_verify: False
dashboard_rgw_api_no_ssl_verify: false
dashboard_frontend_vip: ''
dashboard_disabled_features: []
prometheus_frontend_vip: ''
@ -635,7 +635,7 @@ node_exporter_container_image: "docker.io/prom/node-exporter:v0.17.0"
node_exporter_port: 9100
grafana_admin_user: admin
# This variable must be set with a strong custom password when dashboard_enabled is True
#grafana_admin_password: admin
# grafana_admin_password: admin
# We only need this for SSL (https) connections
grafana_crt: ''
grafana_key: ''
@ -667,7 +667,7 @@ grafana_dashboard_files:
grafana_plugins:
- vonage-status-panel
- grafana-piechart-panel
grafana_allow_embedding: True
grafana_allow_embedding: true
grafana_port: 3000
grafana_network: "{{ public_network }}"
grafana_conf_overrides: {}
@ -683,7 +683,7 @@ prometheus_port: 9092
prometheus_conf_overrides: {}
# Uncomment out this variable if you need to customize the retention period for prometheus storage.
# set it to '30d' if you want to retain 30 days of data.
#prometheus_storage_tsdb_retention_time: 15d
# prometheus_storage_tsdb_retention_time: 15d
alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2"
alertmanager_container_cpu_period: 100000
alertmanager_container_cpu_cores: 2
@ -741,11 +741,11 @@ gateway_ip_list: 0.0.0.0
#
# Example:
#
#rbd_devices:
# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
# rbd_devices:
# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
rbd_devices: {}
# client_connections defines the client ACL's to restrict client access to specific LUNs
@ -759,20 +759,19 @@ rbd_devices: {}
#
# Example:
#
#client_connections:
# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
# client_connections:
# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
client_connections: {}
no_log_on_ceph_key_tasks: True
no_log_on_ceph_key_tasks: true
###############
# DEPRECATION #
###############
######################################################
# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
# *DO NOT* MODIFY THEM #
@ -780,4 +779,4 @@ no_log_on_ceph_key_tasks: True
container_exec_cmd:
docker: false
ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
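As the comments above note, dashboard_admin_password and grafana_admin_password have no usable defaults and must be set to strong values when dashboard_enabled is true. A minimal override sketch (the values are placeholders and should come from a vault in practice):

  dashboard_admin_password: 'a-strong-dashboard-password'   # placeholder
  grafana_admin_password: 'a-strong-grafana-password'       # placeholder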

View File

@ -4,14 +4,14 @@ galaxy_info:
author: Sébastien Han
description: Handles ceph-ansible default vars for all roles
license: Apache
min_ansible_version: 2.7
min_ansible_version: '2.7'
platforms:
- name: Ubuntu
versions:
- xenial
- name: EL
versions:
- 7
- 'all'
galaxy_tags:
- system
dependencies: []

Some files were not shown because too many files have changed in this diff.