address Ansible linter errors

This addresses all errors reported by the Ansible linter.

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
pull/7476/head
Guillaume Abrioux 2024-02-14 11:14:02 +01:00
parent 7d25a5d565
commit 18da10bb7a
245 changed files with 5490 additions and 4948 deletions


@@ -10,10 +10,7 @@ jobs:
       with:
         python-version: '3.10'
         architecture: x64
-      - run: pip install -r <(grep ansible tests/requirements.txt) ansible-lint==6.16.0 netaddr
+      - run: pip install -r <(grep ansible tests/requirements.txt) ansible-lint netaddr
       - run: ansible-galaxy install -r requirements.yml
-      - run: ansible-lint -x 106,204,205,208 -v --force-color ./roles/*/ ./infrastructure-playbooks/*.yml site-container.yml.sample site-container.yml.sample dashboard.yml
-      - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site.yml.sample --syntax-check --list-tasks -vv
-      - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site-container.yml.sample --syntax-check --list-tasks -vv
-      - run: ansible-playbook -i ./tests/functional/all_daemons/hosts dashboard.yml --syntax-check --list-tasks -vv
-      - run: ansible-playbook -i ./tests/functional/all_daemons/hosts infrastructure-playbooks/*.yml --syntax-check --list-tasks -vv
+      - run: ansible-lint -x 'yaml[line-length],role-name,run-once' -v --force-color ./roles/*/ ./infrastructure-playbooks/*.yml site-container.yml.sample site.yml.sample dashboard.yml
+      - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site.yml.sample site-container.yml.sample dashboard.yml infrastructure-playbooks/*.yml --syntax-check --list-tasks -vv
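The lint invocation is itself part of the cleanup: the retired numeric rule IDs (106, 204, 205, 208) are replaced by the named rules current ansible-lint releases use, and the version pin is dropped. The same exclusions could equally live in a repository-level `.ansible-lint` config; a minimal sketch, assuming such a file were added (it is not part of this commit):

    # .ansible-lint (hypothetical) -- equivalent to `-x 'yaml[line-length],role-name,run-once'`
    skip_list:
      - 'yaml[line-length]'  # long lines are tolerated in this repo
      - 'role-name'          # role names such as ceph-defaults contain dashes
      - 'run-once'           # run_once is used deliberately throughout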


@@ -1,5 +1,6 @@
 ---
-- hosts:
+- name: Deploy node_exporter
+  hosts:
     - "{{ mon_group_name|default('mons') }}"
     - "{{ osd_group_name|default('osds') }}"
     - "{{ mds_group_name|default('mdss') }}"
@@ -12,75 +13,91 @@
   gather_facts: false
   become: true
   pre_tasks:
-    - import_role:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
-    - name: set ceph node exporter install 'In Progress'
+    - name: Set ceph node exporter install 'In Progress'
       run_once: true
-      set_stats:
+      ansible.builtin.set_stats:
         data:
           installer_phase_ceph_node_exporter:
             status: "In Progress"
             start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
   tasks:
-    - import_role:
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
         name: ceph-facts
       tags: ['ceph_update_config']
-    - import_role:
+    - name: Import ceph-container-engine
+      ansible.builtin.import_role:
         name: ceph-container-engine
-    - import_role:
+    - name: Import ceph-container-common role
+      ansible.builtin.import_role:
         name: ceph-container-common
         tasks_from: registry
       when:
         - not containerized_deployment | bool
         - ceph_docker_registry_auth | bool
-    - import_role:
+    - name: Import ceph-node-exporter role
+      ansible.builtin.import_role:
         name: ceph-node-exporter
   post_tasks:
-    - name: set ceph node exporter install 'Complete'
+    - name: Set ceph node exporter install 'Complete'
       run_once: true
-      set_stats:
+      ansible.builtin.set_stats:
         data:
           installer_phase_ceph_node_exporter:
             status: "Complete"
             end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-- hosts: "{{ monitoring_group_name | default('monitoring') }}"
+- name: Deploy grafana and prometheus
+  hosts: "{{ monitoring_group_name | default('monitoring') }}"
   gather_facts: false
   become: true
   pre_tasks:
-    - import_role:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
-    - name: set ceph grafana install 'In Progress'
+    - name: Set ceph grafana install 'In Progress'
       run_once: true
-      set_stats:
+      ansible.builtin.set_stats:
         data:
           installer_phase_ceph_grafana:
             status: "In Progress"
             start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
   tasks:
-    - import_role:
-        name: ceph-facts
-      tags: ['ceph_update_config']
-    - import_role:
+    # - ansible.builtin.import_role:
+    #     name: ceph-facts
+    #   tags: ['ceph_update_config']
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
         name: ceph-facts
         tasks_from: grafana
       tags: ['ceph_update_config']
-    - import_role:
+    - name: Import ceph-prometheus role
+      ansible.builtin.import_role:
         name: ceph-prometheus
-    - import_role:
+    - name: Import ceph-grafana role
+      ansible.builtin.import_role:
         name: ceph-grafana
   post_tasks:
-    - name: set ceph grafana install 'Complete'
+    - name: Set ceph grafana install 'Complete'
       run_once: true
-      set_stats:
+      ansible.builtin.set_stats:
         data:
           installer_phase_ceph_grafana:
             status: "Complete"
@@ -88,37 +105,44 @@
 # using groups[] here otherwise it can't fallback to the mon if there's no mgr group.
 # adding an additional | default(omit) in case where no monitors are present (external ceph cluster)
-- hosts: "{{ groups[mgr_group_name|default('mgrs')] | default(groups[mon_group_name|default('mons')]) | default(omit) }}"
+- name: Deploy dashboard
+  hosts: "{{ groups['mgrs'] | default(groups['mons']) | default(omit) }}"
   gather_facts: false
   become: true
   pre_tasks:
-    - import_role:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
-    - name: set ceph dashboard install 'In Progress'
+    - name: Set ceph dashboard install 'In Progress'
       run_once: true
-      set_stats:
+      ansible.builtin.set_stats:
         data:
           installer_phase_ceph_dashboard:
             status: "In Progress"
             start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
   tasks:
-    - import_role:
-        name: ceph-facts
-      tags: ['ceph_update_config']
-    - import_role:
+    # - name: Import ceph-facts role
+    #   ansible.builtin.import_role:
+    #     name: ceph-facts
+    #   tags: ['ceph_update_config']
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
         name: ceph-facts
         tasks_from: grafana
       tags: ['ceph_update_config']
-    - import_role:
+    - name: Import ceph-dashboard role
+      ansible.builtin.import_role:
         name: ceph-dashboard
   post_tasks:
-    - name: set ceph dashboard install 'Complete'
+    - name: Set ceph dashboard install 'Complete'
       run_once: true
-      set_stats:
+      ansible.builtin.set_stats:
         data:
           installer_phase_ceph_dashboard:
             status: "Complete"
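The same three lint findings repeat across every play in this file and in the playbooks below: name[play] (plays must carry a name), name[casing] (names start with an uppercase letter), and fqcn[action-core] (builtin actions are written with their fully qualified collection name). The resulting pattern, reduced to a self-contained sketch:

    ---
    - name: Deploy node_exporter            # name[play]: the play itself is named
      hosts: mons
      gather_facts: false
      become: true
      tasks:
        - name: Import ceph-defaults role   # name[casing]: starts uppercase
          ansible.builtin.import_role:      # fqcn[action-core]: not bare import_role
            name: ceph-defaults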


@@ -74,7 +74,7 @@ dummy:
 # If configure_firewall is true, then ansible will try to configure the
 # appropriate firewalling rules so that Ceph daemons can communicate
 # with each others.
-#configure_firewall: True
+#configure_firewall: true
 # Open ports on corresponding nodes if firewall is installed on it
 #ceph_mon_firewall_zone: public
@@ -120,7 +120,7 @@ dummy:
 # This variable determines if ceph packages can be updated. If False, the
 # package resources will use "state=present". If True, they will use
 # "state=latest".
-#upgrade_ceph_packages: False
+#upgrade_ceph_packages: false
 #ceph_use_distro_backports: false # DEBIAN ONLY
 #ceph_directories_mode: "0755"
@@ -171,7 +171,7 @@ dummy:
 # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
+# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
 # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@@ -229,7 +229,7 @@ dummy:
 # a URL to the .repo file to be installed on the targets. For deb,
 # ceph_custom_repo should be the URL to the repo base.
 #
-#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
+# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
 #ceph_custom_repo: https://server.domain.com/ceph-custom-repo
@@ -238,14 +238,14 @@ dummy:
 # Enabled when ceph_repository == 'local'
 #
 # Path to DESTDIR of the ceph install
-#ceph_installation_dir: "/path/to/ceph_installation/"
+# ceph_installation_dir: "/path/to/ceph_installation/"
 # Whether or not to use installer script rundep_installer.sh
 # This script takes in rundep and installs the packages line by line onto the machine
 # If this is set to false then it is assumed that the machine ceph is being copied onto will already have
 # all runtime dependencies installed
-#use_installer: false
+# use_installer: false
 # Root directory for ceph-ansible
-#ansible_dir: "/path/to/ceph-ansible"
+# ansible_dir: "/path/to/ceph-ansible"
 ######################
@@ -328,12 +328,12 @@ dummy:
 #ip_version: ipv4
 #mon_host_v1:
-#  enabled: True
+#  enabled: true
 #  suffix: ':6789'
 #mon_host_v2:
 #  suffix: ':3300'
-#enable_ceph_volume_debug: False
+#enable_ceph_volume_debug: false
 ##########
 # CEPHFS #
@@ -405,7 +405,7 @@ dummy:
 ## Testing mode
 # enable this mode _only_ when you have a single node
 # if you don't want it keep the option commented
-#common_single_host_mode: true
+# common_single_host_mode: true
 ## Handlers - restarting daemons after a config change
 # if for whatever reasons the content of your ceph configuration changes
@@ -527,16 +527,16 @@ dummy:
 #ceph_docker_image_tag: latest-main
 #ceph_docker_registry: quay.io
 #ceph_docker_registry_auth: false
-#ceph_docker_registry_username:
-#ceph_docker_registry_password:
-#ceph_docker_http_proxy:
-#ceph_docker_https_proxy:
+# ceph_docker_registry_username:
+# ceph_docker_registry_password:
+# ceph_docker_http_proxy:
+# ceph_docker_https_proxy:
 #ceph_docker_no_proxy: "localhost,127.0.0.1"
 ## Client only docker image - defaults to {{ ceph_docker_image }}
 #ceph_client_docker_image: "{{ ceph_docker_image }}"
 #ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
 #ceph_client_docker_registry: "{{ ceph_docker_registry }}"
-#containerized_deployment: False
+#containerized_deployment: false
 #container_binary:
 #timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
@@ -563,7 +563,7 @@ dummy:
 #  name: "images"
 #  rule_name: "my_replicated_rule"
 #  application: "rbd"
-#  pg_autoscale_mode: False
+#  pg_autoscale_mode: false
 #  pg_num: 16
 #  pgp_num: 16
 #  target_size_ratio: 0.2
@@ -613,7 +613,7 @@ dummy:
 #############
 # DASHBOARD #
 #############
-#dashboard_enabled: True
+#dashboard_enabled: true
 # Choose http or https
 # For https, you should set dashboard.crt/key and grafana.crt/key
 # If you define the dashboard_crt and dashboard_key variables, but leave them as '',
@@ -625,7 +625,7 @@ dummy:
 #dashboard_admin_user: admin
 #dashboard_admin_user_ro: false
 # This variable must be set with a strong custom password when dashboard_enabled is True
-#dashboard_admin_password: p@ssw0rd
+# dashboard_admin_password: p@ssw0rd
 # We only need this for SSL (https) connections
 #dashboard_crt: ''
 #dashboard_key: ''
@@ -634,7 +634,7 @@ dummy:
 #dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
 #dashboard_rgw_api_user_id: ceph-dashboard
 #dashboard_rgw_api_admin_resource: ''
-#dashboard_rgw_api_no_ssl_verify: False
+#dashboard_rgw_api_no_ssl_verify: false
 #dashboard_frontend_vip: ''
 #dashboard_disabled_features: []
 #prometheus_frontend_vip: ''
@@ -643,7 +643,7 @@ dummy:
 #node_exporter_port: 9100
 #grafana_admin_user: admin
 # This variable must be set with a strong custom password when dashboard_enabled is True
-#grafana_admin_password: admin
+# grafana_admin_password: admin
 # We only need this for SSL (https) connections
 #grafana_crt: ''
 #grafana_key: ''
@@ -675,7 +675,7 @@ dummy:
 #grafana_plugins:
 #  - vonage-status-panel
 #  - grafana-piechart-panel
-#grafana_allow_embedding: True
+#grafana_allow_embedding: true
 #grafana_port: 3000
 #grafana_network: "{{ public_network }}"
 #grafana_conf_overrides: {}
@@ -691,7 +691,7 @@ dummy:
 #prometheus_conf_overrides: {}
 # Uncomment out this variable if you need to customize the retention period for prometheus storage.
 # set it to '30d' if you want to retain 30 days of data.
-#prometheus_storage_tsdb_retention_time: 15d
+# prometheus_storage_tsdb_retention_time: 15d
 #alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2"
 #alertmanager_container_cpu_period: 100000
 #alertmanager_container_cpu_cores: 2
@@ -749,11 +749,11 @@ dummy:
 #
 # Example:
 #
-#rbd_devices:
+# rbd_devices:
 #   - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
 #   - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
 #   - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
 #   - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
 #rbd_devices: {}
@@ -767,20 +767,19 @@ dummy:
 #
 # Example:
 #
-#client_connections:
+# client_connections:
 #   - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
 #   - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
 #client_connections: {}
-#no_log_on_ceph_key_tasks: True
+#no_log_on_ceph_key_tasks: true
 ###############
 # DEPRECATION #
 ###############
 ######################################################
 # VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
 #                *DO NOT* MODIFY THEM                #
@@ -788,5 +787,5 @@ dummy:
 #container_exec_cmd:
 #docker: false
 #ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
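Every hunk in this generated group_vars sample comes down to two rules: yaml[truthy] normalizes boolean literals to lowercase true/false, and yaml[comments] adds a space after the # on commented-out example values. Since the samples are meant to be copied and uncommented, an override file built from them would look roughly like this (values are illustrative only):

    # group_vars/all.yml -- user overrides; every value below is a placeholder
    configure_firewall: true
    upgrade_ceph_packages: false
    dashboard_enabled: true
    dashboard_admin_password: "a-strong-password-here"   # required when dashboard_enabled is true
    grafana_admin_password: "another-strong-password"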


@@ -45,6 +45,6 @@ dummy:
 #  - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
 #keys:
 #  - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
 #  - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }


@@ -13,13 +13,13 @@ dummy:
 # GENERAL #
 ###########
 # Whether or not to generate secure certificate to iSCSI gateway nodes
-#generate_crt: False
+#generate_crt: false
 #iscsi_conf_overrides: {}
 #iscsi_pool_name: rbd
-#iscsi_pool_size: 3
-#copy_admin_key: True
+# iscsi_pool_size: 3
+#copy_admin_key: true
 ##################
 # RBD-TARGET-API #


@@ -43,7 +43,7 @@ dummy:
 # ceph_mds_systemd_overrides will override the systemd settings
 # for the ceph-mds services.
 # For example,to set "PrivateDevices=false" you can specify:
-#ceph_mds_systemd_overrides:
-#  Service:
-#    PrivateDevices: False
+# ceph_mds_systemd_overrides:
+#   Service:
+#     PrivateDevices: false


@@ -54,7 +54,7 @@ dummy:
 # ceph_mgr_systemd_overrides will override the systemd settings
 # for the ceph-mgr services.
 # For example,to set "PrivateDevices=false" you can specify:
-#ceph_mgr_systemd_overrides:
-#  Service:
-#    PrivateDevices: False
+# ceph_mgr_systemd_overrides:
+#   Service:
+#     PrivateDevices: false


@@ -64,7 +64,7 @@ dummy:
 # ceph_mon_systemd_overrides will override the systemd settings
 # for the ceph-mon services.
 # For example,to set "PrivateDevices=false" you can specify:
-#ceph_mon_systemd_overrides:
-#  Service:
-#    PrivateDevices: False
+# ceph_mon_systemd_overrides:
+#   Service:
+#     PrivateDevices: false


@@ -92,8 +92,8 @@ dummy:
 #ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
 # Note: keys are optional and can be generated, but not on containerized, where
 # they must be configered.
-#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
-#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
 #rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
 ###################
@@ -106,19 +106,19 @@ dummy:
 # https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example
 #
 # Example:
-#CACHEINODE {
-#    #Entries_HWMark = 100000;
-#}
+# CACHEINODE {
+#     # Entries_HWMark = 100000;
+# }
 #
-#ganesha_core_param_overrides:
-#ganesha_ceph_export_overrides:
-#ganesha_rgw_export_overrides:
-#ganesha_rgw_section_overrides:
-#ganesha_log_overrides:
-#ganesha_conf_overrides: |
-#  CACHEINODE {
-#      #Entries_HWMark = 100000;
-#  }
+# ganesha_core_param_overrides:
+# ganesha_ceph_export_overrides:
+# ganesha_rgw_export_overrides:
+# ganesha_rgw_section_overrides:
+# ganesha_log_overrides:
+# ganesha_conf_overrides: |
+#   CACHEINODE {
+#       # Entries_HWMark = 100000;
+#   }
 ##########
 # DOCKER #
########## ##########
# DOCKER # # DOCKER #


@@ -33,31 +33,31 @@ dummy:
 # All scenario(except 3rd) inherit from the following device declaration
 # Note: This scenario uses the ceph-volume lvm batch method to provision OSDs
-#devices:
-#  - /dev/sdb
-#  - /dev/sdc
-#  - /dev/sdd
-#  - /dev/sde
+# devices:
+#   - /dev/sdb
+#   - /dev/sdc
+#   - /dev/sdd
+#   - /dev/sde
 #devices: []
 # Declare devices to be used as block.db devices
-#dedicated_devices:
-#  - /dev/sdx
-#  - /dev/sdy
+# dedicated_devices:
+#   - /dev/sdx
+#   - /dev/sdy
 #dedicated_devices: []
 # Declare devices to be used as block.wal devices
-#bluestore_wal_devices:
-#  - /dev/nvme0n1
-#  - /dev/nvme0n2
+# bluestore_wal_devices:
+#   - /dev/nvme0n1
+#   - /dev/nvme0n2
 #bluestore_wal_devices: []
-#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
+# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
 # Device discovery is based on the Ansible fact 'ansible_facts["devices"]'
 # which reports all the devices on a system. If chosen, all the disks
 # found will be passed to ceph-volume lvm batch. You should not be worried on using
@@ -68,7 +68,7 @@ dummy:
 # Encrypt your OSD device using dmcrypt
 # If set to True, no matter which osd_objecstore you use the data will be encrypted
-#dmcrypt: False
+#dmcrypt: true
 # Use ceph-volume to create OSDs from logical volumes.
 # lvm_volumes is a list of dictionaries.
@@ -177,8 +177,8 @@ dummy:
 # NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16
 # NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17
 # then, the following would run the OSD on the first NUMA node only.
-#ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
-#ceph_osd_docker_cpuset_mems: "0"
+# ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
+# ceph_osd_docker_cpuset_mems: "0"
 # PREPARE DEVICE
 #
@@ -199,9 +199,9 @@ dummy:
 # ceph_osd_systemd_overrides will override the systemd settings
 # for the ceph-osd services.
 # For example,to set "PrivateDevices=false" you can specify:
-#ceph_osd_systemd_overrides:
-#  Service:
-#    PrivateDevices: False
+# ceph_osd_systemd_overrides:
+#   Service:
+#     PrivateDevices: false
 ###########
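To read the devices hunks above in context: these commented defaults are what a user uncomments in their own group_vars/osds.yml to describe the disks on each OSD node. A sketch with hypothetical device paths:

    # group_vars/osds.yml -- illustrative values, adjust to the actual hardware
    devices:
      - /dev/sdb
      - /dev/sdc
    dedicated_devices:        # optional: block.db on faster media
      - /dev/sdx
    dmcrypt: true             # encrypt OSDs regardless of the objectstore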


@@ -49,7 +49,7 @@ dummy:
 # ceph_rbd_mirror_systemd_overrides will override the systemd settings
 # for the ceph-rbd-mirror services.
 # For example,to set "PrivateDevices=false" you can specify:
-#ceph_rbd_mirror_systemd_overrides:
-#  Service:
-#    PrivateDevices: False
+# ceph_rbd_mirror_systemd_overrides:
+#   Service:
+#     PrivateDevices: false


@@ -26,10 +26,10 @@ dummy:
 #    - no-tlsv11
 #    - no-tls-tickets
 #
-#virtual_ips:
-#  - 192.168.238.250
-#  - 192.168.238.251
+# virtual_ips:
+#   - 192.168.238.250
+#   - 192.168.238.251
 #
-#virtual_ip_netmask: 24
-#virtual_ip_interface: ens33
+# virtual_ip_netmask: 24
+# virtual_ip_interface: ens33


@@ -45,30 +45,30 @@ dummy:
 # If the key doesn't exist it falls back to the default replicated_rule.
 # This only works for replicated pool type not erasure.
-#rgw_create_pools:
-#  "{{ rgw_zone }}.rgw.buckets.data":
-#    pg_num: 64
-#    type: ec
-#    ec_profile: myecprofile
-#    ec_k: 5
-#    ec_m: 3
-#  "{{ rgw_zone }}.rgw.buckets.index":
-#    pg_num: 16
-#    size: 3
-#    type: replicated
-#  "{{ rgw_zone }}.rgw.meta":
-#    pg_num: 8
-#    size: 3
-#    type: replicated
-#  "{{ rgw_zone }}.rgw.log":
-#    pg_num: 8
-#    size: 3
-#    type: replicated
-#  "{{ rgw_zone }}.rgw.control":
-#    pg_num: 8
-#    size: 3
-#    type: replicated
-#    rule_name: foo
+# rgw_create_pools:
+#   "{{ rgw_zone }}.rgw.buckets.data":
+#     pg_num: 64
+#     type: ec
+#     ec_profile: myecprofile
+#     ec_k: 5
+#     ec_m: 3
+#   "{{ rgw_zone }}.rgw.buckets.index":
+#     pg_num: 16
+#     size: 3
+#     type: replicated
+#   "{{ rgw_zone }}.rgw.meta":
+#     pg_num: 8
+#     size: 3
+#     type: replicated
+#   "{{ rgw_zone }}.rgw.log":
+#     pg_num: 8
+#     size: 3
+#     type: replicated
+#   "{{ rgw_zone }}.rgw.control":
+#     pg_num: 8
+#     size: 3
+#     type: replicated
+#     rule_name: foo
 ##########
@@ -81,8 +81,8 @@ dummy:
 # These options can be passed using the 'ceph_rgw_docker_extra_env' variable.
 #ceph_rgw_docker_memory_limit: "4096m"
 #ceph_rgw_docker_cpu_limit: 8
-#ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
-#ceph_rgw_docker_cpuset_mems: "0"
+# ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
+# ceph_rgw_docker_cpuset_mems: "0"
 #ceph_rgw_docker_extra_env:
 #ceph_config_keys: [] # DON'T TOUCH ME
@@ -94,7 +94,7 @@ dummy:
 # ceph_rgw_systemd_overrides will override the systemd settings
 # for the ceph-rgw services.
 # For example,to set "PrivateDevices=false" you can specify:
-#ceph_rgw_systemd_overrides:
-#  Service:
-#    PrivateDevices: False
+# ceph_rgw_systemd_overrides:
+#   Service:
+#     PrivateDevices: false


@@ -74,7 +74,7 @@ dummy:
 # If configure_firewall is true, then ansible will try to configure the
 # appropriate firewalling rules so that Ceph daemons can communicate
 # with each others.
-#configure_firewall: True
+#configure_firewall: true
 # Open ports on corresponding nodes if firewall is installed on it
 #ceph_mon_firewall_zone: public
@@ -120,7 +120,7 @@ dummy:
 # This variable determines if ceph packages can be updated. If False, the
 # package resources will use "state=present". If True, they will use
 # "state=latest".
-#upgrade_ceph_packages: False
+#upgrade_ceph_packages: false
 #ceph_use_distro_backports: false # DEBIAN ONLY
 #ceph_directories_mode: "0755"
@@ -171,7 +171,7 @@ ceph_repository: rhcs
 # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
+# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
 # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@@ -229,7 +229,7 @@ ceph_iscsi_config_dev: false
 # a URL to the .repo file to be installed on the targets. For deb,
 # ceph_custom_repo should be the URL to the repo base.
 #
-#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
+# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
 #ceph_custom_repo: https://server.domain.com/ceph-custom-repo
@@ -238,14 +238,14 @@ ceph_iscsi_config_dev: false
 # Enabled when ceph_repository == 'local'
 #
 # Path to DESTDIR of the ceph install
-#ceph_installation_dir: "/path/to/ceph_installation/"
+# ceph_installation_dir: "/path/to/ceph_installation/"
 # Whether or not to use installer script rundep_installer.sh
 # This script takes in rundep and installs the packages line by line onto the machine
 # If this is set to false then it is assumed that the machine ceph is being copied onto will already have
 # all runtime dependencies installed
-#use_installer: false
+# use_installer: false
 # Root directory for ceph-ansible
-#ansible_dir: "/path/to/ceph-ansible"
+# ansible_dir: "/path/to/ceph-ansible"
 ######################
@@ -328,12 +328,12 @@ ceph_iscsi_config_dev: false
 #ip_version: ipv4
 #mon_host_v1:
-#  enabled: True
+#  enabled: true
 #  suffix: ':6789'
 #mon_host_v2:
 #  suffix: ':3300'
-#enable_ceph_volume_debug: False
+#enable_ceph_volume_debug: false
 ##########
 # CEPHFS #
@@ -405,7 +405,7 @@ ceph_iscsi_config_dev: false
 ## Testing mode
 # enable this mode _only_ when you have a single node
 # if you don't want it keep the option commented
-#common_single_host_mode: true
+# common_single_host_mode: true
 ## Handlers - restarting daemons after a config change
 # if for whatever reasons the content of your ceph configuration changes
@@ -527,10 +527,10 @@ ceph_docker_image: "rhceph/rhceph-5-rhel8"
 ceph_docker_image_tag: "latest"
 ceph_docker_registry: "registry.redhat.io"
 ceph_docker_registry_auth: true
-#ceph_docker_registry_username:
-#ceph_docker_registry_password:
-#ceph_docker_http_proxy:
-#ceph_docker_https_proxy:
+# ceph_docker_registry_username:
+# ceph_docker_registry_password:
+# ceph_docker_http_proxy:
+# ceph_docker_https_proxy:
 #ceph_docker_no_proxy: "localhost,127.0.0.1"
 ## Client only docker image - defaults to {{ ceph_docker_image }}
 #ceph_client_docker_image: "{{ ceph_docker_image }}"
@@ -563,7 +563,7 @@ containerized_deployment: true
 #  name: "images"
 #  rule_name: "my_replicated_rule"
 #  application: "rbd"
-#  pg_autoscale_mode: False
+#  pg_autoscale_mode: false
 #  pg_num: 16
 #  pgp_num: 16
 #  target_size_ratio: 0.2
@@ -613,7 +613,7 @@ containerized_deployment: true
 #############
 # DASHBOARD #
 #############
-#dashboard_enabled: True
+#dashboard_enabled: true
 # Choose http or https
 # For https, you should set dashboard.crt/key and grafana.crt/key
 # If you define the dashboard_crt and dashboard_key variables, but leave them as '',
@@ -625,7 +625,7 @@ containerized_deployment: true
 #dashboard_admin_user: admin
 #dashboard_admin_user_ro: false
 # This variable must be set with a strong custom password when dashboard_enabled is True
-#dashboard_admin_password: p@ssw0rd
+# dashboard_admin_password: p@ssw0rd
 # We only need this for SSL (https) connections
 #dashboard_crt: ''
 #dashboard_key: ''
@@ -634,7 +634,7 @@ containerized_deployment: true
 #dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
 #dashboard_rgw_api_user_id: ceph-dashboard
 #dashboard_rgw_api_admin_resource: ''
-#dashboard_rgw_api_no_ssl_verify: False
+#dashboard_rgw_api_no_ssl_verify: false
 #dashboard_frontend_vip: ''
 #dashboard_disabled_features: []
 #prometheus_frontend_vip: ''
@@ -643,7 +643,7 @@ node_exporter_container_image: registry.redhat.io/openshift4/ose-prometheus-node
 #node_exporter_port: 9100
 #grafana_admin_user: admin
 # This variable must be set with a strong custom password when dashboard_enabled is True
-#grafana_admin_password: admin
+# grafana_admin_password: admin
 # We only need this for SSL (https) connections
 #grafana_crt: ''
 #grafana_key: ''
@@ -675,7 +675,7 @@ grafana_container_image: registry.redhat.io/rhceph/rhceph-5-dashboard-rhel8:5
 #grafana_plugins:
 #  - vonage-status-panel
 #  - grafana-piechart-panel
-#grafana_allow_embedding: True
+#grafana_allow_embedding: true
 #grafana_port: 3000
 #grafana_network: "{{ public_network }}"
 #grafana_conf_overrides: {}
@@ -691,7 +691,7 @@ prometheus_container_image: registry.redhat.io/openshift4/ose-prometheus:v4.6
 #prometheus_conf_overrides: {}
 # Uncomment out this variable if you need to customize the retention period for prometheus storage.
 # set it to '30d' if you want to retain 30 days of data.
-#prometheus_storage_tsdb_retention_time: 15d
+# prometheus_storage_tsdb_retention_time: 15d
 alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alertmanager:v4.6
 #alertmanager_container_cpu_period: 100000
 #alertmanager_container_cpu_cores: 2
@@ -749,11 +749,11 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert
 #
 # Example:
 #
-#rbd_devices:
+# rbd_devices:
 #   - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
 #   - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
 #   - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
 #   - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
 #rbd_devices: {}
@@ -767,20 +767,19 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert
 #
 # Example:
 #
-#client_connections:
+# client_connections:
 #   - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
 #   - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
 #client_connections: {}
-#no_log_on_ceph_key_tasks: True
+#no_log_on_ceph_key_tasks: true
 ###############
 # DEPRECATION #
 ###############
 ######################################################
 # VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
 #                *DO NOT* MODIFY THEM                #
@@ -788,5 +787,5 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert
 #container_exec_cmd:
 #docker: false
 #ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
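In this RHCS flavour of the sample the registry auth default is already enabled (ceph_docker_registry_auth: true), so the username/password lines that merely gain a space after # here are the first thing a deployer must uncomment. A sketch with placeholder credentials:

    ceph_docker_registry: "registry.redhat.io"
    ceph_docker_registry_auth: true
    ceph_docker_registry_username: "rh-account"                      # placeholder
    ceph_docker_registry_password: "{{ vault_registry_password }}"   # e.g. kept in ansible-vault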


@@ -6,26 +6,30 @@
 # Ensure that all monitors are present in the mons
 # group in your inventory so that the ceph configuration file
 # is created correctly for the new OSD(s).
-- hosts: mons
+- name: Pre-requisites operations for adding new monitor(s)
+  hosts: mons
   gather_facts: false
   vars:
     delegate_facts_host: true
   become: true
   pre_tasks:
-    - import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
+    - name: Import raw_install_python tasks
+      ansible.builtin.import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
-    - name: gather facts
-      setup:
+    - name: Gather facts
+      ansible.builtin.setup:
         gather_subset:
           - 'all'
           - '!facter'
           - '!ohai'
       when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
-    - import_role:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
         name: ceph-defaults
-    - name: gather and delegate facts
-      setup:
+    - name: Gather and delegate facts
+      ansible.builtin.setup:
         gather_subset:
           - 'all'
           - '!facter'
@@ -36,52 +40,84 @@
       run_once: true
       when: delegate_facts_host | bool
   tasks:
-    - import_role:
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
         name: ceph-facts
-    - import_role:
+    - name: Import ceph-validate role
+      ansible.builtin.import_role:
         name: ceph-validate
-    - import_role:
+    - name: Import ceph-infra role
+      ansible.builtin.import_role:
         name: ceph-infra
-    - import_role:
+    - name: Import ceph-handler role
+      ansible.builtin.import_role:
         name: ceph-handler
-    - import_role:
+    - name: Import ceph-common role
+      ansible.builtin.import_role:
         name: ceph-common
       when: not containerized_deployment | bool
-    - import_role:
+    - name: Import ceph-container-engine role
+      ansible.builtin.import_role:
         name: ceph-container-engine
       when: containerized_deployment | bool
-    - import_role:
+    - name: Import ceph-container-common role
+      ansible.builtin.import_role:
         name: ceph-container-common
       when: containerized_deployment | bool
-- hosts: mons
+- name: Deploy Ceph monitors
+  hosts: mons
   gather_facts: false
   become: true
   tasks:
-    - import_role:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
         name: ceph-defaults
-    - import_role:
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
         name: ceph-facts
-    - import_role:
+    - name: Import ceph-handler role
+      ansible.builtin.import_role:
         name: ceph-handler
-    - import_role:
+    - name: Import ceph-config role
+      ansible.builtin.import_role:
         name: ceph-config
-    - import_role:
+    - name: Import ceph-mon role
+      ansible.builtin.import_role:
         name: ceph-mon
-    - import_role:
+    - name: Import ceph-crash role
+      ansible.builtin.import_role:
         name: ceph-crash
       when: containerized_deployment | bool
-# update config files on OSD nodes
-- hosts: osds
+- name: Update config file on OSD nodes
+  hosts: osds
   gather_facts: true
   become: true
   tasks:
-    - import_role:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
         name: ceph-defaults
-    - import_role:
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
         name: ceph-facts
-    - import_role:
+    - name: Import ceph-handler role
+      ansible.builtin.import_role:
         name: ceph-handler
-    - import_role:
+    - name: Import ceph-config role
+      ansible.builtin.import_role:
         name: ceph-config
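The "Gather and delegate facts" task above is the one piece of non-boilerplate logic in this play: with delegate_facts_host enabled, facts for every host in the cluster are collected from a single control point so that later templating of ceph.conf can see all monitors. The hunk boundaries cut the task off mid-argument; a reduced sketch of the full pattern, under the assumption it matches the repo's usual form:

    - name: Gather and delegate facts
      ansible.builtin.setup:
        gather_subset:
          - 'all'
          - '!facter'
          - '!ohai'
      delegate_to: "{{ item }}"          # run the fact gathering on each host...
      delegate_facts: true               # ...and store the facts under that host
      with_items: "{{ groups['all'] | default([]) }}"
      run_once: true
      when: delegate_facts_host | bool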


@@ -19,12 +19,13 @@
 # ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=backup -e target_node=mon01
 # ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=restore -e target_node=mon01
-- hosts: localhost
+- name: Backup and restore Ceph files
+  hosts: localhost
   become: true
   gather_facts: true
   tasks:
-    - name: exit playbook, if user did not set the source node
-      fail:
+    - name: Exit playbook, if user did not set the source node
+      ansible.builtin.fail:
         msg: >
           "You must pass the node name: -e target_node=<inventory_name>.
           The name must match what is set in your inventory."
@@ -32,71 +33,73 @@
         - target_node is not defined
           or target_node not in groups.get('all', [])
-    - name: exit playbook, if user did not set the backup directory
-      fail:
+    - name: Exit playbook, if user did not set the backup directory
+      ansible.builtin.fail:
         msg: >
           "you must pass the backup directory path: -e backup_dir=<backup directory path>"
       when: backup_dir is not defined
-    - name: exit playbook, if user did not set the playbook mode (backup|restore)
-      fail:
+    - name: Exit playbook, if user did not set the playbook mode (backup|restore)
+      ansible.builtin.fail:
         msg: >
           "you must pass the mode: -e mode=<backup|restore>"
       when:
         - mode is not defined
           or mode not in ['backup', 'restore']
-    - name: gather facts on source node
-      setup:
+    - name: Gather facts on source node
+      ansible.builtin.setup:
       delegate_to: "{{ target_node }}"
       delegate_facts: true
-    - name: backup mode
+    - name: Backup mode
       when: mode == 'backup'
       block:
-        - name: create a temp directory
+        - name: Create a temp directory
           ansible.builtin.tempfile:
             state: directory
            suffix: ansible-archive-ceph
           register: tmp_dir
           delegate_to: "{{ target_node }}"
-        - name: archive files
-          archive:
+        - name: Archive files
+          community.general.archive:
             path: "{{ item }}"
             dest: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar"
             format: tar
+            mode: "0644"
           delegate_to: "{{ target_node }}"
           loop:
             - /etc/ceph
             - /var/lib/ceph
-        - name: create backup directory
+        - name: Create backup directory
           become: false
-          file:
+          ansible.builtin.file:
             path: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}"
             state: directory
+            mode: "0755"
-        - name: backup files
-          fetch:
+        - name: Backup files
+          ansible.builtin.fetch:
             src: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar"
             dest: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar"
-            flat: yes
+            flat: true
           loop:
             - /etc/ceph
            - /var/lib/ceph
           delegate_to: "{{ target_node }}"
-        - name: remove temp directory
-          file:
+        - name: Remove temp directory
+          ansible.builtin.file:
            path: "{{ tmp_dir.path }}"
            state: absent
           delegate_to: "{{ target_node }}"
-    - name: restore mode
+    - name: Restore mode
       when: mode == 'restore'
       block:
-        - name: unarchive files
+        - name: Unarchive files
          ansible.builtin.unarchive:
            src: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar"
            dest: "{{ item | dirname }}"
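The two added mode: arguments in this file come from ansible-lint's risky-file-permissions rule: modules like archive and file fall back to the system umask when no mode is given, so the lint pass pins permissions explicitly. The rule's preferred form, sketched with an illustrative path:

    - name: Create a directory with an explicit mode (satisfies risky-file-permissions)
      ansible.builtin.file:
        path: /tmp/example-backup   # illustrative path
        state: directory
        mode: "0755"                # quoted so YAML keeps the leading zero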


@@ -4,7 +4,8 @@
 #
 # It currently runs on localhost
-- hosts: localhost
+- name: CephX key management examples
+  hosts: localhost
   gather_facts: false
   vars:
     cluster: ceph
@@ -17,12 +18,12 @@
     - client.leseb1
     - client.pythonnnn
     keys_to_create:
-      - { name: client.pythonnnn, caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" }
-      - { name: client.existpassss, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" }
-      - { name: client.path, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" }
+      - { name: client.pythonnnn, caps: { mon: "allow rwx", mds: "allow *" }, mode: "0600" }
+      - { name: client.existpassss, caps: { mon: "allow r", osd: "allow *" }, mode: "0600" }
+      - { name: client.path, caps: { mon: "allow r", osd: "allow *" }, mode: "0600" }
   tasks:
-    - name: create ceph key(s) module
+    - name: Create ceph key(s) module
       ceph_key:
         name: "{{ item.name }}"
         caps: "{{ item.caps }}"
@@ -31,7 +32,7 @@
         containerized: "{{ container_exec_cmd | default(False) }}"
       with_items: "{{ keys_to_create }}"
-    - name: update ceph key(s)
+    - name: Update ceph key(s)
       ceph_key:
         name: "{{ item.name }}"
         state: update
@@ -40,7 +41,7 @@
         containerized: "{{ container_exec_cmd | default(False) }}"
       with_items: "{{ keys_to_create }}"
-    - name: delete ceph key(s)
+    - name: Delete ceph key(s)
       ceph_key:
         name: "{{ item }}"
         state: absent
@@ -48,7 +49,7 @@
         containerized: "{{ container_exec_cmd | default(False) }}"
       with_items: "{{ keys_to_delete }}"
-    - name: info ceph key(s)
+    - name: Info ceph key(s)
       ceph_key:
         name: "{{ item }}"
         state: info
@@ -58,7 +59,7 @@
       ignore_errors: true
       with_items: "{{ keys_to_info }}"
-    - name: list ceph key(s)
+    - name: List ceph key(s)
       ceph_key:
         state: list
         cluster: "{{ cluster }}"
@@ -66,7 +67,7 @@
       register: list_keys
       ignore_errors: true
-    - name: fetch_initial_keys
+    - name: Fetch_initial_keys  # noqa: ignore-errors
       ceph_key:
         state: fetch_initial_keys
         cluster: "{{ cluster }}"
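One change here is worth calling out: rather than fixing the ignore-errors finding on the last task, the commit silences it inline. ansible-lint supports per-task `# noqa: <rule>` comments for exactly this case; a sketch on a hypothetical task:

    - name: Probe a key that may legitimately be absent  # noqa: ignore-errors
      ceph_key:                 # ceph-ansible's own module, as used above
        name: client.maybe      # hypothetical key name
        state: info
      register: key_info
      ignore_errors: true       # tolerated here; the noqa keeps the linter quiet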

File diff suppressed because it is too large.


@@ -1,5 +1,5 @@
 ---
-- name: gather facts and prepare system for cephadm
+- name: Gather facts and prepare system for cephadm
   hosts:
     - "{{ mon_group_name|default('mons') }}"
     - "{{ osd_group_name|default('osds') }}"
@@ -15,23 +15,24 @@
   vars:
     delegate_facts_host: true
   tasks:
-    - import_role:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
         name: ceph-defaults
-    - name: validate if monitor group doesn't exist or empty
-      fail:
+    - name: Validate if monitor group doesn't exist or empty
+      ansible.builtin.fail:
         msg: "you must add a [mons] group and add at least one node."
       run_once: true
       when: groups[mon_group_name] is undefined or groups[mon_group_name] | length == 0
-    - name: validate if manager group doesn't exist or empty
-      fail:
+    - name: Validate if manager group doesn't exist or empty
+      ansible.builtin.fail:
         msg: "you must add a [mgrs] group and add at least one node."
       run_once: true
       when: groups[mgr_group_name] is undefined or groups[mgr_group_name] | length == 0
-    - name: validate monitor network configuration
-      fail:
+    - name: Validate monitor network configuration
+      ansible.builtin.fail:
         msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided"
       when:
         - mon_group_name in group_names
@@ -39,38 +40,38 @@
         - monitor_address_block == 'subnet'
         - monitor_interface == 'interface'
-    - name: validate dashboard configuration
+    - name: Validate dashboard configuration
       when: dashboard_enabled | bool
       run_once: true
       block:
-        - name: fail if [monitoring] group doesn't exist or empty
-          fail:
+        - name: Fail if [monitoring] group doesn't exist or empty
+          ansible.builtin.fail:
            msg: "you must add a [monitoring] group and add at least one node."
          when: groups[monitoring_group_name] is undefined or groups[monitoring_group_name] | length == 0
-        - name: fail when dashboard_admin_password is not set
-          fail:
+        - name: Fail when dashboard_admin_password is not set
+          ansible.builtin.fail:
            msg: "you must set dashboard_admin_password."
          when: dashboard_admin_password is undefined
-    - name: validate container registry credentials
-      fail:
+    - name: Validate container registry credentials
+      ansible.builtin.fail:
         msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set'
       when:
         - ceph_docker_registry_auth | bool
        - (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or
          (ceph_docker_registry_username | length == 0 or ceph_docker_registry_password | length == 0)
-    - name: gather facts
-      setup:
+    - name: Gather facts
+      ansible.builtin.setup:
         gather_subset:
           - 'all'
           - '!facter'
           - '!ohai'
       when: not delegate_facts_host | bool
-    - name: gather and delegate facts
-      setup:
+    - name: Gather and delegate facts
+      ansible.builtin.setup:
         gather_subset:
           - 'all'
          - '!facter'
@@ -81,76 +82,82 @@
       run_once: true
       when: delegate_facts_host | bool
-    - import_role:
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
         name: ceph-facts
         tasks_from: container_binary.yml
-    - name: check if it is atomic host
-      stat:
+    - name: Check if it is atomic host
+      ansible.builtin.stat:
         path: /run/ostree-booted
       register: stat_ostree
-    - name: set_fact is_atomic
-      set_fact:
+    - name: Set_fact is_atomic
+      ansible.builtin.set_fact:
         is_atomic: "{{ stat_ostree.stat.exists }}"
-    - import_role:
+    - name: Import ceph-container-engine role
+      ansible.builtin.import_role:
         name: ceph-container-engine
-    - import_role:
+    - name: Import ceph-container-common role
+      ansible.builtin.import_role:
         name: ceph-container-common
         tasks_from: registry.yml
       when: ceph_docker_registry_auth | bool
-    - name: configure repository for installing cephadm
+    - name: Configure repository for installing cephadm
       vars:
         ceph_origin: repository
         ceph_repository: community
       block:
-        - name: validate repository variables
-          import_role:
+        - name: Validate repository variables
+          ansible.builtin.import_role:
name: ceph-validate name: ceph-validate
tasks_from: check_repository.yml tasks_from: check_repository.yml
- name: configure repository - name: Configure repository
import_role: ansible.builtin.import_role:
name: ceph-common name: ceph-common
tasks_from: "configure_repository.yml" tasks_from: "configure_repository.yml"
- name: install cephadm requirements - name: Install cephadm requirements
package: ansible.builtin.package:
name: ['python3', 'lvm2'] name: ['python3', 'lvm2']
register: result register: result
until: result is succeeded until: result is succeeded
- name: install cephadm - name: Install cephadm
package: ansible.builtin.package:
name: cephadm name: cephadm
register: result register: result
until: result is succeeded until: result is succeeded
- name: set_fact cephadm_cmd - name: Set_fact cephadm_cmd
set_fact: ansible.builtin.set_fact:
cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}" cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}"
- name: bootstrap the cluster - name: Bootstrap the cluster
hosts: "{{ mon_group_name|default('mons') }}[0]" hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true become: true
gather_facts: false gather_facts: false
tasks: tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: set_monitor_address.yml tasks_from: set_monitor_address.yml
- name: create /etc/ceph directory - name: Create /etc/ceph directory
file: ansible.builtin.file:
path: /etc/ceph path: /etc/ceph
state: directory state: directory
mode: "0755"
- name: bootstrap the new cluster - name: Bootstrap the new cluster
cephadm_bootstrap: cephadm_bootstrap:
mon_ip: "{{ _current_monitor_address }}" mon_ip: "{{ _current_monitor_address }}"
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
@ -164,46 +171,46 @@
ssh_user: "{{ cephadm_ssh_user | default('root') }}" ssh_user: "{{ cephadm_ssh_user | default('root') }}"
ssh_config: "{{ cephadm_ssh_config | default(omit) }}" ssh_config: "{{ cephadm_ssh_config | default(omit) }}"
- name: set default container image in ceph configuration - name: Set default container image in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: set container image base in ceph configuration - name: Set container image base in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: set dashboard container image in ceph mgr configuration - name: Set dashboard container image in ceph mgr configuration
when: dashboard_enabled | bool when: dashboard_enabled | bool
block: block:
- name: set alertmanager container image in ceph configuration - name: Set alertmanager container image in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: set grafana container image in ceph configuration - name: Set grafana container image in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: set node-exporter container image in ceph configuration - name: Set node-exporter container image in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: set prometheus container image in ceph configuration - name: Set prometheus container image in ceph configuration
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
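Every cephadm invocation in these plays pins the helper container through the CEPHADM_IMAGE environment variable, so the shell and the configured image stay in lockstep. A standalone sketch of the pattern; the image reference below is illustrative, not a value from this repository:

    - name: Set default container image in ceph configuration (sketch)
      ansible.builtin.command: "cephadm shell -- ceph --cluster ceph config set global container_image quay.io/ceph/ceph:v17"
      changed_when: false
      environment:
        CEPHADM_IMAGE: 'quay.io/ceph/ceph:v17'  # illustrative image reference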
- name: add the other nodes - name: Add the other nodes
hosts: hosts:
- "{{ mon_group_name|default('mons') }}" - "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}" - "{{ osd_group_name|default('osds') }}"
@ -217,11 +224,12 @@
become: true become: true
gather_facts: false gather_facts: false
tasks: tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- name: get the cephadm ssh pub key - name: Get the cephadm ssh pub key
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} cephadm get-pub-key" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} cephadm get-pub-key"
changed_when: false changed_when: false
run_once: true run_once: true
register: cephadm_pubpkey register: cephadm_pubpkey
@ -229,35 +237,35 @@
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: allow cephadm key for {{ cephadm_ssh_user | default('root') }} account - name: Allow cephadm key
authorized_key: ansible.posix.authorized_key:
user: "{{ cephadm_ssh_user | default('root') }}" user: "{{ cephadm_ssh_user | default('root') }}"
key: '{{ cephadm_pubpkey.stdout }}' key: '{{ cephadm_pubpkey.stdout }}'
- name: run cephadm prepare-host - name: Run cephadm prepare-host
command: cephadm prepare-host ansible.builtin.command: cephadm prepare-host
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: manage nodes with cephadm - ipv4 - name: Manage nodes with cephadm - ipv4
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
changed_when: false changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}' delegate_to: '{{ groups[mon_group_name][0] }}'
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
when: ip_version == 'ipv4' when: ip_version == 'ipv4'
- name: manage nodes with cephadm - ipv6 - name: Manage nodes with cephadm - ipv6
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
changed_when: false changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}' delegate_to: '{{ groups[mon_group_name][0] }}'
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
when: ip_version == 'ipv6' when: ip_version == 'ipv6'
- name: add ceph label for core component - name: Add ceph label for core component
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph"
changed_when: false changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}' delegate_to: '{{ groups[mon_group_name][0] }}'
when: inventory_hostname in groups.get(mon_group_name, []) or when: inventory_hostname in groups.get(mon_group_name, []) or
@ -269,22 +277,23 @@
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: adjust service placement - name: Adjust service placement
hosts: "{{ mon_group_name|default('mons') }}[0]" hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true become: true
gather_facts: false gather_facts: false
tasks: tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- name: update the placement of monitor hosts - name: Update the placement of monitor hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mon --placement='label:{{ mon_group_name }}'" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mon --placement='label:{{ mon_group_name }}'"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: waiting for the monitor to join the quorum... - name: Waiting for the monitor to join the quorum...
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} quorum_status --format json" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} quorum_status --format json"
changed_when: false changed_when: false
register: ceph_health_raw register: ceph_health_raw
until: (ceph_health_raw.stdout | from_json)["quorum_names"] | length == groups.get(mon_group_name, []) | length until: (ceph_health_raw.stdout | from_json)["quorum_names"] | length == groups.get(mon_group_name, []) | length
@ -293,83 +302,85 @@
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of manager hosts - name: Update the placement of manager hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mgr --placement='label:{{ mgr_group_name }}'" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mgr --placement='label:{{ mgr_group_name }}'"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of crash hosts - name: Update the placement of crash hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: adjust monitoring service placement - name: Adjust monitoring service placement
hosts: "{{ monitoring_group_name|default('monitoring') }}" hosts: "{{ monitoring_group_name|default('monitoring') }}"
become: true become: true
gather_facts: false gather_facts: false
tasks: tasks:
- import_role: - name: Import ceph-defaults
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- name: with dashboard enabled - name: With dashboard enabled
when: dashboard_enabled | bool when: dashboard_enabled | bool
delegate_to: '{{ groups[mon_group_name][0] }}' delegate_to: '{{ groups[mon_group_name][0] }}'
run_once: true run_once: true
block: block:
- name: enable the prometheus module - name: Enable the prometheus module
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} mgr module enable prometheus" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} mgr module enable prometheus"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of alertmanager hosts - name: Update the placement of alertmanager hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='label:{{ monitoring_group_name }}'" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='label:{{ monitoring_group_name }}'"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of grafana hosts - name: Update the placement of grafana hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply grafana --placement='label:{{ monitoring_group_name }}'" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply grafana --placement='label:{{ monitoring_group_name }}'"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of prometheus hosts - name: Update the placement of prometheus hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply prometheus --placement='label:{{ monitoring_group_name }}'" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply prometheus --placement='label:{{ monitoring_group_name }}'"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: update the placement of node-exporter hosts - name: Update the placement of node-exporter hosts
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: print information - name: Print information
hosts: "{{ mon_group_name|default('mons') }}[0]" hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true become: true
gather_facts: false gather_facts: false
tasks: tasks:
- import_role: - name: Import ceph-defaults
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- name: show ceph orchestrator services - name: Show ceph orchestrator services
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ls --refresh" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ls --refresh"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: show ceph orchestrator daemons - name: Show ceph orchestrator daemons
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ps --refresh" ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ps --refresh"
changed_when: false changed_when: false
environment: environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: inform users about cephadm - name: Inform users about cephadm
debug: ansible.builtin.debug:
msg: | msg: |
This Ceph cluster is now ready to receive more configuration like This Ceph cluster is now ready to receive more configuration like
adding OSD, MDS daemons, create pools or keyring. adding OSD, MDS daemons, create pools or keyring.
View File
@ -5,54 +5,58 @@
# It is *not* intended to restart services since we don't want multiple service # It is *not* intended to restart services since we don't want multiple service
# restarts. # restarts.
- hosts: - name: Pre-requisite and facts gathering
- mons hosts:
- osds - mons
- mdss - osds
- rgws - mdss
- nfss - rgws
- rbdmirrors - nfss
- clients - rbdmirrors
- iscsigws - clients
- mgrs - iscsigws
- monitoring - mgrs
- monitoring
gather_facts: false gather_facts: false
become: True become: true
any_errors_fatal: true any_errors_fatal: true
vars: vars:
delegate_facts_host: True delegate_facts_host: true
pre_tasks: pre_tasks:
- import_tasks: "{{ playbook_dir }}/../raw_install_python.yml" - name: Import raw_install_python tasks
ansible.builtin.import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
tasks: tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
# pre-tasks for following import - # pre-tasks for following import -
- name: gather facts - name: Gather facts
setup: ansible.builtin.setup:
gather_subset: gather_subset:
- 'all' - 'all'
- '!facter' - '!facter'
- '!ohai' - '!ohai'
when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- name: gather and delegate facts - name: Gather and delegate facts
setup: ansible.builtin.setup:
gather_subset: gather_subset:
- 'all' - 'all'
- '!facter' - '!facter'
- '!ohai' - '!ohai'
delegate_to: "{{ item }}" delegate_to: "{{ item }}"
delegate_facts: True delegate_facts: true
with_items: "{{ groups['all'] | difference(groups.get(client_group_name | default('clients'), [])) }}" with_items: "{{ groups['all'] | difference(groups.get(client_group_name | default('clients'), [])) }}"
run_once: true run_once: true
when: delegate_facts_host | bool when: delegate_facts_host | bool
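The delegation pattern above gathers facts once and stores them under every host, so later plays can read hostvars without re-running setup per play. A simplified sketch that delegates to every inventory host (the real play excludes the client group):

    - name: Gather and delegate facts (sketch)
      ansible.builtin.setup:
        gather_subset:
          - 'all'
          - '!facter'
          - '!ohai'
      delegate_to: "{{ item }}"
      delegate_facts: true
      with_items: "{{ groups['all'] }}"
      run_once: true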
- hosts: - name: Migrate to podman
hosts:
- "{{ mon_group_name | default('mons') }}" - "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}" - "{{ osd_group_name | default('osds') }}"
- "{{ mds_group_name | default('mdss') }}" - "{{ mds_group_name | default('mdss') }}"
@ -65,20 +69,25 @@
gather_facts: false gather_facts: false
become: true become: true
tasks: tasks:
- name: set_fact docker2podman and container_binary - name: Set_fact docker2podman and container_binary
set_fact: ansible.builtin.set_fact:
docker2podman: True docker2podman: true
container_binary: podman container_binary: podman
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
- import_role:
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler name: ceph-handler
- name: install podman - name: Install podman
package: ansible.builtin.package:
name: podman name: podman
state: present state: present
register: result register: result
@ -86,17 +95,17 @@
tags: with_pkg tags: with_pkg
when: not is_atomic | bool when: not is_atomic | bool
- name: check podman presence # noqa : 305 - name: Check podman presence # noqa command-instead-of-shell
shell: command -v podman ansible.builtin.shell: command -v podman
register: podman_presence register: podman_presence
changed_when: false changed_when: false
failed_when: false failed_when: false
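Newer ansible-lint expects rule names after noqa instead of the retired numeric codes, so `# noqa : 305` becomes `# noqa command-instead-of-shell`. The exemption is still needed because `command -v` is a shell builtin:

    - name: Check podman presence  # noqa command-instead-of-shell
      ansible.builtin.shell: command -v podman  # 'command -v' only exists inside a shell
      register: podman_presence
      changed_when: false
      failed_when: false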
- name: pulling images from docker daemon - name: Pulling images from docker daemon
when: podman_presence.rc == 0 when: podman_presence.rc == 0
block: block:
- name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image from docker daemon" - name: Pulling Ceph container image from docker daemon
command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false changed_when: false
register: pull_image register: pull_image
until: pull_image.rc == 0 until: pull_image.rc == 0
@ -111,8 +120,8 @@
inventory_hostname in groups.get(iscsi_gw_group_name, []) or inventory_hostname in groups.get(iscsi_gw_group_name, []) or
inventory_hostname in groups.get(nfs_group_name, []) inventory_hostname in groups.get(nfs_group_name, [])
- name: "pulling alertmanager/grafana/prometheus images from docker daemon" - name: Pulling alertmanager/grafana/prometheus images from docker daemon
command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}" ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}"
changed_when: false changed_when: false
register: pull_image register: pull_image
until: pull_image.rc == 0 until: pull_image.rc == 0
@ -126,8 +135,8 @@
- dashboard_enabled | bool - dashboard_enabled | bool
- inventory_hostname in groups.get(monitoring_group_name, []) - inventory_hostname in groups.get(monitoring_group_name, [])
- name: "pulling {{ node_exporter_container_image }} image from docker daemon" - name: Pulling node_exporter image from docker daemon
command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ node_exporter_container_image }}" ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ node_exporter_container_image }}"
changed_when: false changed_when: false
register: pull_image register: pull_image
until: pull_image.rc == 0 until: pull_image.rc == 0
@ -135,47 +144,56 @@
delay: 10 delay: 10
when: dashboard_enabled | bool when: dashboard_enabled | bool
- import_role: - name: Import ceph-mon role
ansible.builtin.import_role:
name: ceph-mon name: ceph-mon
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(mon_group_name, []) when: inventory_hostname in groups.get(mon_group_name, [])
- import_role: - name: Import ceph-iscsi-gw role
ansible.builtin.import_role:
name: ceph-iscsi-gw name: ceph-iscsi-gw
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(iscsi_gw_group_name, []) when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- import_role: - name: Import ceph-mds role
ansible.builtin.import_role:
name: ceph-mds name: ceph-mds
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(mds_group_name, []) when: inventory_hostname in groups.get(mds_group_name, [])
- import_role: - name: Import ceph-mgr role
ansible.builtin.import_role:
name: ceph-mgr name: ceph-mgr
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(mgr_group_name, []) when: inventory_hostname in groups.get(mgr_group_name, [])
- import_role: - name: Import ceph-nfs role
ansible.builtin.import_role:
name: ceph-nfs name: ceph-nfs
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(nfs_group_name, []) when: inventory_hostname in groups.get(nfs_group_name, [])
- import_role: - name: Import ceph-osd role
ansible.builtin.import_role:
name: ceph-osd name: ceph-osd
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(osd_group_name, []) when: inventory_hostname in groups.get(osd_group_name, [])
- import_role: - name: Import ceph-rbd-mirror role
ansible.builtin.import_role:
name: ceph-rbd-mirror name: ceph-rbd-mirror
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(rbdmirror_group_name, []) when: inventory_hostname in groups.get(rbdmirror_group_name, [])
- import_role: - name: Import ceph-rgw role
ansible.builtin.import_role:
name: ceph-rgw name: ceph-rgw
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(rgw_group_name, []) when: inventory_hostname in groups.get(rgw_group_name, [])
- import_role: - name: Import ceph-crash role
ansible.builtin.import_role:
name: ceph-crash name: ceph-crash
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(mon_group_name, []) or when: inventory_hostname in groups.get(mon_group_name, []) or
@ -185,28 +203,32 @@
inventory_hostname in groups.get(mgr_group_name, []) or inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, []) inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: dashboard configuration - name: Dashboard configuration
when: dashboard_enabled | bool when: dashboard_enabled | bool
block: block:
- import_role: - name: Import ceph-node-exporter role
ansible.builtin.import_role:
name: ceph-node-exporter name: ceph-node-exporter
tasks_from: systemd.yml tasks_from: systemd.yml
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: grafana.yml tasks_from: grafana.yml
when: inventory_hostname in groups.get(monitoring_group_name, []) when: inventory_hostname in groups.get(monitoring_group_name, [])
- import_role: - name: Import ceph-grafana role
ansible.builtin.import_role:
name: ceph-grafana name: ceph-grafana
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(monitoring_group_name, []) when: inventory_hostname in groups.get(monitoring_group_name, [])
- import_role: - name: Import ceph-prometheus role
ansible.builtin.import_role:
name: ceph-prometheus name: ceph-prometheus
tasks_from: systemd.yml tasks_from: systemd.yml
when: inventory_hostname in groups.get(monitoring_group_name, []) when: inventory_hostname in groups.get(monitoring_group_name, [])
- name: reload systemd daemon - name: Reload systemd daemon
systemd: ansible.builtin.systemd:
daemon_reload: yes daemon_reload: true
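The yaml[truthy] rule standardizes booleans on true/false, which is why yes/no flips throughout this commit. The final task of this play, in its linted form:

    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true  # was 'daemon_reload: yes'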
View File
@ -1,20 +1,22 @@
- hosts: ---
- mons - name: Gather ceph logs
- osds hosts:
- mdss - mons
- rgws - osds
- nfss - mdss
- rbdmirrors - rgws
- clients - nfss
- mgrs - rbdmirrors
- iscsigws - clients
- mgrs
- iscsigws
gather_facts: false gather_facts: false
become: yes become: true
tasks: tasks:
- name: create a temp directory - name: Create a temp directory
tempfile: ansible.builtin.tempfile:
state: directory state: directory
prefix: ceph_ansible prefix: ceph_ansible
run_once: true run_once: true
@ -22,17 +24,17 @@
become: false become: false
delegate_to: localhost delegate_to: localhost
- name: set_fact lookup_ceph_config - lookup keys, conf and logs - name: Set_fact lookup_ceph_config - lookup keys, conf and logs
find: ansible.builtin.find:
paths: paths:
- /etc/ceph - /etc/ceph
- /var/log/ceph - /var/log/ceph
register: ceph_collect register: ceph_collect
- name: collect ceph logs, config and keys in "{{ localtempfile.path }}" on the machine running ansible - name: Collect ceph logs, config and keys on the machine running ansible
fetch: ansible.builtin.fetch:
src: "{{ item.path }}" src: "{{ item.path }}"
dest: "{{ localtempfile.path }}" dest: "{{ localtempfile.path }}"
fail_on_missing: no fail_on_missing: false
flat: no flat: false
with_items: "{{ ceph_collect.files }}" with_items: "{{ ceph_collect.files }}"
View File
@ -1,4 +1,5 @@
- name: creates logical volumes for the bucket index or fs journals on a single device. ---
- name: Creates logical volumes for the bucket index or fs journals on a single device.
become: true become: true
hosts: osds hosts: osds
@ -21,78 +22,79 @@
tasks: tasks:
- name: include vars of lv_vars.yaml - name: Include vars of lv_vars.yaml
include_vars: ansible.builtin.include_vars:
file: lv_vars.yaml # noqa 505 file: lv_vars.yaml # noqa missing-import
failed_when: false failed_when: false
# ensure nvme_device is set # ensure nvme_device is set
- name: fail if nvme_device is not defined - name: Fail if nvme_device is not defined
fail: ansible.builtin.fail:
msg: "nvme_device has not been set by the user" msg: "nvme_device has not been set by the user"
when: nvme_device is undefined or nvme_device == 'dummy' when: nvme_device is undefined or nvme_device == 'dummy'
# need to check if lvm2 is installed # need to check if lvm2 is installed
- name: install lvm2 - name: Install lvm2
package: ansible.builtin.package:
name: lvm2 name: lvm2
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
# Make entire nvme device a VG # Make entire nvme device a VG
- name: add nvme device as lvm pv - name: Add nvme device as lvm pv
lvg: community.general.lvg:
force: yes force: true
pvs: "{{ nvme_device }}" pvs: "{{ nvme_device }}"
pesize: 4 pesize: 4
state: present state: present
vg: "{{ nvme_vg_name }}" vg: "{{ nvme_vg_name }}"
- name: create lvs for fs journals for the bucket index on the nvme device - name: Create lvs for fs journals for the bucket index on the nvme device
lvol: community.general.lvol:
lv: "{{ item.journal_name }}" lv: "{{ item.journal_name }}"
vg: "{{ nvme_vg_name }}" vg: "{{ nvme_vg_name }}"
size: "{{ journal_size }}" size: "{{ journal_size }}"
pvs: "{{ nvme_device }}" pvs: "{{ nvme_device }}"
with_items: "{{ nvme_device_lvs }}" with_items: "{{ nvme_device_lvs }}"
- name: create lvs for fs journals for hdd devices - name: Create lvs for fs journals for hdd devices
lvol: community.general.lvol:
lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}" lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ nvme_vg_name }}" vg: "{{ nvme_vg_name }}"
size: "{{ journal_size }}" size: "{{ journal_size }}"
with_items: "{{ hdd_devices }}" with_items: "{{ hdd_devices }}"
- name: create the lv for data portion of the bucket index on the nvme device - name: Create the lv for data portion of the bucket index on the nvme device
lvol: community.general.lvol:
lv: "{{ item.lv_name }}" lv: "{{ item.lv_name }}"
vg: "{{ nvme_vg_name }}" vg: "{{ nvme_vg_name }}"
size: "{{ item.size }}" size: "{{ item.size }}"
pvs: "{{ nvme_device }}" pvs: "{{ nvme_device }}"
with_items: "{{ nvme_device_lvs }}" with_items: "{{ nvme_device_lvs }}"
# Make sure all hdd devices have a unique volume group # Make sure all hdd devices have a unique volume group
- name: create vgs for all hdd devices - name: Create vgs for all hdd devices
lvg: community.general.lvg:
force: yes force: true
pvs: "{{ item }}" pvs: "{{ item }}"
pesize: 4 pesize: 4
state: present state: present
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
with_items: "{{ hdd_devices }}" with_items: "{{ hdd_devices }}"
- name: create lvs for the data portion on hdd devices - name: Create lvs for the data portion on hdd devices
lvol: community.general.lvol:
lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}" lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
size: "{{ hdd_lv_size }}" size: "{{ hdd_lv_size }}"
pvs: "{{ item }}" pvs: "{{ item }}"
with_items: "{{ hdd_devices }}" with_items: "{{ hdd_devices }}"
- name: "write output for osds.yml to {{ logfile_path }}" - name: Write output for osds.yml
become: false become: false
copy: ansible.builtin.copy:
content: "{{ logfile }}" content: "{{ logfile }}"
dest: "{{ logfile_path }}" dest: "{{ logfile_path }}"
delegate_to: localhost mode: preserve
delegate_to: localhost
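Adding `mode: preserve` satisfies the risky-file-permissions rule without hard-coding permission bits: the destination keeps the mode of the written content. The linted task in full:

    - name: Write output for osds.yml
      become: false
      ansible.builtin.copy:
        content: "{{ logfile }}"
        dest: "{{ logfile_path }}"
        mode: preserve  # avoids risky-file-permissions without fixing bits
      delegate_to: localhost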
View File
@ -1,108 +1,109 @@
- name: tear down existing osd filesystems then logical volumes, volume groups, and physical volumes ---
- name: Tear down existing osd filesystems then logical volumes, volume groups, and physical volumes
become: true become: true
hosts: osds hosts: osds
vars_prompt: vars_prompt:
- name: ireallymeanit - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to tear down the logical volumes? prompt: Are you sure you want to tear down the logical volumes?
default: 'no' default: 'no'
private: no private: false
tasks: tasks:
- name: exit playbook, if user did not mean to tear down logical volumes - name: Exit playbook, if user did not mean to tear down logical volumes
fail: ansible.builtin.fail:
msg: > msg: >
"Exiting lv-teardown playbook, logical volumes were NOT torn down. "Exiting lv-teardown playbook, logical volumes were NOT torn down.
To tear down the logical volumes, either say 'yes' on the prompt or To tear down the logical volumes, either say 'yes' on the prompt or
use `-e ireallymeanit=yes` on the command line when use `-e ireallymeanit=yes` on the command line when
invoking the playbook" invoking the playbook"
when: ireallymeanit != 'yes' when: ireallymeanit != 'yes'
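The prompt variable keeps its historical lowercase name; an inline noqa silences name[casing] for that single entry instead of renaming a variable that the exit task above still references:

    vars_prompt:
      - name: ireallymeanit  # noqa: name[casing]
        prompt: Are you sure you want to tear down the logical volumes?
        default: 'no'
        private: false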
- name: include vars of lv_vars.yaml - name: Include vars of lv_vars.yaml
include_vars: ansible.builtin.include_vars:
file: lv_vars.yaml # noqa 505 file: lv_vars.yaml # noqa missing-import
failed_when: false failed_when: false
# need to check if lvm2 is installed # need to check if lvm2 is installed
- name: install lvm2 - name: Install lvm2
package: ansible.builtin.package:
name: lvm2 name: lvm2
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
# BEGIN TEARDOWN # BEGIN TEARDOWN
- name: find any existing osd filesystems - name: Find any existing osd filesystems
shell: | ansible.builtin.shell: |
set -o pipefail; set -o pipefail;
grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}' grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}'
register: old_osd_filesystems register: old_osd_filesystems
changed_when: false changed_when: false
- name: tear down any existing osd filesystem - name: Tear down any existing osd filesystem
ansible.posix.mount: ansible.posix.mount:
path: "{{ item }}" path: "{{ item }}"
state: unmounted state: unmounted
with_items: "{{ old_osd_filesystems.stdout_lines }}" with_items: "{{ old_osd_filesystems.stdout_lines }}"
- name: kill all lvm commands that may have been hung - name: Kill all lvm commands that may have been hung
command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n" ansible.builtin.command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n"
failed_when: false failed_when: false
changed_when: false changed_when: false
## Logical Vols ## Logical Vols
- name: tear down existing lv for bucket index - name: Tear down existing lv for bucket index
lvol: community.general.lvol:
lv: "{{ item.lv_name }}" lv: "{{ item.lv_name }}"
vg: "{{ nvme_vg_name }}" vg: "{{ nvme_vg_name }}"
state: absent state: absent
force: yes force: true
with_items: "{{ nvme_device_lvs }}" with_items: "{{ nvme_device_lvs }}"
- name: tear down any existing hdd data lvs - name: Tear down any existing hdd data lvs
lvol: community.general.lvol:
lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}" lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
state: absent state: absent
force: yes force: true
with_items: "{{ hdd_devices }}" with_items: "{{ hdd_devices }}"
- name: tear down any existing lv of journal for bucket index - name: Tear down any existing lv of journal for bucket index
lvol: community.general.lvol:
lv: "{{ item.journal_name }}" lv: "{{ item.journal_name }}"
vg: "{{ nvme_vg_name }}" vg: "{{ nvme_vg_name }}"
state: absent state: absent
force: yes force: true
with_items: "{{ nvme_device_lvs }}" with_items: "{{ nvme_device_lvs }}"
- name: tear down any existing lvs of hdd journals - name: Tear down any existing lvs of hdd journals
lvol: community.general.lvol:
lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}" lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
vg: "{{ nvme_vg_name }}" vg: "{{ nvme_vg_name }}"
state: absent state: absent
force: yes force: true
with_items: "{{ hdd_devices }}" with_items: "{{ hdd_devices }}"
## Volume Groups ## Volume Groups
- name: remove vg on nvme device - name: Remove vg on nvme device
lvg: community.general.lvg:
vg: "{{ nvme_vg_name }}" vg: "{{ nvme_vg_name }}"
state: absent state: absent
force: yes force: true
- name: remove vg for each hdd device - name: Remove vg for each hdd device
lvg: community.general.lvg:
vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
state: absent state: absent
force: yes force: true
with_items: "{{ hdd_devices }}" with_items: "{{ hdd_devices }}"
## Physical Vols ## Physical Vols
- name: tear down pv for nvme device - name: Tear down pv for nvme device
command: "pvremove --force --yes {{ nvme_device }}" ansible.builtin.command: "pvremove --force --yes {{ nvme_device }}"
changed_when: false changed_when: false
- name: tear down pv for each hdd device - name: Tear down pv for each hdd device
command: "pvremove --force --yes {{ item }}" ansible.builtin.command: "pvremove --force --yes {{ item }}"
changed_when: false changed_when: false
with_items: "{{ hdd_devices }}" with_items: "{{ hdd_devices }}"
File diff suppressed because it is too large

View File
@ -13,17 +13,17 @@
# Overrides the prompt using the -e option. Can be used in # Overrides the prompt using the -e option. Can be used in
# automation scripts to avoid the interactive prompt. # automation scripts to avoid the interactive prompt.
- name: confirm whether user really meant to purge the dashboard - name: Confirm whether user really meant to purge the dashboard
hosts: localhost hosts: localhost
gather_facts: false gather_facts: false
vars_prompt: vars_prompt:
- name: ireallymeanit - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to purge the dashboard? prompt: Are you sure you want to purge the dashboard?
default: 'no' default: 'no'
private: no private: false
tasks: tasks:
- name: exit playbook, if user did not mean to purge dashboard - name: Exit playbook, if user did not mean to purge dashboard
fail: ansible.builtin.fail:
msg: > msg: >
"Exiting purge-dashboard playbook, dashboard was NOT purged. "Exiting purge-dashboard playbook, dashboard was NOT purged.
To purge the dashboard, either say 'yes' on the prompt or To purge the dashboard, either say 'yes' on the prompt or
@ -31,18 +31,18 @@
invoking the playbook" invoking the playbook"
when: ireallymeanit != 'yes' when: ireallymeanit != 'yes'
- name: import_role ceph-defaults - name: Import_role ceph-defaults
import_role: ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- name: check if a legacy grafana-server group exists - name: Check if a legacy grafana-server group exists
import_role: ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: convert_grafana_server_group_name.yml tasks_from: convert_grafana_server_group_name.yml
when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0 when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0
- name: gather facts on all hosts - name: Gather facts on all hosts
hosts: hosts:
- "{{ mon_group_name|default('mons') }}" - "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}" - "{{ osd_group_name|default('osds') }}"
@ -55,9 +55,11 @@
- "{{ monitoring_group_name | default('monitoring') }}" - "{{ monitoring_group_name | default('monitoring') }}"
become: true become: true
tasks: tasks:
- debug: msg="gather facts on all Ceph hosts for following reference" - name: Gather facts on all Ceph hosts for following reference
ansible.builtin.debug:
msg: "gather facts on all Ceph hosts for following reference"
- name: purge node exporter - name: Purge node exporter
hosts: hosts:
- "{{ mon_group_name|default('mons') }}" - "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}" - "{{ osd_group_name|default('osds') }}"
@ -71,58 +73,62 @@
gather_facts: false gather_facts: false
become: true become: true
tasks: tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: container_binary tasks_from: container_binary
- name: disable node_exporter service - name: Disable node_exporter service
service: ansible.builtin.service:
name: node_exporter name: node_exporter
state: stopped state: stopped
enabled: no enabled: false
failed_when: false failed_when: false
- name: remove node_exporter service files - name: Remove node_exporter service files
file: ansible.builtin.file:
name: "{{ item }}" name: "{{ item }}"
state: absent state: absent
loop: loop:
- /etc/systemd/system/node_exporter.service - /etc/systemd/system/node_exporter.service
- /run/node_exporter.service-cid - /run/node_exporter.service-cid
- name: remove node-exporter image - name: Remove node-exporter image
command: "{{ container_binary }} rmi {{ node_exporter_container_image }}" ansible.builtin.command: "{{ container_binary }} rmi {{ node_exporter_container_image }}"
changed_when: false changed_when: false
failed_when: false failed_when: false
- name: purge ceph monitoring - name: Purge ceph monitoring
hosts: "{{ monitoring_group_name | default('monitoring') }}" hosts: "{{ monitoring_group_name | default('monitoring') }}"
gather_facts: false gather_facts: false
become: true become: true
tasks: tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: container_binary tasks_from: container_binary
- name: stop services - name: Stop services
service: ansible.builtin.service:
name: "{{ item }}" name: "{{ item }}"
state: stopped state: stopped
enabled: no enabled: false
failed_when: false failed_when: false
loop: loop:
- alertmanager - alertmanager
- prometheus - prometheus
- grafana-server - grafana-server
- name: remove systemd service files - name: Remove systemd service files
file: ansible.builtin.file:
name: "{{ item }}" name: "{{ item }}"
state: absent state: absent
loop: loop:
@ -133,8 +139,8 @@
- /run/prometheus.service-cid - /run/prometheus.service-cid
- /run/grafana-server.service-cid - /run/grafana-server.service-cid
- name: remove ceph dashboard container images - name: Remove ceph dashboard container images
command: "{{ container_binary }} rmi {{ item }}" ansible.builtin.command: "{{ container_binary }} rmi {{ item }}"
loop: loop:
- "{{ alertmanager_container_image }}" - "{{ alertmanager_container_image }}"
- "{{ prometheus_container_image }}" - "{{ prometheus_container_image }}"
@ -142,16 +148,16 @@
changed_when: false changed_when: false
failed_when: false failed_when: false
- name: remove ceph-grafana-dashboards package on RedHat or SUSE - name: Remove ceph-grafana-dashboards package on RedHat or SUSE
package: ansible.builtin.package:
name: ceph-grafana-dashboards name: ceph-grafana-dashboards
state: absent state: absent
when: when:
- not containerized_deployment | bool - not containerized_deployment | bool
- ansible_facts['os_family'] in ['RedHat', 'Suse'] - ansible_facts['os_family'] in ['RedHat', 'Suse']
- name: remove data - name: Remove data
file: ansible.builtin.file:
name: "{{ item }}" name: "{{ item }}"
state: absent state: absent
loop: loop:
@ -162,7 +168,7 @@
- "{{ prometheus_data_dir }}" - "{{ prometheus_data_dir }}"
- /var/lib/grafana - /var/lib/grafana
- name: purge ceph dashboard - name: Purge ceph dashboard
hosts: "{{ groups[mgr_group_name] | default(groups[mon_group_name]) | default(omit) }}" hosts: "{{ groups[mgr_group_name] | default(groups[mon_group_name]) | default(omit) }}"
gather_facts: false gather_facts: false
become: true become: true
@ -170,14 +176,16 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}"
tasks: tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: container_binary tasks_from: container_binary
- name: remove the dashboard admin user - name: Remove the dashboard admin user
ceph_dashboard_user: ceph_dashboard_user:
name: "{{ dashboard_admin_user }}" name: "{{ dashboard_admin_user }}"
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
@ -185,7 +193,7 @@
run_once: true run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}" delegate_to: "{{ groups[mon_group_name][0] }}"
- name: remove radosgw system user - name: Remove radosgw system user
radosgw_user: radosgw_user:
name: "{{ dashboard_rgw_api_user_id }}" name: "{{ dashboard_rgw_api_user_id }}"
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
@ -194,7 +202,7 @@
delegate_to: "{{ groups[mon_group_name][0] }}" delegate_to: "{{ groups[mon_group_name][0] }}"
when: groups.get(rgw_group_name, []) | length > 0 when: groups.get(rgw_group_name, []) | length > 0
- name: disable mgr dashboard and prometheus modules - name: Disable mgr dashboard and prometheus modules
ceph_mgr_module: ceph_mgr_module:
name: "{{ item }}" name: "{{ item }}"
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
@ -205,8 +213,8 @@
- dashboard - dashboard
- prometheus - prometheus
- name: remove TLS certificate and key files - name: Remove TLS certificate and key files
file: ansible.builtin.file:
name: "/etc/ceph/ceph-dashboard.{{ item }}" name: "/etc/ceph/ceph-dashboard.{{ item }}"
state: absent state: absent
loop: loop:
@ -214,8 +222,8 @@
- key - key
when: dashboard_protocol == "https" when: dashboard_protocol == "https"
- name: remove ceph-mgr-dashboard package - name: Remove ceph-mgr-dashboard package
package: ansible.builtin.package:
name: ceph-mgr-dashboard name: ceph-mgr-dashboard
state: absent state: absent
when: not containerized_deployment | bool when: not containerized_deployment | bool
View File
@ -1,96 +1,97 @@
--- ---
- name: Confirm removal of the iSCSI gateway configuration - name: Confirm removal of the iSCSI gateway configuration
hosts: localhost hosts: localhost
vars_prompt: vars_prompt:
- name: purge_config - name: purge_config # noqa: name[casing]
prompt: Which configuration elements should be purged? (all, lio or abort) prompt: Which configuration elements should be purged? (all, lio or abort)
default: 'abort' default: 'abort'
private: no private: false
tasks: tasks:
- name: Exit playbook if user aborted the purge - name: Exit playbook if user aborted the purge
fail: ansible.builtin.fail:
msg: > msg: >
"You have aborted the purge of the iSCSI gateway configuration" "You have aborted the purge of the iSCSI gateway configuration"
when: purge_config == 'abort' when: purge_config == 'abort'
- name: set_fact igw_purge_type - name: Set_fact igw_purge_type
set_fact: ansible.builtin.set_fact:
igw_purge_type: "{{ purge_config }}" igw_purge_type: "{{ purge_config }}"
- name: stopping the gateways - name: Stopping the gateways
hosts: hosts:
- iscsigws - iscsigws
become: yes become: true
vars:
- igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
tasks: tasks:
- name: stopping and disabling iscsi daemons - name: Stopping and disabling iscsi daemons
service: ansible.builtin.service:
name: "{{ item }}" name: "{{ item }}"
state: stopped state: stopped
enabled: no enabled: false
with_items: with_items:
- rbd-target-gw - rbd-target-gw
- rbd-target-api - rbd-target-api
- tcmu-runner - tcmu-runner
- name: removing the gateway configuration - name: Removing the gateway configuration
hosts: hosts:
- iscsigws - iscsigws
become: yes become: true
vars: vars:
- igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}" igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
tasks: tasks:
- name: igw_purge | deleting configured rbd devices - name: Igw_purge | deleting configured rbd devices
igw_purge: mode="disks" igw_purge:
mode: "disks"
when: igw_purge_type == 'all' when: igw_purge_type == 'all'
run_once: true run_once: true
- name: igw_purge | purging the gateway configuration - name: Igw_purge | purging the gateway configuration
igw_purge: mode="gateway" igw_purge:
mode: "gateway"
run_once: true run_once: true
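The purge tasks also drop free-form key=value module arguments in favor of an explicit YAML dict, which is the style ansible-lint's free-form checks push toward:

    # before: inline key=value arguments
    - name: igw_purge | purging the gateway configuration
      igw_purge: mode="gateway"
      run_once: true

    # after: an explicit argument mapping
    - name: Igw_purge | purging the gateway configuration
      igw_purge:
        mode: "gateway"
      run_once: true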
- name: restart and enable iscsi daemons - name: Restart and enable iscsi daemons
when: igw_purge_type == 'lio' when: igw_purge_type == 'lio'
service: ansible.builtin.service:
name: "{{ item }}" name: "{{ item }}"
state: started state: started
enabled: yes enabled: true
with_items: with_items:
- tcmu-runner - tcmu-runner
- rbd-target-api - rbd-target-api
- rbd-target-gw - rbd-target-gw
- name: remove the gateways from the ceph dashboard - name: Remove the gateways from the ceph dashboard
hosts: mons hosts: mons
become: true become: true
tasks: tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- name: iscsi gateways with ceph dashboard - name: Iscsi gateways with ceph dashboard
when: dashboard_enabled | bool when: dashboard_enabled | bool
run_once: true run_once: true
block: block:
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: container_binary tasks_from: container_binary
- name: set_fact container_exec_cmd - name: Set_fact container_exec_cmd
set_fact: ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool when: containerized_deployment | bool
- name: get iscsi gateway list - name: Get iscsi gateway list
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-list -f json" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-list -f json"
changed_when: false changed_when: false
register: gateways register: gateways
- name: remove iscsi gateways - name: Remove iscsi gateways
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-rm {{ item }}" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-rm {{ item }}"
with_items: '{{ (gateways.stdout | from_json)["gateways"] }}' with_items: '{{ (gateways.stdout | from_json)["gateways"] }}'
changed_when: false
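An illustrative non-interactive run; the path infrastructure-playbooks/purge-iscsi-gateways.yml is assumed from the repository layout, and supplying purge_config with -e bypasses the vars_prompt:

    ansible-playbook -i <inventory> infrastructure-playbooks/purge-iscsi-gateways.yml -e purge_config=lio   # purge the LIO gateway configuration only
    ansible-playbook -i <inventory> infrastructure-playbooks/purge-iscsi-gateways.yml -e purge_config=all   # also delete the configured rbd devices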
@ -12,54 +12,54 @@
# admin_secret_key # admin_secret_key
# #
# Additionally modify the users list and buckets list to create the # Additionally modify the users list and buckets list to create the
# users and buckets you want # users and buckets you want
# #
- name: add rgw users and buckets - name: Add rgw users and buckets
connection: local connection: local
hosts: localhost hosts: localhost
gather_facts: no gather_facts: false
tasks: tasks:
- name: add rgw users and buckets - name: Add rgw users and buckets
ceph_add_users_buckets: ceph_add_users_buckets:
rgw_host: '172.20.0.2' rgw_host: '172.20.0.2'
port: 8000 port: 8000
admin_access_key: '8W56BITCSX27CD555Z5B' admin_access_key: '8W56BITCSX27CD555Z5B'
admin_secret_key: 'JcrsUNDNPAvnAWHiBmwKOzMNreOIw2kJWAclQQ20' admin_secret_key: 'JcrsUNDNPAvnAWHiBmwKOzMNreOIw2kJWAclQQ20'
users: users:
- username: 'test1' - username: 'test1'
fullname: 'tester' fullname: 'tester'
email: 'dan1@email.com' email: 'dan1@email.com'
maxbucket: 666 maxbucket: 666
suspend: false suspend: false
autogenkey: false autogenkey: false
accesskey: 'B3AR4Q33L59YV56A9A2F' accesskey: 'B3AR4Q33L59YV56A9A2F'
secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76' secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
userquota: true userquota: true
usermaxsize: '1000' usermaxsize: '1000'
usermaxobjects: 3 usermaxobjects: 3
bucketquota: true bucketquota: true
bucketmaxsize: '1000' bucketmaxsize: '1000'
bucketmaxobjects: 3 bucketmaxobjects: 3
- username: 'test2' - username: 'test2'
fullname: 'tester' fullname: 'tester'
buckets: buckets:
- bucket: 'bucket1' - bucket: 'bucket1'
user: 'test2' user: 'test2'
- bucket: 'bucket2' - bucket: 'bucket2'
user: 'test1' user: 'test1'
- bucket: 'bucket3' - bucket: 'bucket3'
user: 'test1' user: 'test1'
- bucket: 'bucket4' - bucket: 'bucket4'
user: 'test1' user: 'test1'
- bucket: 'bucket5' - bucket: 'bucket5'
user: 'test1' user: 'test1'
- bucket: 'bucket6' - bucket: 'bucket6'
user: 'test2' user: 'test2'
- bucket: 'bucket7' - bucket: 'bucket7'
user: 'test2' user: 'test2'
- bucket: 'bucket8' - bucket: 'bucket8'
user: 'test2' user: 'test2'
- bucket: 'bucket9' - bucket: 'bucket9'
user: 'test2' user: 'test2'
- bucket: 'bucket10' - bucket: 'bucket10'
user: 'test2' user: 'test2'
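Assuming the module run succeeds, the standard radosgw-admin CLI offers a minimal sanity check (user and bucket names taken from the play above):

    radosgw-admin user info --uid=test1      # quotas and keys as configured
    radosgw-admin bucket list --uid=test2    # buckets owned by test2: bucket1, bucket6..bucket10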
File diff suppressed because it is too large
@ -9,35 +9,41 @@
# ansible-playbook -e ireallymeanit=yes|no shrink-mds.yml # ansible-playbook -e ireallymeanit=yes|no shrink-mds.yml
# Overrides the prompt using -e option. Can be used in # Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt. # automation scripts to avoid interactive prompt.
- name: gather facts and check the init system - name: Gather facts and check the init system
hosts: hosts:
- "{{ mon_group_name | default('mons') }}" - "{{ mon_group_name | default('mons') }}"
- "{{ mds_group_name | default('mdss') }}" - "{{ mds_group_name | default('mdss') }}"
become: true become: true
tasks: tasks:
- debug: - name: Gather facts on all Ceph hosts for following reference
ansible.builtin.debug:
msg: gather facts on all Ceph hosts for following reference msg: gather facts on all Ceph hosts for following reference
- import_role:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- import_role:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: container_binary tasks_from: container_binary
- name: perform checks, remove mds and print cluster health - name: Perform checks, remove mds and print cluster health
hosts: mons[0] hosts: mons[0]
become: true become: true
vars_prompt: vars_prompt:
- name: ireallymeanit - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster? prompt: Are you sure you want to shrink the cluster?
default: 'no' default: 'no'
private: no private: false
pre_tasks: pre_tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- name: exit playbook, if no mds was given - name: Exit playbook, if no mds was given
when: mds_to_kill is not defined when: mds_to_kill is not defined
fail: ansible.builtin.fail:
msg: > msg: >
mds_to_kill must be declared. mds_to_kill must be declared.
Exiting shrink-cluster playbook, no MDS was removed. On the command Exiting shrink-cluster playbook, no MDS was removed. On the command
@ -45,106 +51,109 @@
"-e mds_to_kill=ceph-mds1" argument. You can only remove a single "-e mds_to_kill=ceph-mds1" argument. You can only remove a single
MDS each time the playbook runs." MDS each time the playbook runs."
- name: exit playbook, if the mds is not part of the inventory - name: Exit playbook, if the mds is not part of the inventory
when: mds_to_kill not in groups[mds_group_name] when: mds_to_kill not in groups[mds_group_name]
fail: ansible.builtin.fail:
msg: "It seems that the host given is not part of your inventory, msg: "It seems that the host given is not part of your inventory,
please make sure it is." please make sure it is."
- name: exit playbook, if user did not mean to shrink cluster - name: Exit playbook, if user did not mean to shrink cluster
when: ireallymeanit != 'yes' when: ireallymeanit != 'yes'
fail: ansible.builtin.fail:
msg: "Exiting shrink-mds playbook, no mds was removed. msg: "Exiting shrink-mds playbook, no mds was removed.
To shrink the cluster, either say 'yes' on the prompt To shrink the cluster, either say 'yes' on the prompt
or use `-e ireallymeanit=yes` on the command line when or use `-e ireallymeanit=yes` on the command line when
invoking the playbook" invoking the playbook"
- name: set_fact container_exec_cmd for mon0 - name: Set_fact container_exec_cmd for mon0
set_fact: ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster - name: Exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
changed_when: false changed_when: false
register: ceph_health register: ceph_health
until: ceph_health is succeeded until: ceph_health is succeeded
retries: 5 retries: 5
delay: 2 delay: 2
- name: set_fact mds_to_kill_hostname - name: Set_fact mds_to_kill_hostname
set_fact: ansible.builtin.set_fact:
mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}" mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}"
tasks: tasks:
# get rid of this as soon as "systemctl stop ceph-mds@$HOSTNAME" also # get rid of this as soon as "systemctl stop ceph-mds@$HOSTNAME" also
# removes the MDS from the FS map. # removes the MDS from the FS map.
- name: exit mds when containerized deployment - name: Exit mds when containerized deployment
command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit"
changed_when: false changed_when: false
when: containerized_deployment | bool when: containerized_deployment | bool
- name: get ceph status - name: Get ceph status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
register: ceph_status register: ceph_status
changed_when: false changed_when: false
- name: set_fact current_max_mds - name: Set_fact current_max_mds
set_fact: ansible.builtin.set_fact:
current_max_mds: "{{ (ceph_status.stdout | from_json)['fsmap']['max'] }}" current_max_mds: "{{ (ceph_status.stdout | from_json)['fsmap']['max'] }}"
- name: fail if removing that mds node wouldn't satisfy max_mds anymore - name: Fail if removing that mds node wouldn't satisfy max_mds anymore
fail: ansible.builtin.fail:
msg: "Can't remove more mds as it won't satisfy current max_mds setting" msg: "Can't remove more mds as it won't satisfy current max_mds setting"
when: when:
- ((((ceph_status.stdout | from_json)['fsmap']['up'] | int) + ((ceph_status.stdout | from_json)['fsmap']['up:standby'] | int)) - 1) < current_max_mds | int - ((((ceph_status.stdout | from_json)['fsmap']['up'] | int) + ((ceph_status.stdout | from_json)['fsmap']['up:standby'] | int)) - 1) < current_max_mds | int
- (ceph_status.stdout | from_json)['fsmap']['up'] | int > 1 - (ceph_status.stdout | from_json)['fsmap']['up'] | int > 1
- name: stop mds service and verify it - name: Stop mds service and verify it
block: block:
- name: stop mds service - name: Stop mds service
service: ansible.builtin.service:
name: ceph-mds@{{ mds_to_kill_hostname }} name: ceph-mds@{{ mds_to_kill_hostname }}
state: stopped state: stopped
enabled: no enabled: false
delegate_to: "{{ mds_to_kill }}" delegate_to: "{{ mds_to_kill }}"
failed_when: false failed_when: false
- name: ensure that the mds is stopped - name: Ensure that the mds is stopped
command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa 303 ansible.builtin.command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa command-instead-of-module
register: mds_to_kill_status register: mds_to_kill_status
failed_when: mds_to_kill_status.rc == 0 failed_when: mds_to_kill_status.rc == 0
delegate_to: "{{ mds_to_kill }}" delegate_to: "{{ mds_to_kill }}"
retries: 5 retries: 5
delay: 2 delay: 2
changed_when: false
- name: fail if the mds is reported as active or standby - name: Fail if the mds is reported as active or standby
block: block:
- name: get new ceph status - name: Get new ceph status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
register: ceph_status register: ceph_status
changed_when: false
- name: get active mds nodes list - name: Get active mds nodes list
set_fact: ansible.builtin.set_fact:
active_mdss: "{{ active_mdss | default([]) + [item.name] }}" active_mdss: "{{ active_mdss | default([]) + [item.name] }}"
with_items: "{{ (ceph_status.stdout | from_json)['fsmap']['by_rank'] }}" with_items: "{{ (ceph_status.stdout | from_json)['fsmap']['by_rank'] }}"
- name: get ceph fs dump status - name: Get ceph fs dump status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
register: ceph_fs_status register: ceph_fs_status
changed_when: false
- name: create a list of standby mdss - name: Create a list of standby mdss
set_fact: ansible.builtin.set_fact:
standby_mdss: "{{ (ceph_fs_status.stdout | from_json)['standbys'] | map(attribute='name') | list }}" standby_mdss: "{{ (ceph_fs_status.stdout | from_json)['standbys'] | map(attribute='name') | list }}"
- name: fail if mds just killed is being reported as active or standby - name: Fail if mds just killed is being reported as active or standby
fail: ansible.builtin.fail:
msg: "mds node {{ mds_to_kill }} still up and running." msg: "mds node {{ mds_to_kill }} still up and running."
when: when:
- (mds_to_kill in active_mdss | default([])) or - (mds_to_kill in active_mdss | default([])) or
(mds_to_kill in standby_mdss | default([])) (mds_to_kill in standby_mdss | default([]))
- name: delete the filesystem when killing last mds - name: Delete the filesystem when killing last mds
ceph_fs: ceph_fs:
name: "{{ cephfs }}" name: "{{ cephfs }}"
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
@ -156,13 +165,13 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- name: purge mds store - name: Purge mds store
file: ansible.builtin.file:
path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_to_kill_hostname }} path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_to_kill_hostname }}
state: absent state: absent
delegate_to: "{{ mds_to_kill }}" delegate_to: "{{ mds_to_kill }}"
post_tasks: post_tasks:
- name: show ceph health - name: Show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false changed_when: false
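Combining the header comment with the two required variables, a non-interactive run looks like this sketch (playbook path assumed from the repository layout; one MDS per run):

    ansible-playbook -i <inventory> infrastructure-playbooks/shrink-mds.yml -e ireallymeanit=yes -e mds_to_kill=ceph-mds1

The max_mds guard above is plain arithmetic: with max_mds=2, two active daemons and one standby, removing one leaves (2+1)-1 = 2 ranks available, which still satisfies max_mds, so the play proceeds; with no standby the same removal would trip the failure.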
@ -11,62 +11,66 @@
# automation scripts to avoid interactive prompt. # automation scripts to avoid interactive prompt.
- name: gather facts and check the init system - name: Gather facts and check the init system
hosts: hosts:
- "{{ mon_group_name | default('mons') }}" - "{{ mon_group_name | default('mons') }}"
- "{{ mgr_group_name | default('mgrs') }}" - "{{ mgr_group_name | default('mgrs') }}"
become: true become: true
tasks: tasks:
- debug: - name: Gather facts on all Ceph hosts for following reference
ansible.builtin.debug:
msg: gather facts on all Ceph hosts for following reference msg: gather facts on all Ceph hosts for following reference
- name: confirm if user really meant to remove manager from the ceph cluster - name: Confirm if user really meant to remove manager from the ceph cluster
hosts: mons[0] hosts: mons[0]
become: true become: true
vars_prompt: vars_prompt:
- name: ireallymeanit - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster? prompt: Are you sure you want to shrink the cluster?
default: 'no' default: 'no'
private: no private: false
pre_tasks: pre_tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: container_binary tasks_from: container_binary
- name: set_fact container_exec_cmd - name: Set_fact container_exec_cmd
when: containerized_deployment | bool when: containerized_deployment | bool
set_fact: ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
- name: exit playbook, if can not connect to the cluster - name: Exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health register: ceph_health
changed_when: false changed_when: false
until: ceph_health is succeeded until: ceph_health is succeeded
retries: 5 retries: 5
delay: 2 delay: 2
- name: get total number of mgrs in cluster - name: Get total number of mgrs in cluster
block: block:
- name: save mgr dump output - name: Save mgr dump output
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
register: mgr_dump register: mgr_dump
changed_when: false
- name: get active and standbys mgr list - name: Get active and standbys mgr list
set_fact: ansible.builtin.set_fact:
active_mgr: "{{ [mgr_dump.stdout | from_json] | map(attribute='active_name') | list }}" active_mgr: "{{ [mgr_dump.stdout | from_json] | map(attribute='active_name') | list }}"
standbys_mgr: "{{ (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list }}" standbys_mgr: "{{ (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list }}"
- name: exit playbook, if there's no standby manager - name: Exit playbook, if there's no standby manager
fail: ansible.builtin.fail:
msg: "You are about to shrink the only manager present in the cluster." msg: "You are about to shrink the only manager present in the cluster."
when: standbys_mgr | length | int < 1 when: standbys_mgr | length | int < 1
- name: exit playbook, if no manager was given - name: Exit playbook, if no manager was given
fail: ansible.builtin.fail:
msg: "mgr_to_kill must be declared msg: "mgr_to_kill must be declared
Exiting shrink-cluster playbook, no manager was removed. Exiting shrink-cluster playbook, no manager was removed.
On the command line when invoking the playbook, you can use On the command line when invoking the playbook, you can use
@ -74,46 +78,47 @@
manager each time the playbook runs." manager each time the playbook runs."
when: mgr_to_kill is not defined when: mgr_to_kill is not defined
- name: exit playbook, if user did not mean to shrink cluster - name: Exit playbook, if user did not mean to shrink cluster
fail: ansible.builtin.fail:
msg: "Exiting shrink-mgr playbook, no manager was removed. msg: "Exiting shrink-mgr playbook, no manager was removed.
To shrink the cluster, either say 'yes' on the prompt To shrink the cluster, either say 'yes' on the prompt
or use `-e ireallymeanit=yes` on the command line when or use `-e ireallymeanit=yes` on the command line when
invoking the playbook" invoking the playbook"
when: ireallymeanit != 'yes' when: ireallymeanit != 'yes'
- name: set_fact mgr_to_kill_hostname - name: Set_fact mgr_to_kill_hostname
set_fact: ansible.builtin.set_fact:
mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}" mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}"
- name: exit playbook, if the selected manager is not present in the cluster - name: Exit playbook, if the selected manager is not present in the cluster
fail: ansible.builtin.fail:
msg: "It seems that the host given is not present in the cluster." msg: "It seems that the host given is not present in the cluster."
when: when:
- mgr_to_kill_hostname not in active_mgr - mgr_to_kill_hostname not in active_mgr
- mgr_to_kill_hostname not in standbys_mgr - mgr_to_kill_hostname not in standbys_mgr
tasks: tasks:
- name: stop manager services and verify it - name: Stop manager services and verify it
block: block:
- name: stop manager service - name: Stop manager service
service: ansible.builtin.service:
name: ceph-mgr@{{ mgr_to_kill_hostname }} name: ceph-mgr@{{ mgr_to_kill_hostname }}
state: stopped state: stopped
enabled: no enabled: false
delegate_to: "{{ mgr_to_kill }}" delegate_to: "{{ mgr_to_kill }}"
failed_when: false failed_when: false
- name: ensure that the mgr is stopped - name: Ensure that the mgr is stopped
command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa 303 ansible.builtin.command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa command-instead-of-module
register: mgr_to_kill_status register: mgr_to_kill_status
failed_when: mgr_to_kill_status.rc == 0 failed_when: mgr_to_kill_status.rc == 0
delegate_to: "{{ mgr_to_kill }}" delegate_to: "{{ mgr_to_kill }}"
changed_when: false
retries: 5 retries: 5
delay: 2 delay: 2
- name: fail if the mgr is reported in ceph mgr dump - name: Fail if the mgr is reported in ceph mgr dump
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
register: mgr_dump register: mgr_dump
changed_when: false changed_when: false
failed_when: mgr_to_kill_hostname in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list) failed_when: mgr_to_kill_hostname in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list)
@ -121,13 +126,13 @@
retries: 12 retries: 12
delay: 10 delay: 10
- name: purge manager store - name: Purge manager store
file: ansible.builtin.file:
path: /var/lib/ceph/mgr/{{ cluster }}-{{ mgr_to_kill_hostname }} path: /var/lib/ceph/mgr/{{ cluster }}-{{ mgr_to_kill_hostname }}
state: absent state: absent
delegate_to: "{{ mgr_to_kill }}" delegate_to: "{{ mgr_to_kill }}"
post_tasks: post_tasks:
- name: show ceph health - name: Show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false changed_when: false
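For illustration, a non-interactive run under the same assumption about the playbook path; mgr_to_kill is the inventory name of the manager host, and at least one standby manager must exist or the play exits:

    ansible-playbook -i <inventory> infrastructure-playbooks/shrink-mgr.yml -e ireallymeanit=yes -e mgr_to_kill=<inventory-hostname>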
@ -12,75 +12,79 @@
# automation scripts to avoid interactive prompt. # automation scripts to avoid interactive prompt.
- name: gather facts and check the init system - name: Gather facts and check the init system
hosts: "{{ mon_group_name|default('mons') }}" hosts: "{{ mon_group_name|default('mons') }}"
become: true become: true
tasks: tasks:
- debug: msg="gather facts on all Ceph hosts for following reference" - name: Gather facts on all Ceph hosts for following reference
ansible.builtin.debug:
msg: "gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to remove monitor from the ceph cluster - name: Confirm whether user really meant to remove monitor from the ceph cluster
hosts: mons[0] hosts: mons[0]
become: true become: true
vars_prompt: vars_prompt:
- name: ireallymeanit - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster? prompt: Are you sure you want to shrink the cluster?
default: 'no' default: 'no'
private: no private: false
vars: vars:
mon_group_name: mons mon_group_name: mons
pre_tasks: pre_tasks:
- name: exit playbook, if only one monitor is present in cluster - name: Exit playbook, if only one monitor is present in cluster
fail: ansible.builtin.fail:
msg: "You are about to shrink the only monitor present in the cluster. msg: "You are about to shrink the only monitor present in the cluster.
If you really want to do that, please use the purge-cluster playbook." If you really want to do that, please use the purge-cluster playbook."
when: groups[mon_group_name] | length | int == 1 when: groups[mon_group_name] | length | int == 1
- name: exit playbook, if no monitor was given - name: Exit playbook, if no monitor was given
fail: ansible.builtin.fail:
msg: "mon_to_kill must be declared msg: "mon_to_kill must be declared
Exiting shrink-cluster playbook, no monitor was removed. Exiting shrink-cluster playbook, no monitor was removed.
On the command line when invoking the playbook, you can use On the command line when invoking the playbook, you can use
-e mon_to_kill=ceph-mon01 argument. You can only remove a single monitor each time the playbook runs." -e mon_to_kill=ceph-mon01 argument. You can only remove a single monitor each time the playbook runs."
when: mon_to_kill is not defined when: mon_to_kill is not defined
- name: exit playbook, if the monitor is not part of the inventory - name: Exit playbook, if the monitor is not part of the inventory
fail: ansible.builtin.fail:
msg: "It seems that the host given is not part of your inventory, please make sure it is." msg: "It seems that the host given is not part of your inventory, please make sure it is."
when: mon_to_kill not in groups[mon_group_name] when: mon_to_kill not in groups[mon_group_name]
- name: exit playbook, if user did not mean to shrink cluster - name: Exit playbook, if user did not mean to shrink cluster
fail: ansible.builtin.fail:
msg: "Exiting shrink-mon playbook, no monitor was removed. msg: "Exiting shrink-mon playbook, no monitor was removed.
To shrink the cluster, either say 'yes' on the prompt To shrink the cluster, either say 'yes' on the prompt
or use `-e ireallymeanit=yes` on the command line when or use `-e ireallymeanit=yes` on the command line when
invoking the playbook" invoking the playbook"
when: ireallymeanit != 'yes' when: ireallymeanit != 'yes'
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: container_binary tasks_from: container_binary
tasks: tasks:
- name: pick a monitor different than the one we want to remove - name: Pick a monitor different than the one we want to remove
set_fact: ansible.builtin.set_fact:
mon_host: "{{ item }}" mon_host: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}" with_items: "{{ groups[mon_group_name] }}"
when: item != mon_to_kill when: item != mon_to_kill
- name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)" - name: Set container_exec_cmd fact
set_fact: ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}" container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster - name: Exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health" ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health register: ceph_health
changed_when: false changed_when: false
until: ceph_health.stdout.find("HEALTH") > -1 until: ceph_health.stdout.find("HEALTH") > -1
@ -88,33 +92,33 @@
retries: 5 retries: 5
delay: 2 delay: 2
- name: set_fact mon_to_kill_hostname - name: Set_fact mon_to_kill_hostname
set_fact: ansible.builtin.set_fact:
mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}" mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}"
- name: stop monitor service(s) - name: Stop monitor service(s)
service: ansible.builtin.service:
name: ceph-mon@{{ mon_to_kill_hostname }} name: ceph-mon@{{ mon_to_kill_hostname }}
state: stopped state: stopped
enabled: no enabled: false
delegate_to: "{{ mon_to_kill }}" delegate_to: "{{ mon_to_kill }}"
failed_when: false failed_when: false
- name: purge monitor store - name: Purge monitor store
file: ansible.builtin.file:
path: /var/lib/ceph/mon/{{ cluster }}-{{ mon_to_kill_hostname }} path: /var/lib/ceph/mon/{{ cluster }}-{{ mon_to_kill_hostname }}
state: absent state: absent
delegate_to: "{{ mon_to_kill }}" delegate_to: "{{ mon_to_kill }}"
- name: remove monitor from the quorum - name: Remove monitor from the quorum
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}" ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
changed_when: false changed_when: false
failed_when: false failed_when: false
delegate_to: "{{ mon_host }}" delegate_to: "{{ mon_host }}"
post_tasks: post_tasks:
- name: verify the monitor is out of the cluster - name: Verify the monitor is out of the cluster
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json" ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
delegate_to: "{{ mon_host }}" delegate_to: "{{ mon_host }}"
changed_when: false changed_when: false
failed_when: false failed_when: false
@ -123,25 +127,25 @@
retries: 2 retries: 2
delay: 10 delay: 10
- name: please remove the monitor from your ceph configuration file - name: Please remove the monitor from your ceph configuration file
debug: ansible.builtin.debug:
msg: "The monitor has been successfully removed from the cluster. msg: "The monitor has been successfully removed from the cluster.
Please remove the monitor entry from the rest of your ceph configuration files, cluster wide." Please remove the monitor entry from the rest of your ceph configuration files, cluster wide."
run_once: true run_once: true
when: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names'] when: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names']
- name: fail if monitor is still part of the cluster - name: Fail if monitor is still part of the cluster
fail: ansible.builtin.fail:
msg: "Monitor appears to still be part of the cluster, please check what happened." msg: "Monitor appears to still be part of the cluster, please check what happened."
run_once: true run_once: true
when: mon_to_kill_hostname in (result.stdout | from_json)['quorum_names'] when: mon_to_kill_hostname in (result.stdout | from_json)['quorum_names']
- name: show ceph health - name: Show ceph health
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s" ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
delegate_to: "{{ mon_host }}" delegate_to: "{{ mon_host }}"
changed_when: false changed_when: false
- name: show ceph mon status - name: Show ceph mon status
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat" ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
delegate_to: "{{ mon_host }}" delegate_to: "{{ mon_host }}"
changed_when: false changed_when: false
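A sketch of a non-interactive run (path assumed as above; the play refuses to remove the only monitor and handles exactly one monitor per run):

    ansible-playbook -i <inventory> infrastructure-playbooks/shrink-mon.yml -e ireallymeanit=yes -e mon_to_kill=ceph-mon01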
@ -11,102 +11,101 @@
# Overrides the prompt using -e option. Can be used in # Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt. # automation scripts to avoid interactive prompt.
- name: gather facts and check the init system - name: Gather facts and check the init system
hosts: hosts:
- mons - mons
- osds - osds
become: True
tasks:
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to remove osd(s) from the cluster
hosts: mons[0]
become: true become: true
tasks:
- name: Gather facts on all Ceph hosts for following reference
ansible.builtin.debug:
msg: "gather facts on all Ceph hosts for following reference"
- name: Confirm whether user really meant to remove osd(s) from the cluster
hosts: mons[0]
become: true
vars_prompt: vars_prompt:
- name: ireallymeanit - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster? prompt: Are you sure you want to shrink the cluster?
default: 'no' default: 'no'
private: no private: false
vars: vars:
mon_group_name: mons mon_group_name: mons
osd_group_name: osds osd_group_name: osds
pre_tasks: pre_tasks:
- name: exit playbook, if user did not mean to shrink cluster - name: Exit playbook, if user did not mean to shrink cluster
fail: ansible.builtin.fail:
msg: "Exiting shrink-osd playbook, no osd(s) was/were removed.. msg: "Exiting shrink-osd playbook, no osd(s) was/were removed..
To shrink the cluster, either say 'yes' on the prompt or To shrink the cluster, either say 'yes' on the prompt or
or use `-e ireallymeanit=yes` on the command line when or use `-e ireallymeanit=yes` on the command line when
invoking the playbook" invoking the playbook"
when: ireallymeanit != 'yes' when: ireallymeanit != 'yes'
- name: exit playbook, if no osd(s) was/were given - name: Exit playbook, if no osd(s) was/were given
fail: ansible.builtin.fail:
msg: "osd_to_kill must be declared msg: "osd_to_kill must be declared
Exiting shrink-osd playbook, no OSD(s) was/were removed. Exiting shrink-osd playbook, no OSD(s) was/were removed.
On the command line when invoking the playbook, you can use On the command line when invoking the playbook, you can use
-e osd_to_kill=0,1,2,3 argument." -e osd_to_kill=0,1,2,3 argument."
when: osd_to_kill is not defined when: osd_to_kill is not defined
- name: check the osd ids passed have the correct format - name: Check the osd ids passed have the correct format
fail: ansible.builtin.fail:
msg: "The id {{ item }} has wrong format, please pass the number only" msg: "The id {{ item }} has wrong format, please pass the number only"
with_items: "{{ osd_to_kill.split(',') }}" with_items: "{{ osd_to_kill.split(',') }}"
when: not item is regex("^\d+$") when: not item is regex("^\d+$")
tasks: tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: container_binary tasks_from: container_binary
post_tasks: post_tasks:
- name: set_fact container_exec_cmd build docker exec command (containerized) - name: Set_fact container_exec_cmd build docker exec command (containerized)
set_fact: ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster - name: Exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health" ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health register: ceph_health
changed_when: false changed_when: false
until: ceph_health.stdout.find("HEALTH") > -1 until: ceph_health.stdout.find("HEALTH") > -1
retries: 5 retries: 5
delay: 2 delay: 2
- name: find the host(s) where the osd(s) is/are running on - name: Find the host(s) where the osd(s) is/are running on
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}" ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
changed_when: false changed_when: false
with_items: "{{ osd_to_kill.split(',') }}" with_items: "{{ osd_to_kill.split(',') }}"
register: find_osd_hosts register: find_osd_hosts
- name: set_fact osd_hosts - name: Set_fact osd_hosts
set_fact: ansible.builtin.set_fact:
osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item ] ] }}" osd_hosts: "{{ osd_hosts | default([]) + [[(item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item]] }}"
with_items: "{{ find_osd_hosts.results }}" with_items: "{{ find_osd_hosts.results }}"
- name: set_fact _osd_hosts - name: Set_fact _osd_hosts
set_fact: ansible.builtin.set_fact:
_osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2, item.3 ] ] }}" _osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2, item.3 ] ] }}"
with_nested: with_nested:
- "{{ groups.get(osd_group_name) }}" - "{{ groups.get(osd_group_name) }}"
- "{{ osd_hosts }}" - "{{ osd_hosts }}"
when: hostvars[item.0]['ansible_facts']['hostname'] == item.1 when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
- name: set_fact host_list - name: Set_fact host_list
set_fact: ansible.builtin.set_fact:
host_list: "{{ host_list | default([]) | union([item.0]) }}" host_list: "{{ host_list | default([]) | union([item.0]) }}"
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
- name: get ceph-volume lvm list data - name: Get ceph-volume lvm list data
ceph_volume: ceph_volume:
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
action: list action: list
@ -117,12 +116,12 @@
delegate_to: "{{ item }}" delegate_to: "{{ item }}"
loop: "{{ host_list }}" loop: "{{ host_list }}"
- name: set_fact _lvm_list - name: Set_fact _lvm_list
set_fact: ansible.builtin.set_fact:
_lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}" _lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}"
with_items: "{{ _lvm_list_data.results }}" with_items: "{{ _lvm_list_data.results }}"
- name: refresh /etc/ceph/osd files non containerized_deployment - name: Refresh /etc/ceph/osd files non containerized_deployment
ceph_volume_simple_scan: ceph_volume_simple_scan:
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
force: true force: true
@ -130,8 +129,8 @@
loop: "{{ host_list }}" loop: "{{ host_list }}"
when: not containerized_deployment | bool when: not containerized_deployment | bool
- name: get osd unit status - name: Get osd unit status
systemd: ansible.builtin.systemd:
name: ceph-osd@{{ item.2 }} name: ceph-osd@{{ item.2 }}
register: osd_status register: osd_status
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
@ -139,8 +138,8 @@
when: when:
- containerized_deployment | bool - containerized_deployment | bool
- name: refresh /etc/ceph/osd files containerized_deployment - name: Refresh /etc/ceph/osd files containerized_deployment
command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}" ansible.builtin.command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
changed_when: false changed_when: false
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
@ -149,10 +148,13 @@
- item.2 not in _lvm_list.keys() - item.2 not in _lvm_list.keys()
- osd_status.results[0].status.ActiveState == 'active' - osd_status.results[0].status.ActiveState == 'active'
- name: refresh /etc/ceph/osd files containerized_deployment when OSD container is down - name: Refresh /etc/ceph/osd files containerized_deployment when OSD container is down
when:
- containerized_deployment | bool
- osd_status.results[0].status.ActiveState != 'active'
block: block:
- name: create tmp osd folder - name: Create tmp osd folder
file: ansible.builtin.file:
path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }} path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
state: directory state: directory
mode: '0755' mode: '0755'
@ -160,8 +162,8 @@
when: item.2 not in _lvm_list.keys() when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
- name: activate OSD - name: Activate OSD
command: | ansible.builtin.command: |
{{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1 {{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1
-v /dev:/dev -v /etc/localtime:/etc/localtime:ro -v /dev:/dev -v /etc/localtime:/etc/localtime:ro
-v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared -v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared
@ -179,8 +181,8 @@
when: item.2 not in _lvm_list.keys() when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
- name: simple scan - name: Simple scan
command: | ansible.builtin.command: |
{{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1 {{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1
-v /dev:/dev -v /etc/localtime:/etc/localtime:ro -v /dev:/dev -v /etc/localtime:/etc/localtime:ro
-v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared -v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared
@ -198,28 +200,24 @@
when: item.2 not in _lvm_list.keys() when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
- name: umount OSD temp folder - name: Umount OSD temp folder
mount: ansible.posix.mount:
path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }} path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
state: unmounted state: unmounted
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
when: item.2 not in _lvm_list.keys() when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
- name: remove OSD temp folder - name: Remove OSD temp folder
file: ansible.builtin.file:
path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }} path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
state: absent state: absent
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
when: item.2 not in _lvm_list.keys() when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
when: - name: Find /etc/ceph/osd files
- containerized_deployment | bool ansible.builtin.find:
- osd_status.results[0].status.ActiveState != 'active'
- name: find /etc/ceph/osd files
find:
paths: /etc/ceph/osd paths: /etc/ceph/osd
pattern: "{{ item.2 }}-*" pattern: "{{ item.2 }}-*"
register: ceph_osd_data register: ceph_osd_data
@ -227,8 +225,8 @@
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
when: item.2 not in _lvm_list.keys() when: item.2 not in _lvm_list.keys()
- name: slurp ceph osd files content - name: Slurp ceph osd files content
slurp: ansible.builtin.slurp:
src: "{{ item['files'][0]['path'] }}" src: "{{ item['files'][0]['path'] }}"
delegate_to: "{{ item.item.0 }}" delegate_to: "{{ item.item.0 }}"
register: ceph_osd_files_content register: ceph_osd_files_content
@ -237,13 +235,13 @@
- item.skipped is undefined - item.skipped is undefined
- item.matched > 0 - item.matched > 0
- name: set_fact ceph_osd_files_json - name: Set_fact ceph_osd_files_json
set_fact: ansible.builtin.set_fact:
ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({ item.item.item.2: item.content | b64decode | from_json}) }}" ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({ item.item.item.2: item.content | b64decode | from_json}) }}"
with_items: "{{ ceph_osd_files_content.results }}" with_items: "{{ ceph_osd_files_content.results }}"
when: item.skipped is undefined when: item.skipped is undefined
- name: mark osd(s) out of the cluster - name: Mark osd(s) out of the cluster
ceph_osd: ceph_osd:
ids: "{{ osd_to_kill.split(',') }}" ids: "{{ osd_to_kill.split(',') }}"
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
@ -253,15 +251,15 @@
CEPH_CONTAINER_BINARY: "{{ container_binary }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}"
run_once: true run_once: true
- name: stop osd(s) service - name: Stop osd(s) service
service: ansible.builtin.service:
name: ceph-osd@{{ item.2 }} name: ceph-osd@{{ item.2 }}
state: stopped state: stopped
enabled: no enabled: false
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
- name: umount osd lockbox - name: Umount osd lockbox
ansible.posix.mount: ansible.posix.mount:
path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}" path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}"
state: absent state: absent
@ -273,7 +271,7 @@
- ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
- ceph_osd_data_json[item.2]['data']['uuid'] is defined - ceph_osd_data_json[item.2]['data']['uuid'] is defined
- name: umount osd data - name: Umount osd data
ansible.posix.mount: ansible.posix.mount:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}" path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
state: absent state: absent
@ -281,36 +279,38 @@
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
when: not containerized_deployment | bool when: not containerized_deployment | bool
- name: get parent device for data partition - name: Get parent device for data partition
command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}" ansible.builtin.command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}"
register: parent_device_data_part register: parent_device_data_part
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
changed_when: false
when: when:
- item.2 not in _lvm_list.keys() - item.2 not in _lvm_list.keys()
- ceph_osd_data_json[item.2]['data']['path'] is defined - ceph_osd_data_json[item.2]['data']['path'] is defined
- name: add pkname information in ceph_osd_data_json - name: Add pkname information in ceph_osd_data_json
set_fact: ansible.builtin.set_fact:
ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout }}, recursive=True) }}" ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout}}, recursive=True) }}"
loop: "{{ parent_device_data_part.results }}" loop: "{{ parent_device_data_part.results }}"
when: item.skipped is undefined when: item.skipped is undefined
- name: close dmcrypt close on devices if needed - name: Close dmcrypt close on devices if needed
command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}" ansible.builtin.command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}"
with_nested: with_nested:
- "{{ _osd_hosts }}" - "{{ _osd_hosts }}"
- [ 'block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt' ] - ['block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt']
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
failed_when: false failed_when: false
register: result register: result
until: result is succeeded until: result is succeeded
changed_when: false
when: when:
- item.2 not in _lvm_list.keys() - item.2 not in _lvm_list.keys()
- ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
- ceph_osd_data_json[item.2][item.3] is defined - ceph_osd_data_json[item.2][item.3] is defined
- name: use ceph-volume lvm zap to destroy all partitions - name: Use ceph-volume lvm zap to destroy all partitions
ceph_volume: ceph_volume:
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
action: zap action: zap
@ -321,7 +321,7 @@
CEPH_CONTAINER_BINARY: "{{ container_binary }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_nested: with_nested:
- "{{ _osd_hosts }}" - "{{ _osd_hosts }}"
- [ 'block', 'block.db', 'block.wal', 'journal', 'data' ] - ['block', 'block.db', 'block.wal', 'journal', 'data']
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
failed_when: false failed_when: false
register: result register: result
@ -329,7 +329,7 @@
- item.2 not in _lvm_list.keys() - item.2 not in _lvm_list.keys()
- ceph_osd_data_json[item.2][item.3] is defined - ceph_osd_data_json[item.2][item.3] is defined
- name: zap osd devices - name: Zap osd devices
ceph_volume: ceph_volume:
action: "zap" action: "zap"
osd_fsid: "{{ item.1 }}" osd_fsid: "{{ item.1 }}"
@ -341,7 +341,7 @@
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
when: item.2 in _lvm_list.keys() when: item.2 in _lvm_list.keys()
- name: ensure osds are marked down - name: Ensure osds are marked down
ceph_osd: ceph_osd:
ids: "{{ osd_to_kill.split(',') }}" ids: "{{ osd_to_kill.split(',') }}"
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
@ -352,7 +352,7 @@
run_once: true run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}" delegate_to: "{{ groups[mon_group_name][0] }}"
- name: purge osd(s) from the cluster - name: Purge osd(s) from the cluster
ceph_osd: ceph_osd:
ids: "{{ item }}" ids: "{{ item }}"
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
@ -363,17 +363,17 @@
run_once: true run_once: true
with_items: "{{ osd_to_kill.split(',') }}" with_items: "{{ osd_to_kill.split(',') }}"
- name: remove osd data dir - name: Remove osd data dir
file: ansible.builtin.file:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}" path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
state: absent state: absent
loop: "{{ _osd_hosts }}" loop: "{{ _osd_hosts }}"
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
- name: show ceph health - name: Show ceph health
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s" ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
changed_when: false changed_when: false
- name: show ceph osd tree - name: Show ceph osd tree
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree" ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
changed_when: false changed_when: false
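Illustrative invocation, same path assumption; per the format check above, osd_to_kill takes bare numeric IDs only, comma-separated:

    ansible-playbook -i <inventory> infrastructure-playbooks/shrink-osd.yml -e ireallymeanit=yes -e osd_to_kill=0,1,2,3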
@ -11,34 +11,37 @@
# Overrides the prompt using -e option. Can be used in # Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt. # automation scripts to avoid interactive prompt.
- name: gather facts and check the init system - name: Gather facts and check the init system
hosts: hosts:
- mons - mons
- rbdmirrors - rbdmirrors
become: true become: true
tasks: tasks:
- debug: - name: Gather facts on MONs and RBD mirrors
ansible.builtin.debug:
msg: gather facts on MONs and RBD mirrors msg: gather facts on MONs and RBD mirrors
- name: confirm whether user really meant to remove rbd mirror from the ceph - name: Confirm whether user really meant to remove rbd mirror from the ceph
cluster cluster
hosts: mons[0] hosts: mons[0]
become: true become: true
vars_prompt: vars_prompt:
- name: ireallymeanit - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster? prompt: Are you sure you want to shrink the cluster?
default: 'no' default: 'no'
private: no private: false
pre_tasks: pre_tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: container_binary tasks_from: container_binary
- name: exit playbook, if no rbdmirror was given - name: Exit playbook, if no rbdmirror was given
fail: ansible.builtin.fail:
msg: "rbdmirror_to_kill must be declared msg: "rbdmirror_to_kill must be declared
Exiting shrink-cluster playbook, no RBD mirror was removed. Exiting shrink-cluster playbook, no RBD mirror was removed.
On the command line when invoking the playbook, you can use On the command line when invoking the playbook, you can use
@ -46,68 +49,68 @@
single rbd mirror each time the playbook runs." single rbd mirror each time the playbook runs."
when: rbdmirror_to_kill is not defined when: rbdmirror_to_kill is not defined
- name: exit playbook, if the rbdmirror is not part of the inventory - name: Exit playbook, if the rbdmirror is not part of the inventory
fail: ansible.builtin.fail:
msg: > msg: >
It seems that the host given is not part of your inventory, It seems that the host given is not part of your inventory,
please make sure it is. please make sure it is.
when: rbdmirror_to_kill not in groups[rbdmirror_group_name] when: rbdmirror_to_kill not in groups[rbdmirror_group_name]
- name: exit playbook, if user did not mean to shrink cluster - name: Exit playbook, if user did not mean to shrink cluster
fail: ansible.builtin.fail:
msg: "Exiting shrink-rbdmirror playbook, no rbd-mirror was removed. msg: "Exiting shrink-rbdmirror playbook, no rbd-mirror was removed.
To shrink the cluster, either say 'yes' on the prompt To shrink the cluster, either say 'yes' on the prompt
or use `-e ireallymeanit=yes` on the command line when or use `-e ireallymeanit=yes` on the command line when
invoking the playbook" invoking the playbook"
when: ireallymeanit != 'yes' when: ireallymeanit != 'yes'
- name: set_fact container_exec_cmd for mon0 - name: Set_fact container_exec_cmd for mon0
when: containerized_deployment | bool when: containerized_deployment | bool
set_fact: ansible.builtin.set_fact:
        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"

    - name: Exit playbook, if can not connect to the cluster
      ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
      register: ceph_health
      changed_when: false
      until: ceph_health is succeeded
      retries: 5
      delay: 2

    - name: Set_fact rbdmirror_to_kill_hostname
      ansible.builtin.set_fact:
        rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_facts']['hostname'] }}"

    - name: Set_fact rbdmirror_gids
      ansible.builtin.set_fact:
        rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [item] }}"
      with_items: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list }}"
      when: item != 'summary'

    - name: Set_fact rbdmirror_to_kill_gid
      ansible.builtin.set_fact:
        rbdmirror_to_kill_gid: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['gid'] }}"
      with_items: "{{ rbdmirror_gids }}"
      when: (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['metadata']['id'] == rbdmirror_to_kill_hostname
  tasks:
    - name: Stop rbdmirror service
      ansible.builtin.service:
        name: ceph-rbd-mirror@rbd-mirror.{{ rbdmirror_to_kill_hostname }}
        state: stopped
        enabled: false
      delegate_to: "{{ rbdmirror_to_kill }}"
      failed_when: false

    - name: Purge related directories
      ansible.builtin.file:
        path: /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}-{{ rbdmirror_to_kill_hostname }}
        state: absent
      delegate_to: "{{ rbdmirror_to_kill }}"
  post_tasks:
    - name: Get servicemap details
      ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
      register: ceph_health
      failed_when:
        - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
@@ -115,10 +118,11 @@
      until:
        - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
        - rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list
      changed_when: false
      when: rbdmirror_to_kill_gid is defined
      retries: 12
      delay: 10

    - name: Show ceph health
      ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
      changed_when: false
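The post_tasks above poll `ceph service dump -f json` until the killed daemon's gid drops out of the service map. A minimal sketch of the same JSON check written as a one-shot assertion (the task name and the `.get()` guards are illustrative, not part of this commit):

    - name: Assert the removed rbd-mirror gid is gone  # illustrative sketch
      ansible.builtin.assert:
        that:
          - rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['services'].get('rbd-mirror', {}).get('daemons', {}).keys() | list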
@@ -11,19 +11,19 @@
# automation scripts to avoid interactive prompt.

- name: Confirm whether user really meant to remove rgw from the ceph cluster
  hosts: localhost
  become: false
  gather_facts: false
  vars_prompt:
    - name: ireallymeanit  # noqa: name[casing]
      prompt: Are you sure you want to shrink the cluster?
      default: 'no'
      private: false
  tasks:
    - name: Exit playbook, if no rgw was given
      when: rgw_to_kill is not defined or rgw_to_kill | length == 0
      ansible.builtin.fail:
        msg: >
          rgw_to_kill must be declared.
          Exiting shrink-cluster playbook, no RGW was removed. On the command
@@ -31,82 +31,85 @@
          "-e rgw_to_kill=ceph.rgw0 argument". You can only remove a single
          RGW each time the playbook runs.

    - name: Exit playbook, if user did not mean to shrink cluster
      when: ireallymeanit != 'yes'
      ansible.builtin.fail:
        msg: >
          Exiting shrink-rgw playbook, no RGW was removed. To shrink the
          cluster, either say 'yes' on the prompt or use
          '-e ireallymeanit=yes' on the command line when invoking the playbook
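Because `vars_prompt` is skipped whenever the variable is already defined, both guards above can be satisfied non-interactively by passing `-e ireallymeanit=yes` (together with `-e rgw_to_kill=<name>`) on the ansible-playbook command line, exactly as the failure messages suggest.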
- name: Gather facts and mons and rgws
  hosts:
    - "{{ mon_group_name | default('mons') }}[0]"
    - "{{ rgw_group_name | default('rgws') }}"
  become: true
  gather_facts: false
  tasks:
    - name: Gather facts
      ansible.builtin.setup:
        gather_subset:
          - 'all'
          - '!facter'
          - '!ohai'

- name: Shrink rgw service
  hosts: mons[0]
  become: true
  gather_facts: false
  pre_tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts
        tasks_from: container_binary

    - name: Set_fact container_exec_cmd for mon0
      ansible.builtin.set_fact:
        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
      when: containerized_deployment | bool

    - name: Exit playbook, if can not connect to the cluster
      ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
      register: ceph_health
      changed_when: false
      until: ceph_health is succeeded
      retries: 5
      delay: 2

    - name: Get rgw instances
      ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
      register: rgw_instances
      changed_when: false

    - name: Exit playbook, if the rgw_to_kill doesn't exist
      when: rgw_to_kill not in (rgw_instances.stdout | from_json).services.rgw.daemons.keys() | list
      ansible.builtin.fail:
        msg: >
          It seems that the rgw instance given is not part of the ceph cluster. Please
          make sure it is.
          The rgw instance format is $(hostname).rgw$(instance number).
  tasks:
    - name: Get rgw host running the rgw instance to kill
      ansible.builtin.set_fact:
        rgw_host: '{{ item }}'
      with_items: '{{ groups[rgw_group_name] }}'
      when: hostvars[item]['ansible_facts']['hostname'] == rgw_to_kill.split('.')[0]

    - name: Stop rgw service
      ansible.builtin.service:
        name: ceph-radosgw@rgw.{{ rgw_to_kill }}
        state: stopped
        enabled: false
      delegate_to: "{{ rgw_host }}"
      failed_when: false

    - name: Ensure that the rgw is stopped
      ansible.builtin.command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}"  # noqa command-instead-of-module
      register: rgw_to_kill_status
      failed_when: rgw_to_kill_status.rc == 0
      changed_when: false
@@ -114,8 +117,8 @@
      retries: 5
      delay: 2

    - name: Exit if rgw_to_kill is reported in ceph status
      ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
      register: ceph_status
      changed_when: false
      failed_when:
@@ -127,12 +130,12 @@
      retries: 3
      delay: 3

    - name: Purge directories related to rgw
      ansible.builtin.file:
        path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_to_kill }}
        state: absent
      delegate_to: "{{ rgw_host }}"
  post_tasks:
    - name: Show ceph health
      ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
      changed_when: false
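The stop-and-verify step above registers the `systemctl is-active` result and inverts `failed_when`, retrying until the unit goes inactive. A condensed sketch of the same wait loop (the `unit_state` register name is illustrative):

    - name: Wait until the rgw unit reports inactive  # illustrative sketch
      ansible.builtin.command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}"  # noqa: command-instead-of-module
      register: unit_state
      until: unit_state.rc != 0
      retries: 5
      delay: 2
      failed_when: false
      changed_when: false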
@@ -5,26 +5,23 @@
# Usage:
# ansible-playbook storage-inventory.yml

- name: Gather facts and check the init system
  hosts: osds
  become: true
  tasks:
    - name: Gather facts on all Ceph hosts
      ansible.builtin.debug:
        msg: "gather facts on all Ceph hosts for following reference"

- name: Query each host for storage device inventory
  hosts: osds
  become: true
  tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: List storage inventory
      ceph_volume:
        action: "inventory"
      environment:
@ -1,29 +1,30 @@
--- ---
# This playbook switches from non-containerized to containerized Ceph daemons # This playbook switches from non-containerized to containerized Ceph daemons
- name: confirm whether user really meant to switch from non-containerized to containerized ceph daemons - name: Confirm whether user really meant to switch from non-containerized to containerized ceph daemons
hosts: localhost hosts: localhost
gather_facts: false gather_facts: false
any_errors_fatal: true any_errors_fatal: true
vars_prompt: vars_prompt:
- name: ireallymeanit - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to switch from non-containerized to containerized ceph daemons? prompt: Are you sure you want to switch from non-containerized to containerized ceph daemons?
default: 'no' default: 'no'
private: no private: false
tasks: tasks:
- import_role: - name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
- name: fail when less than three monitors - name: Fail when less than three monitors
fail: ansible.builtin.fail:
msg: "This playbook requires at least three monitors." msg: "This playbook requires at least three monitors."
when: groups[mon_group_name] | length | int < 3 when: groups[mon_group_name] | length | int < 3
- name: exit playbook, if user did not mean to switch from non-containerized to containerized daemons? - name: Exit playbook, if user did not mean to switch from non-containerized to containerized daemons?
fail: ansible.builtin.fail:
msg: > msg: >
"Exiting switch-from-non-containerized-to-containerized-ceph-daemons.yml playbook, "Exiting switch-from-non-containerized-to-containerized-ceph-daemons.yml playbook,
cluster did not switch from non-containerized to containerized ceph daemons. cluster did not switch from non-containerized to containerized ceph daemons.
@ -33,7 +34,7 @@
when: ireallymeanit != 'yes' when: ireallymeanit != 'yes'
- name: Gather facts
  hosts:
    - "{{ mon_group_name|default('mons') }}"
@@ -47,52 +48,57 @@
  become: true
  vars:
    delegate_facts_host: true
  tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Gather and delegate facts
      ansible.builtin.setup:
        gather_subset:
          - 'all'
          - '!facter'
          - '!ohai'
      delegate_to: "{{ item }}"
      delegate_facts: true
      with_items: "{{ groups['all'] | difference(groups.get(client_group_name, [])) }}"
      run_once: true
      when: delegate_facts_host | bool
      tags: always

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts

    - name: Import ceph-validate role
      ansible.builtin.import_role:
        name: ceph-validate
- name: Switching from non-containerized to containerized ceph mon
  vars:
    containerized_deployment: true
    switch_to_containers: true
    mon_group_name: mons
  hosts: "{{ mon_group_name|default('mons') }}"
  serial: 1
  become: true
  pre_tasks:
    - name: Select a running monitor
      ansible.builtin.set_fact:
        mon_host: "{{ item }}"
      with_items: "{{ groups[mon_group_name] }}"
      when: item != inventory_hostname

    - name: Stop non-containerized ceph mon
      ansible.builtin.service:
        name: "ceph-mon@{{ ansible_facts['hostname'] }}"
        state: stopped
        enabled: false

    - name: Remove old systemd unit files
      ansible.builtin.file:
        path: "{{ item }}"
        state: absent
      with_items:
@@ -101,61 +107,67 @@
        - /lib/systemd/system/ceph-mon@.service
        - /lib/systemd/system/ceph-mon.target

    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts

    # NOTE: changed from file module to raw find command for performance reasons
    # The file module has to run checks on current ownership of all directories and files. This is unnecessary
    # as in this case we know we want all owned by ceph user
    - name: Set proper ownership on ceph directories
      ansible.builtin.command: "find /var/lib/ceph/mon /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
      changed_when: false
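The NOTE above trades the `file` module for a raw `find -execdir chown`. For comparison, a sketch of the slower module-based equivalent it replaces (loop targets copied from the command above):

    - name: Recursive chown via the file module  # slower equivalent, sketch only
      ansible.builtin.file:
        path: "{{ item }}"
        owner: "{{ ceph_uid }}"
        group: "{{ ceph_uid }}"
        recurse: true
      loop:
        - /var/lib/ceph/mon
        - /etc/ceph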
    - name: Check for existing old leveldb file extension (ldb)
      ansible.builtin.shell: stat /var/lib/ceph/mon/*/store.db/*.ldb
      changed_when: false
      failed_when: false
      register: ldb_files

    - name: Rename leveldb extension from ldb to sst
      ansible.builtin.shell: rename -v .ldb .sst /var/lib/ceph/mon/*/store.db/*.ldb
      changed_when: false
      failed_when: false
      when: ldb_files.rc == 0

    - name: Copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common
      ansible.builtin.command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
      args:
        creates: /etc/ceph/{{ cluster }}.mon.keyring
      changed_when: false
      failed_when: false
  tasks:
    - name: Import ceph-handler role
      ansible.builtin.import_role:
        name: ceph-handler

    - name: Import ceph-container-engine role
      ansible.builtin.import_role:
        name: ceph-container-engine

    - name: Import ceph-container-common role
      ansible.builtin.import_role:
        name: ceph-container-common

    - name: Import ceph-mon role
      ansible.builtin.import_role:
        name: ceph-mon
  post_tasks:
    - name: Waiting for the monitor to join the quorum...
      ansible.builtin.command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json"
      register: ceph_health_raw
      until: ansible_facts['hostname'] in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
      changed_when: false
      retries: "{{ health_mon_check_retries }}"
      delay: "{{ health_mon_check_delay }}"
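The `until` above keys off the `quorum_names` array in the `quorum_status` output; an abbreviated, illustrative shape of that payload (monitor names made up):

    {"election_epoch": 10, "quorum": [0, 1, 2], "quorum_names": ["mon0", "mon1", "mon2"]}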
- name: Switching from non-containerized to containerized ceph mgr
  hosts: "{{ mgr_group_name|default('mgrs') }}"
@@ -169,15 +181,15 @@
    # failed_when: false is here because if we're
    # working with a jewel cluster then ceph mgr
    # will not exist
    - name: Stop non-containerized ceph mgr(s)
      ansible.builtin.service:
        name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
        state: stopped
        enabled: false
      failed_when: false

    - name: Remove old systemd unit files
      ansible.builtin.file:
        path: "{{ item }}"
        state: absent
      with_items:
@@ -186,66 +198,75 @@
        - /lib/systemd/system/ceph-mgr@.service
        - /lib/systemd/system/ceph-mgr.target

    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts

    # NOTE: changed from file module to raw find command for performance reasons
    # The file module has to run checks on current ownership of all directories and files. This is unnecessary
    # as in this case we know we want all owned by ceph user
    - name: Set proper ownership on ceph directories
      ansible.builtin.command: "find /var/lib/ceph/mgr /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
      changed_when: false
  tasks:
    - name: Import ceph-handler role
      ansible.builtin.import_role:
        name: ceph-handler

    - name: Import ceph-container-engine role
      ansible.builtin.import_role:
        name: ceph-container-engine

    - name: Import ceph-container-common role
      ansible.builtin.import_role:
        name: ceph-container-common

    - name: Import ceph-mgr role
      ansible.builtin.import_role:
        name: ceph-mgr
- name: Set osd flags
  hosts: "{{ mon_group_name | default('mons') }}[0]"
  become: true
  tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts
        tasks_from: container_binary.yml

    - name: Get pool list
      ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
      register: pool_list
      changed_when: false
      check_mode: false

    - name: Get balancer module status
      ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
      register: balancer_status_switch
      changed_when: false
      check_mode: false

    - name: Set_fact pools_pgautoscaler_mode
      ansible.builtin.set_fact:
        pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
      with_items: "{{ pool_list.stdout | default('{}') | from_json }}"

    - name: Disable balancer
      ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
      changed_when: false
      when: (balancer_status_switch.stdout | from_json)['active'] | bool

    - name: Disable pg autoscale on pools
      ceph_pool:
        name: "{{ item.name }}"
        cluster: "{{ cluster }}"
@@ -258,7 +279,7 @@
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"

    - name: Set osd flags
      ceph_osd_flag:
        name: "{{ item }}"
        cluster: "{{ cluster }}"
@@ -270,12 +291,12 @@
        - nodeep-scrub
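The hunks collapse the middle of the `Set osd flags` task; a hedged reconstruction of how it would read uncollapsed, assembled only from the pieces visible above:

    - name: Set osd flags
      ceph_osd_flag:
        name: "{{ item }}"
        cluster: "{{ cluster }}"
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      with_items:
        - noout
        - nodeep-scrub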
- name: Switching from non-containerized to containerized ceph osd
  vars:
    containerized_deployment: true
    osd_group_name: osds
    switch_to_containers: true
  hosts: "{{ osd_group_name|default('osds') }}"
@@ -283,11 +304,12 @@
  become: true
  pre_tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Collect running osds
      ansible.builtin.shell: |
        set -o pipefail;
        systemctl list-units | grep -E "loaded * active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-volume'
      register: running_osds
@@ -295,28 +317,28 @@
      failed_when: false

    # systemd module does not support --runtime option
    - name: Disable ceph-osd@.service runtime-enabled
      ansible.builtin.command: "systemctl disable --runtime {{ item }}"  # noqa command-instead-of-module
      changed_when: false
      failed_when: false
      with_items: "{{ running_osds.stdout_lines | default([]) }}"
      when: item.startswith('ceph-osd@')

    - name: Stop/disable/mask non-containerized ceph osd(s) (if any)
      ansible.builtin.systemd:
        name: "{{ item }}"
        state: stopped
        enabled: false
      with_items: "{{ running_osds.stdout_lines | default([]) }}"
      when: running_osds != []

    - name: Disable ceph.target
      ansible.builtin.systemd:
        name: ceph.target
        enabled: false

    - name: Remove old ceph-osd systemd units
      ansible.builtin.file:
        path: "{{ item }}"
        state: absent
      with_items:
@@ -327,44 +349,45 @@
        - /lib/systemd/system/ceph-osd@.service
        - /lib/systemd/system/ceph-volume@.service

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts

    # NOTE: changed from file module to raw find command for performance reasons
    # The file module has to run checks on current ownership of all directories and files. This is unnecessary
    # as in this case we know we want all owned by ceph user
    - name: Set proper ownership on ceph directories
      ansible.builtin.command: "find /var/lib/ceph/osd /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
      changed_when: false

    - name: Check for existing old leveldb file extension (ldb)
      ansible.builtin.shell: stat /var/lib/ceph/osd/*/current/omap/*.ldb
      changed_when: false
      failed_when: false
      register: ldb_files

    - name: Rename leveldb extension from ldb to sst
      ansible.builtin.shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb
      changed_when: false
      failed_when: false
      when: ldb_files.rc == 0

    - name: Check if containerized osds are already running
      ansible.builtin.command: >
        {{ container_binary }} ps -q --filter='name=ceph-osd'
      changed_when: false
      failed_when: false
      register: osd_running

    - name: Get osd directories
      ansible.builtin.command: >
        find /var/lib/ceph/osd {% if dmcrypt | bool %}/var/lib/ceph/osd-lockbox{% endif %} -maxdepth 1 -mindepth 1 -type d
      register: osd_dirs
      changed_when: false
      failed_when: false

    - name: Unmount all the osd directories
      ansible.builtin.command: >
        umount {{ item }}
      changed_when: false
      failed_when: false
@@ -372,21 +395,25 @@
      when: osd_running.rc != 0 or osd_running.stdout_lines | length == 0
  tasks:
    - name: Import ceph-handler role
      ansible.builtin.import_role:
        name: ceph-handler

    - name: Import ceph-container-engine role
      ansible.builtin.import_role:
        name: ceph-container-engine

    - name: Import ceph-container-common role
      ansible.builtin.import_role:
        name: ceph-container-common

    - name: Import ceph-osd role
      ansible.builtin.import_role:
        name: ceph-osd
  post_tasks:
    - name: Container - waiting for clean pgs...
      ansible.builtin.command: >
        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
      register: ceph_health_post
      until: >
@@ -399,17 +426,20 @@
      changed_when: false
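The hunk cuts the body of the `until:` condition, which compares the active+clean PG count against the total reported by `ceph pg stat --format json`. A hedged sketch of such a check, not the verbatim elided code:

      until: >
        (ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state
        | selectattr('name', 'search', '^active\+clean')
        | map(attribute='num') | list | sum
        == (ceph_health_post.stdout | from_json).pg_summary.num_pgs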
- name: Unset osd flags
  hosts: "{{ mon_group_name | default('mons') }}[0]"
  become: true
  tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts
        tasks_from: container_binary.yml

    - name: Re-enable pg autoscale on pools
      ceph_pool:
        name: "{{ item.name }}"
        cluster: "{{ cluster }}"
@@ -422,7 +452,7 @@
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"

    - name: Unset osd flags
      ceph_osd_flag:
        name: "{{ item }}"
        cluster: "{{ cluster }}"
@@ -434,13 +464,13 @@
        - noout
        - nodeep-scrub

    - name: Re-enable balancer
      ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
      changed_when: false
      when: (balancer_status_switch.stdout | from_json)['active'] | bool
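Both the disable and re-enable guards parse `ceph balancer status -f json` for its `active` flag; an abbreviated, illustrative payload shape:

    {"active": true, "mode": "upmap", "optimize_result": "", "plans": []}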
- name: Switching from non-containerized to containerized ceph mds
  hosts: "{{ mds_group_name|default('mdss') }}"
@@ -452,14 +482,14 @@
  become: true
  pre_tasks:
    - name: Stop non-containerized ceph mds(s)
      ansible.builtin.service:
        name: "ceph-mds@{{ ansible_facts['hostname'] }}"
        state: stopped
        enabled: false

    - name: Remove old systemd unit files
      ansible.builtin.file:
        path: "{{ item }}"
        state: absent
      with_items:
@@ -468,34 +498,40 @@
        - /lib/systemd/system/ceph-mds@.service
        - /lib/systemd/system/ceph-mds.target

    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts

    # NOTE: changed from file module to raw find command for performance reasons
    # The file module has to run checks on current ownership of all directories and files. This is unnecessary
    # as in this case we know we want all owned by ceph user
    - name: Set proper ownership on ceph directories
      ansible.builtin.command: "find /var/lib/ceph/mds /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
      changed_when: false
  tasks:
    - name: Import ceph-handler role
      ansible.builtin.import_role:
        name: ceph-handler

    - name: Import ceph-container-engine role
      ansible.builtin.import_role:
        name: ceph-container-engine

    - name: Import ceph-container-common role
      ansible.builtin.import_role:
        name: ceph-container-common

    - name: Import ceph-mds role
      ansible.builtin.import_role:
        name: ceph-mds
- name: Switching from non-containerized to containerized ceph rgw
  hosts: "{{ rgw_group_name|default('rgws') }}"
@@ -506,33 +542,36 @@
  serial: 1
  become: true
  pre_tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts

    - name: Import ceph-config role
      ansible.builtin.import_role:
        name: ceph-config
        tasks_from: rgw_systemd_environment_file.yml

    # NOTE: changed from file module to raw find command for performance reasons
    # The file module has to run checks on current ownership of all directories and files. This is unnecessary
    # as in this case we know we want all owned by ceph user
    - name: Set proper ownership on ceph directories
      ansible.builtin.command: "find /var/lib/ceph/radosgw /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
      changed_when: false
  tasks:
    - name: Stop non-containerized ceph rgw(s)
      ansible.builtin.service:
        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
        state: stopped
        enabled: false
      with_items: "{{ rgw_instances }}"

    - name: Remove old systemd unit files
      ansible.builtin.file:
        path: "{{ item }}"
        state: absent
      with_items:
@@ -541,20 +580,24 @@
        - /lib/systemd/system/ceph-radosgw@.service
        - /lib/systemd/system/ceph-radosgw.target

    - name: Import ceph-handler role
      ansible.builtin.import_role:
        name: ceph-handler

    - name: Import ceph-container-engine role
      ansible.builtin.import_role:
        name: ceph-container-engine

    - name: Import ceph-container-common role
      ansible.builtin.import_role:
        name: ceph-container-common

    - name: Import ceph-rgw role
      ansible.builtin.import_role:
        name: ceph-rgw
- name: Switching from non-containerized to containerized ceph rbd-mirror
  hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
@@ -565,21 +608,21 @@
  serial: 1
  become: true
  pre_tasks:
    - name: Check for ceph rbd mirror services
      ansible.builtin.command: systemctl show --no-pager --property=Id ceph-rbd-mirror@*  # noqa: command-instead-of-module
      changed_when: false
      register: rbdmirror_services

    - name: Stop non-containerized ceph rbd mirror(s)  # noqa: ignore-errors
      ansible.builtin.service:
        name: "{{ item.split('=')[1] }}"
        state: stopped
        enabled: false
      ignore_errors: true
      loop: "{{ rbdmirror_services.stdout_lines }}"
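`systemctl show --property=Id` prints `Id=<unit>` lines, which is why the loop splits each entry on `=`. A self-contained illustration of that extraction (the hostname in the sample string is made up):

    - name: Show how the unit name is derived  # illustrative sketch
      ansible.builtin.debug:
        msg: "{{ 'Id=ceph-rbd-mirror@rbd-mirror.mon0.service'.split('=')[1] }}"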
    - name: Remove old systemd unit files
      ansible.builtin.file:
        path: "{{ item }}"
        state: absent
      with_items:
@@ -588,34 +631,40 @@
        - /lib/systemd/system/ceph-rbd-mirror@.service
        - /lib/systemd/system/ceph-rbd-mirror.target

    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts

    # NOTE: changed from file module to raw find command for performance reasons
    # The file module has to run checks on current ownership of all directories and files. This is unnecessary
    # as in this case we know we want all owned by ceph user
    - name: Set proper ownership on ceph directories
      ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
      changed_when: false
  tasks:
    - name: Import ceph-handler role
      ansible.builtin.import_role:
        name: ceph-handler

    - name: Import ceph-container-engine role
      ansible.builtin.import_role:
        name: ceph-container-engine

    - name: Import ceph-container-common role
      ansible.builtin.import_role:
        name: ceph-container-common

    - name: Import ceph-rbd-mirror role
      ansible.builtin.import_role:
        name: ceph-rbd-mirror
- name: Switching from non-containerized to containerized ceph nfs
  hosts: "{{ nfs_group_name|default('nfss') }}"
@@ -630,40 +679,46 @@
    # failed_when: false is here because if we're
    # working with a jewel cluster then ceph nfs
    # will not exist
    - name: Stop non-containerized ceph nfs(s)
      ansible.builtin.service:
        name: nfs-ganesha
        state: stopped
        enabled: false
      failed_when: false

    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts

    # NOTE: changed from file module to raw find command for performance reasons
    # The file module has to run checks on current ownership of all directories and files. This is unnecessary
    # as in this case we know we want all owned by ceph user
    - name: Set proper ownership on ceph directories
      ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
      changed_when: false
  tasks:
    - name: Import ceph-handler role
      ansible.builtin.import_role:
        name: ceph-handler

    - name: Import ceph-container-engine role
      ansible.builtin.import_role:
        name: ceph-container-engine

    - name: Import ceph-container-common role
      ansible.builtin.import_role:
        name: ceph-container-common

    - name: Import ceph-nfs role
      ansible.builtin.import_role:
        name: ceph-nfs
- name: Switching from non-containerized to containerized iscsigws
  hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
  vars:
    containerized_deployment: true
@@ -671,21 +726,22 @@
  become: true
  serial: 1
  pre_tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Stop iscsigw services
      ansible.builtin.service:
        name: "{{ item }}"
        state: stopped
        enabled: false
      with_items:
        - tcmu-runner
        - rbd-target-gw
        - rbd-target-api

    - name: Remove old systemd unit files
      ansible.builtin.file:
        path: "/usr/lib/systemd/system/{{ item }}.service"
        state: absent
      with_items:
@@ -693,29 +749,34 @@
        - rbd-target-gw
        - rbd-target-api
  tasks:
    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts

    - name: Import ceph-handler role
      ansible.builtin.import_role:
        name: ceph-handler

    # NOTE: changed from file module to raw find command for performance reasons
    # The file module has to run checks on current ownership of all directories and files. This is unnecessary
    # as in this case we know we want all owned by ceph user
    - name: Set proper ownership on ceph directories
      ansible.builtin.command: "find /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
      changed_when: false

    - name: Import ceph-container-engine role
      ansible.builtin.import_role:
        name: ceph-container-engine

    - name: Import ceph-container-common role
      ansible.builtin.import_role:
        name: ceph-container-common

    - name: Import ceph-iscsi-gw role
      ansible.builtin.import_role:
        name: ceph-iscsi-gw
- name: Switching from non-containerized to containerized ceph-crash
  hosts:
    - "{{ mon_group_name | default('mons') }}"
@@ -729,26 +790,30 @@
    containerized_deployment: true
  become: true
  tasks:
    - name: Stop non-containerized ceph-crash
      ansible.builtin.service:
        name: ceph-crash
        state: stopped
        enabled: false

    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-facts role
      ansible.builtin.import_role:
        name: ceph-facts
        tasks_from: container_binary.yml

    - name: Import ceph-handler role
      ansible.builtin.import_role:
        name: ceph-handler

    - name: Import ceph-crash role
      ansible.builtin.import_role:
        name: ceph-crash
- name: Final task
  hosts:
    - "{{ mon_group_name|default('mons') }}"
    - "{{ mgr_group_name|default('mgrs') }}"
@@ -759,11 +824,12 @@
    containerized_deployment: true
  become: true
  tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    # NOTE: changed from file module to raw find command for performance reasons
    # The file module has to run checks on current ownership of all directories and files. This is unnecessary
    # as in this case we know we want all owned by ceph user
    - name: Set proper ownership on ceph directories
      ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
      changed_when: false
@@ -11,45 +11,51 @@
# 4. Run the playbook called: `take-over-existing-cluster.yml` like this `ansible-playbook take-over-existing-cluster.yml`.
# 5. Eventually run Ceph Ansible to validate everything by doing: `ansible-playbook site.yml`.

- name: Fetch keys
  hosts: mons
  become: true
  tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults

    - name: Import ceph-fetch-keys role
      ansible.builtin.import_role:
        name: ceph-fetch-keys

- name: Take over existing cluster
  hosts:
    - mons
    - osds
    - mdss
    - rgws
    - nfss
    - rbdmirrors
    - clients
    - mgrs
    - iscsi-gw
  become: true
  tasks:
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults
  post_tasks:
    - name: Get the name of the existing ceph cluster
      ansible.builtin.shell: |
        set -o pipefail;
        basename $(grep --exclude '*.bak' -R fsid /etc/ceph/ | egrep -o '^[^.]*' | head -n 1)
      changed_when: false
      register: cluster_name
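`set -o pipefail` is what ansible-lint's risky-shell-pipe rule asks for, but it is a bashism: on hosts where /bin/sh is not bash, the task also needs an explicit interpreter. A hedged sketch of that fully guarded form (the `fsid_line` register name is illustrative):

    - name: Pipeline guarded by pipefail under bash  # illustrative sketch
      ansible.builtin.shell: |
        set -o pipefail;
        grep -R fsid /etc/ceph/ | head -n 1
      args:
        executable: /bin/bash
      register: fsid_line
      changed_when: false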
- name: "stat {{ cluster_name.stdout }}.conf" - name: Run stat module on Ceph configuration file
stat: ansible.builtin.stat:
path: "/etc/ceph/{{ cluster_name.stdout }}.conf" path: "/etc/ceph/{{ cluster_name.stdout }}.conf"
register: ceph_conf_stat register: ceph_conf_stat
# Creates a backup of original ceph conf file in 'cluster_name-YYYYMMDDTHHMMSS.conf.bak' format # Creates a backup of original ceph conf file in 'cluster_name-YYYYMMDDTHHMMSS.conf.bak' format
- name: "make a backup of original {{ cluster_name.stdout }}.conf" - name: Make a backup of original Ceph configuration file
copy: ansible.builtin.copy:
src: "/etc/ceph/{{ cluster_name.stdout }}.conf" src: "/etc/ceph/{{ cluster_name.stdout }}.conf"
dest: "/etc/ceph/{{ cluster_name.stdout }}-{{ ansible_date_time.iso8601_basic_short }}.conf.bak" dest: "/etc/ceph/{{ cluster_name.stdout }}-{{ ansible_date_time.iso8601_basic_short }}.conf.bak"
remote_src: true remote_src: true
@ -57,7 +63,7 @@
group: "{{ ceph_conf_stat.stat.gr_name }}" group: "{{ ceph_conf_stat.stat.gr_name }}"
mode: "{{ ceph_conf_stat.stat.mode }}" mode: "{{ ceph_conf_stat.stat.mode }}"
- name: generate ceph configuration file - name: Generate ceph configuration file
openstack.config_template.config_template: openstack.config_template.config_template:
src: "roles/ceph-config/templates/ceph.conf.j2" src: "roles/ceph-config/templates/ceph.conf.j2"
dest: "/etc/ceph/{{ cluster_name.stdout }}.conf" dest: "/etc/ceph/{{ cluster_name.stdout }}.conf"
@@ -8,16 +8,16 @@
# the operation won't last for too long.

- hosts: <your_host>
  gather_facts: false
  tasks:
    - name: Set the noout flag
      ansible.builtin.command: ceph osd set noout
      delegate_to: <your_monitor>

    - name: Turn off the server
      ansible.builtin.command: poweroff

    - name: Wait for the server to go down
      local_action:
@@ -35,5 +35,5 @@
        timeout: 3600

    - name: Unset the noout flag
      ansible.builtin.command: ceph osd unset noout
      delegate_to: <your_monitor>
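The truncated `local_action` in the middle of this example is a pre-2.0 idiom; a modernized, hedged equivalent of the whole wait using `wait_for` with delegation (assuming reachability of SSH on port 22 is the liveness signal):

    - name: Wait for the server to go down  # modernized sketch
      ansible.builtin.wait_for:
        host: "{{ inventory_hostname }}"
        port: 22
        state: stopped
        timeout: 3600
      delegate_to: localhost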
@@ -10,7 +10,7 @@
- hosts: mons
  serial: 1
  sudo: true
  vars:
    backup_dir: /tmp/
@@ -18,13 +18,13 @@
  tasks:
    - name: Check if the node has been migrated already
      ansible.builtin.stat: >
        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
      register: migration_completed
      failed_when: false

    - name: Check for failed run
      ansible.builtin.stat: >
        path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
      register: mon_archive_leftover
@@ -32,36 +32,36 @@
      when: migration_completed.stat.exists == False and mon_archive_leftover.stat.exists == True

    - name: Compress the store as much as possible
      ansible.builtin.command: ceph tell mon.{{ ansible_facts['hostname'] }} compact
      when: migration_completed.stat.exists == False

    - name: Check if sysvinit
      ansible.builtin.stat: >
        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
      register: monsysvinit
      changed_when: false

    - name: Check if upstart
      ansible.builtin.stat: >
        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
      register: monupstart
      changed_when: false

    - name: Check if init does what it is supposed to do (Sysvinit)
      ansible.builtin.shell: >
        ps faux|grep -sq [c]eph-mon && service ceph status mon >> /dev/null
      register: ceph_status_sysvinit
      changed_when: false

    # can't complete the condition since the previous task never ran...
    - fail: msg="Something is terribly wrong here, sysvinit is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!"
      when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True

    - name: Check if init does what it is supposed to do (upstart)
      ansible.builtin.shell: >
        ps faux|grep -sq [c]eph-mon && status ceph-mon-all >> /dev/null
      register: ceph_status_upstart
      changed_when: false

    - fail: msg="Something is terribly wrong here, upstart is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!"
      when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True
@ -124,7 +124,7 @@
# NOTE (leseb): should we convert upstart to sysvinit here already? # NOTE (leseb): should we convert upstart to sysvinit here already?
- name: Archive monitor stores - name: Archive monitor stores
shell: > ansible.builtin.shell: >
tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/ chdir=/var/lib/ceph/
creates={{ ansible_facts['hostname'] }}.tar creates={{ ansible_facts['hostname'] }}.tar
@ -138,7 +138,7 @@
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Reboot the server - name: Reboot the server
command: reboot ansible.builtin.command: reboot
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Wait for the server to come up - name: Wait for the server to come up
@ -154,16 +154,16 @@
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Check if sysvinit - name: Check if sysvinit
stat: > ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
register: monsysvinit register: monsysvinit
changed_when: False changed_when: false
- name: Check if upstart - name: Check if upstart
stat: > ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
register: monupstart register: monupstart
changed_when: False changed_when: false
- name: Make sure the monitor is stopped (Upstart) - name: Make sure the monitor is stopped (Upstart)
service: > service: >
@ -190,13 +190,13 @@
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Copy keys and configs - name: Copy keys and configs
shell: > ansible.builtin.shell: >
cp etc/ceph/* /etc/ceph/ cp etc/ceph/* /etc/ceph/
chdir=/var/lib/ceph/ chdir=/var/lib/ceph/
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Configure RHEL7 for sysvinit - name: Configure RHEL7 for sysvinit
shell: find -L /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \; ansible.builtin.shell: find -L /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
# NOTE (leseb): at this point the upstart and sysvinit checks are not necessary # NOTE (leseb): at this point the upstart and sysvinit checks are not necessary
@ -217,7 +217,7 @@
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Waiting for the monitor to join the quorum... - name: Waiting for the monitor to join the quorum...
shell: > ansible.builtin.shell: >
ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }} ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }}
register: result register: result
until: result.rc == 0 until: result.rc == 0
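Note: this is Ansible's register/until retry loop: the grep exits non-zero until the monitor's hostname shows up in the quorum line of `ceph -s`, and the task is re-run until rc == 0. The retries/delay that complete the loop sit outside this hunk; the values below are assumptions for illustration only:

    - name: Waiting for the monitor to join the quorum...
      ansible.builtin.shell: >
        ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }}
      register: result
      until: result.rc == 0
      retries: 5   # assumed
      delay: 10    # assumed
      changed_when: false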
@ -238,20 +238,20 @@
- hosts: osds - hosts: osds
serial: 1 serial: 1
sudo: True sudo: true
vars: vars:
backup_dir: /tmp/ backup_dir: /tmp/
tasks: tasks:
- name: Check if the node has been migrated already - name: Check if the node has been migrated already
stat: > ansible.builtin.stat: >
path=/var/lib/ceph/migration_completed path=/var/lib/ceph/migration_completed
register: migration_completed register: migration_completed
failed_when: false failed_when: false
- name: Check for failed run - name: Check for failed run
stat: > ansible.builtin.stat: >
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: osd_archive_leftover register: osd_archive_leftover
@ -259,44 +259,44 @@
when: migration_completed.stat.exists == False and osd_archive_leftover.stat.exists == True when: migration_completed.stat.exists == False and osd_archive_leftover.stat.exists == True
- name: Check if init does what it is supposed to do (Sysvinit) - name: Check if init does what it is supposed to do (Sysvinit)
shell: > ansible.builtin.shell: >
ps faux|grep -sq [c]eph-osd && service ceph status osd >> /dev/null ps faux|grep -sq [c]eph-osd && service ceph status osd >> /dev/null
register: ceph_status_sysvinit register: ceph_status_sysvinit
changed_when: False changed_when: false
# can't complete the condition since the previous task never ran... # can't complete the condition since the previous task never ran...
- fail: msg="Something is terribly wrong here, sysvinit is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!" - fail: msg="Something is terribly wrong here, sysvinit is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!"
when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True
- name: Check if init does what it is supposed to do (upstart) - name: Check if init does what it is supposed to do (upstart)
shell: > ansible.builtin.shell: >
ps faux|grep -sq [c]eph-osd && initctl list|egrep -sq "ceph-osd \(ceph/.\) start/running, process [0-9][0-9][0-9][0-9]" ps faux|grep -sq [c]eph-osd && initctl list|egrep -sq "ceph-osd \(ceph/.\) start/running, process [0-9][0-9][0-9][0-9]"
register: ceph_status_upstart register: ceph_status_upstart
changed_when: False changed_when: false
- fail: msg="Something is terribly wrong here, upstart is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!" - fail: msg="Something is terribly wrong here, upstart is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!"
when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True
- name: Set the noout flag - name: Set the noout flag
command: ceph osd set noout ansible.builtin.command: ceph osd set noout
delegate_to: "{{ item }}" delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name][0] }}" with_items: "{{ groups[mon_group_name][0] }}"
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Check if sysvinit - name: Check if sysvinit
shell: stat /var/lib/ceph/osd/ceph-*/sysvinit ansible.builtin.shell: stat /var/lib/ceph/osd/ceph-*/sysvinit
register: osdsysvinit register: osdsysvinit
failed_when: false failed_when: false
changed_when: False changed_when: false
- name: Check if upstart - name: Check if upstart
shell: stat /var/lib/ceph/osd/ceph-*/upstart ansible.builtin.shell: stat /var/lib/ceph/osd/ceph-*/upstart
register: osdupstart register: osdupstart
failed_when: false failed_when: false
changed_when: False changed_when: false
- name: Archive ceph configs - name: Archive ceph configs
shell: > ansible.builtin.shell: >
tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/ chdir=/var/lib/ceph/
creates={{ ansible_facts['hostname'] }}.tar creates={{ ansible_facts['hostname'] }}.tar
@ -321,7 +321,7 @@
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Collect OSD ports - name: Collect OSD ports
shell: netstat -tlpn | awk -F ":" '/ceph-osd/ { sub (" .*", "", $2); print $2 }' | uniq ansible.builtin.shell: netstat -tlpn | awk -F ":" '/ceph-osd/ { sub (" .*", "", $2); print $2 }' | uniq
register: osd_ports register: osd_ports
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
@ -349,11 +349,11 @@
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Configure RHEL with sysvinit - name: Configure RHEL with sysvinit
shell: find -L /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \; ansible.builtin.shell: find -L /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Reboot the server - name: Reboot the server
command: reboot ansible.builtin.command: reboot
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Wait for the server to come up - name: Wait for the server to come up
@ -379,7 +379,7 @@
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Copy keys and configs - name: Copy keys and configs
shell: > ansible.builtin.shell: >
cp etc/ceph/* /etc/ceph/ cp etc/ceph/* /etc/ceph/
chdir=/var/lib/ceph/ chdir=/var/lib/ceph/
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
@ -405,7 +405,7 @@
# - "{{ osd_ports.stdout_lines }}" # - "{{ osd_ports.stdout_lines }}"
- name: Waiting for clean PGs... - name: Waiting for clean PGs...
shell: > ansible.builtin.shell: >
test "[""$(ceph -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(ceph -s -f json | python -c 'import sys, json; print([ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"])')" test "[""$(ceph -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(ceph -s -f json | python -c 'import sys, json; print([ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"])')"
register: result register: result
until: result.rc == 0 until: result.rc == 0
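Note: the shell test above declares recovery finished once the count of PGs in state active+clean equals the total PG count reported by `ceph -s -f json`. A hypothetical restatement with Ansible's own JSON filters instead of inline Python; the retries/delay values are assumed, not taken from this hunk:

    - name: Waiting for clean PGs...
      ansible.builtin.command: ceph -s -f json
      register: ceph_status
      changed_when: false
      retries: 10   # assumed
      delay: 10     # assumed
      until: >-
        (ceph_status.stdout | from_json).pgmap.pgs_by_state
        | selectattr('state_name', 'equalto', 'active+clean')
        | map(attribute='count') | sum
        == (ceph_status.stdout | from_json).pgmap.num_pgs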
@ -425,27 +425,27 @@
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Unset the noout flag - name: Unset the noout flag
command: ceph osd unset noout ansible.builtin.command: ceph osd unset noout
delegate_to: "{{ item }}" delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name][0] }}" with_items: "{{ groups[mon_group_name][0] }}"
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- hosts: rgws - hosts: rgws
serial: 1 serial: 1
sudo: True sudo: true
vars: vars:
backup_dir: /tmp/ backup_dir: /tmp/
tasks: tasks:
- name: Check if the node has been migrated already - name: Check if the node has been migrated already
stat: > ansible.builtin.stat: >
path=/var/lib/ceph/radosgw/migration_completed path=/var/lib/ceph/radosgw/migration_completed
register: migration_completed register: migration_completed
failed_when: false failed_when: false
- name: Check for failed run - name: Check for failed run
stat: > ansible.builtin.stat: >
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: rgw_archive_leftover register: rgw_archive_leftover
@ -453,7 +453,7 @@
when: migration_completed.stat.exists == False and rgw_archive_leftover.stat.exists == True when: migration_completed.stat.exists == False and rgw_archive_leftover.stat.exists == True
- name: Archive rados gateway configs - name: Archive rados gateway configs
shell: > ansible.builtin.shell: >
tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/ chdir=/var/lib/ceph/
creates={{ ansible_facts['hostname'] }}.tar creates={{ ansible_facts['hostname'] }}.tar
@ -494,7 +494,7 @@
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Reboot the server - name: Reboot the server
command: reboot ansible.builtin.command: reboot
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Wait for the server to come up - name: Wait for the server to come up
@ -520,7 +520,7 @@
when: migration_completed.stat.exists == False when: migration_completed.stat.exists == False
- name: Copy keys and configs - name: Copy keys and configs
shell: > ansible.builtin.shell: >
{{ item }} {{ item }}
chdir=/var/lib/ceph/ chdir=/var/lib/ceph/
with_items: cp etc/ceph/* /etc/ceph/ with_items: cp etc/ceph/* /etc/ceph/


@ -31,7 +31,7 @@
tasks: tasks:
- name: load a variable file for devices partition - name: Load a variable file for devices partition
include_vars: "{{ item }}" include_vars: "{{ item }}"
with_first_found: with_first_found:
- files: - files:
@ -39,24 +39,24 @@
- "host_vars/default.yml" - "host_vars/default.yml"
skip: true skip: true
- name: exit playbook, if devices not defined - name: Exit playbook, if devices not defined
fail: ansible.builtin.fail:
msg: "devices must be defined in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml" msg: "devices must be defined in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml"
when: devices is not defined when: devices is not defined
- name: install sgdisk(gdisk) - name: Install sgdisk(gdisk)
package: ansible.builtin.package:
name: gdisk name: gdisk
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
- name: erase all previous partitions(dangerous!!!) - name: Erase all previous partitions(dangerous!!!)
shell: sgdisk --zap-all -- /dev/{{item.device_name}} ansible.builtin.shell: sgdisk --zap-all -- /dev/{{item.device_name}}
with_items: "{{ devices }}" with_items: "{{ devices }}"
- name: make osd partitions - name: Make osd partitions
shell: > ansible.builtin.shell: >
sgdisk --new={{item.1.index}}:0:+{{item.1.size}} "--change-name={{item.1.index}}:ceph {{item.1.type}}" sgdisk --new={{item.1.index}}:0:+{{item.1.size}} "--change-name={{item.1.index}}:ceph {{item.1.type}}"
"--typecode={{item.1.index}}:{% if item.1.type=='data' %}{{data_typecode}}{% else %}{{journal_typecode}}{% endif %}" "--typecode={{item.1.index}}:{% if item.1.type=='data' %}{{data_typecode}}{% else %}{{journal_typecode}}{% endif %}"
--mbrtogpt -- /dev/{{item.0.device_name}} --mbrtogpt -- /dev/{{item.0.device_name}}
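Note: for readers unfamiliar with sgdisk, the templated command expands to one call per (device, partition) pair. With assumed example values item.1 = {index: 1, size: 100G, type: data} and item.0.device_name = sdb, it becomes:

    sgdisk --new=1:0:+100G "--change-name=1:ceph data" --typecode=1:{{ data_typecode }} --mbrtogpt -- /dev/sdb

that is: create partition 1 of 100G, label it "ceph data", stamp it with the GPT type code the play keeps in data_typecode (defined outside this hunk), and convert an MBR label to GPT if needed.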
@ -74,8 +74,8 @@
group: 64045 group: 64045
when: ansible_facts['os_family'] == "Debian" when: ansible_facts['os_family'] == "Debian"
- name: change partitions ownership - name: Change partitions ownership
file: ansible.builtin.file:
path: "/dev/{{item.0.device_name}}{{item.1.index}}" path: "/dev/{{item.0.device_name}}{{item.1.index}}"
owner: "{{ owner | default('root')}}" owner: "{{ owner | default('root')}}"
group: "{{ group | default('disk')}}" group: "{{ group | default('disk')}}"
@ -85,8 +85,8 @@
when: when:
item.0.device_name | match('/dev/([hsv]d[a-z]{1,2}){1,2}$') item.0.device_name | match('/dev/([hsv]d[a-z]{1,2}){1,2}$')
- name: change partitions ownership - name: Change partitions ownership
file: ansible.builtin.file:
path: "/dev/{{item.0.device_name}}p{{item.1.index}}" path: "/dev/{{item.0.device_name}}p{{item.1.index}}"
owner: "{{ owner | default('root')}}" owner: "{{ owner | default('root')}}"
group: "{{ group | default('disk')}}" group: "{{ group | default('disk')}}"


@ -37,69 +37,69 @@
serial: 1 serial: 1
tasks: tasks:
- name: get osd(s) if directory stat - name: Get osd(s) if directory stat
stat: ansible.builtin.stat:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid" path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
register: osds_dir_stat register: osds_dir_stat
with_subelements: with_subelements:
- "{{ osds_journal_devices }}" - "{{ osds_journal_devices }}"
- partitions - partitions
- name: exit playbook osd(s) is not on this host - name: Exit playbook osd(s) is not on this host
fail: ansible.builtin.fail:
msg: exit playbook osd(s) is not on this host msg: exit playbook osd(s) is not on this host
with_items: with_items:
osds_dir_stat.results osds_dir_stat.results
when: osds_dir_stat is defined and item.stat.exists == false when: osds_dir_stat is defined and item.stat.exists == false
- name: install sgdisk(gdisk) - name: Install sgdisk(gdisk)
package: ansible.builtin.package:
name: gdisk name: gdisk
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
when: osds_journal_devices is defined when: osds_journal_devices is defined
- name: generate uuid for osds journal - name: Generate uuid for osds journal
command: uuidgen ansible.builtin.command: uuidgen
register: osds register: osds
with_subelements: with_subelements:
- "{{ osds_journal_devices }}" - "{{ osds_journal_devices }}"
- partitions - partitions
- name: make osd partitions on ssd - name: Make osd partitions on ssd
shell: > ansible.builtin.shell: >
sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal" sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal"
--typecode={{ item.item[1].index }}:{{ journal_typecode }} --typecode={{ item.item[1].index }}:{{ journal_typecode }}
--partition-guid={{ item.item[1].index }}:{{ item.stdout }} --partition-guid={{ item.item[1].index }}:{{ item.stdout }}
--mbrtogpt -- {{ item.item[0].device_name }} --mbrtogpt -- {{ item.item[0].device_name }}
with_items: "{{ osds.results }}" with_items: "{{ osds.results }}"
- name: stop osd(s) service - name: Stop osd(s) service
service: ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}" name: "ceph-osd@{{ item.item[1].osd_id }}"
state: stopped state: stopped
with_items: "{{ osds.results }}" with_items: "{{ osds.results }}"
- name: flush osd(s) journal - name: Flush osd(s) journal
command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }} ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }}
with_items: "{{ osds.results }}" with_items: "{{ osds.results }}"
when: osds_journal_devices is defined when: osds_journal_devices is defined
- name: update osd(s) journal soft link - name: Update osd(s) journal soft link
command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal ansible.builtin.command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal
with_items: "{{ osds.results }}" with_items: "{{ osds.results }}"
- name: update osd(s) journal uuid - name: Update osd(s) journal uuid
command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid ansible.builtin.command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid
with_items: "{{ osds.results }}" with_items: "{{ osds.results }}"
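Note: ansible.builtin.command does not run through a shell, so the `>` in the task above is passed to echo as a literal argument rather than redirecting output into journal_uuid. If the redirection is really intended, ansible.builtin.shell or a copy task would perform it; a sketch of the latter (the 0644 mode is an assumption):

    - name: Update osd(s) journal uuid
      ansible.builtin.copy:
        content: "{{ item.stdout }}\n"
        dest: "/var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid"
        mode: "0644"   # assumed
      with_items: "{{ osds.results }}"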
- name: initialize osd(s) new journal - name: Initialize osd(s) new journal
command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }} ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
with_items: "{{ osds.results }}" with_items: "{{ osds.results }}"
- name: start osd(s) service - name: Start osd(s) service
service: ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}" name: "ceph-osd@{{ item.item[1].osd_id }}"
state: started state: started
with_items: "{{ osds.results }}" with_items: "{{ osds.results }}"


@ -1,11 +1,11 @@
--- ---
# Nukes a multisite config # Nukes a multisite config
- hosts: rgws - hosts: rgws
become: True become: true
tasks: tasks:
- include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml - include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml
handlers: handlers:
# Ansible 2.1.0 bug will ignore included handlers without this # Ansible 2.1.0 bug will ignore included handlers without this
- name: import_tasks roles/ceph-rgw/handlers/main.yml - name: Import_tasks roles/ceph-rgw/handlers/main.yml
import_tasks: roles/ceph-rgw/handlers/main.yml import_tasks: roles/ceph-rgw/handlers/main.yml


@ -30,7 +30,7 @@
# @param osd_id: Which osds's journal this partition for. # @param osd_id: Which osds's journal this partition for.
# #
# ansible-playbook recover-osds-after-ssd-journal-failure.yml # ansible-playbook recover-osds-after-ssd-journal-failure.yml
# Prompts for select which host to recover, defaults to null, # Prompts for select which host to recover, defaults to null,
# doesn't select host the recover ssd. Input the hostname # doesn't select host the recover ssd. Input the hostname
# which to recover osds after ssd journal failure # which to recover osds after ssd journal failure
# #
@ -40,11 +40,11 @@
# automation scripts to avoid interactive prompt. # automation scripts to avoid interactive prompt.
- hosts: localhost - hosts: localhost
gather_facts: no gather_facts: false
vars_prompt: vars_prompt:
- name: target_host - name: target_host # noqa: name[casing]
prompt: please enter the target hostname on which to recover osds after ssd journal failure prompt: please enter the target hostname on which to recover osds after ssd journal failure
private: no private: false
tasks: tasks:
- add_host: - add_host:
name: "{{ target_host }}" name: "{{ target_host }}"
@ -59,16 +59,16 @@
- fail: msg="please define dev_ssds variable" - fail: msg="please define dev_ssds variable"
when: dev_ssds|length <= 0 when: dev_ssds|length <= 0
- name: get osd(s) if directory stat - name: Get osd(s) if directory stat
stat: ansible.builtin.stat:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid" path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
register: osds_dir_stat register: osds_dir_stat
with_subelements: with_subelements:
- "{{ dev_ssds }}" - "{{ dev_ssds }}"
- partitions - partitions
- name: exit playbook osd(s) is not on this host - name: Exit playbook osd(s) is not on this host
fail: ansible.builtin.fail:
msg: exit playbook osds is not on this host msg: exit playbook osds is not on this host
with_items: with_items:
osds_dir_stat.results osds_dir_stat.results
@ -76,40 +76,40 @@
- osds_dir_stat is defined | bool - osds_dir_stat is defined | bool
- item.stat.exists == false - item.stat.exists == false
- name: install sgdisk(gdisk) - name: Install sgdisk(gdisk)
package: ansible.builtin.package:
name: gdisk name: gdisk
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
- name: get osd(s) journal uuid - name: Get osd(s) journal uuid
command: cat "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid" ansible.builtin.command: cat "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
register: osds_uuid register: osds_uuid
with_subelements: with_subelements:
- "{{ dev_ssds }}" - "{{ dev_ssds }}"
- partitions - partitions
- name: make partitions on new ssd - name: Make partitions on new ssd
shell: > ansible.builtin.shell: >
sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal" sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal"
--typecode={{ item.item[1].index }}:{{ journal_typecode }} --typecode={{ item.item[1].index }}:{{ journal_typecode }}
--partition-guid={{ item.item[1].index }}:{{ item.stdout }} --partition-guid={{ item.item[1].index }}:{{ item.stdout }}
--mbrtogpt -- {{ item.item[0].device_name }} --mbrtogpt -- {{ item.item[0].device_name }}
with_items: "{{ osds_uuid.results }}" with_items: "{{ osds_uuid.results }}"
- name: stop osd(s) service - name: Stop osd(s) service
service: ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}" name: "ceph-osd@{{ item.item[1].osd_id }}"
state: stopped state: stopped
with_items: "{{ osds_uuid.results }}" with_items: "{{ osds_uuid.results }}"
- name: reinitialize osd(s) journal in new ssd - name: Reinitialize osd(s) journal in new ssd
command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }} ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
with_items: "{{ osds_uuid.results }}" with_items: "{{ osds_uuid.results }}"
- name: start osd(s) service - name: Start osd(s) service
service: ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}" name: "ceph-osd@{{ item.item[1].osd_id }}"
state: started state: started
with_items: "{{ osds_uuid.results }}" with_items: "{{ osds_uuid.results }}"


@ -15,38 +15,38 @@
# Overrides the prompt using -e option. Can be used in # Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt. # automation scripts to avoid interactive prompt.
- name: gather facts and check the init system - name: Gather facts and check the init system
hosts: hosts:
- "{{ mon_group_name|default('mons') }}" - "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}" - "{{ osd_group_name|default('osds') }}"
become: True become: true
tasks: tasks:
- debug: msg="gather facts on all Ceph hosts for following reference" - ansible.builtin.debug: msg="gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to replace osd(s) - name: Confirm whether user really meant to replace osd(s)
hosts: localhost hosts: localhost
become: true become: true
vars_prompt: vars_prompt:
- name: ireallymeanit - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to replace the osd(s)? prompt: Are you sure you want to replace the osd(s)?
default: 'no' default: 'no'
private: no private: false
vars: vars:
mon_group_name: mons mon_group_name: mons
osd_group_name: osds osd_group_name: osds
pre_tasks: pre_tasks:
- name: exit playbook, if user did not mean to replace the osd(s) - name: Exit playbook, if user did not mean to replace the osd(s)
fail: ansible.builtin.fail:
msg: "Exiting replace-osd playbook, no osd(s) was/were replaced. msg: "Exiting replace-osd playbook, no osd(s) was/were replaced.
To replace the osd(s), either say 'yes' on the prompt or To replace the osd(s), either say 'yes' on the prompt or
use `-e ireallymeanit=yes` on the command line when use `-e ireallymeanit=yes` on the command line when
invoking the playbook" invoking the playbook"
when: ireallymeanit != 'yes' when: ireallymeanit != 'yes'
- name: exit playbook, if no osd(s) was/were given - name: Exit playbook, if no osd(s) was/were given
fail: ansible.builtin.fail:
msg: "osd_to_replace must be declared msg: "osd_to_replace must be declared
Exiting replace-osd playbook, no OSD(s) was/were replaced. Exiting replace-osd playbook, no OSD(s) was/were replaced.
On the command line when invoking the playbook, you can use On the command line when invoking the playbook, you can use
@ -54,36 +54,36 @@
when: osd_to_replace is not defined when: osd_to_replace is not defined
tasks: tasks:
- import_role: - ansible.builtin.import_role:
name: ceph-defaults name: ceph-defaults
post_tasks: post_tasks:
- name: set_fact container_exec_cmd build docker exec command (containerized) - name: Set_fact container_exec_cmd build docker exec command (containerized)
set_fact: ansible.builtin.set_fact:
container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool when: containerized_deployment | bool
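Note: container_exec_cmd is simply prepended to every ceph CLI call below, and default('') keeps those calls intact when the fact was never set. Assuming a first monitor named mon0 and the default cluster name ceph (both assumptions), the health check that follows runs as:

    # non-containerized deployment
    timeout 5 ceph --cluster ceph health
    # containerized deployment
    docker exec ceph-mon-mon0 timeout 5 ceph --cluster ceph health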
- name: exit playbook, if can not connect to the cluster - name: Exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health register: ceph_health
until: ceph_health.stdout.find("HEALTH") > -1 until: ceph_health.stdout.find("HEALTH") > -1
delegate_to: "{{ groups[mon_group_name][0] }}" delegate_to: "{{ groups[mon_group_name][0] }}"
retries: 5 retries: 5
delay: 2 delay: 2
- name: find the host(s) where the osd(s) is/are running on - name: Find the host(s) where the osd(s) is/are running on
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
with_items: "{{ osd_to_replace.split(',') }}" with_items: "{{ osd_to_replace.split(',') }}"
delegate_to: "{{ groups[mon_group_name][0] }}" delegate_to: "{{ groups[mon_group_name][0] }}"
register: find_osd_hosts register: find_osd_hosts
- name: set_fact osd_hosts - name: Set_fact osd_hosts
set_fact: ansible.builtin.set_fact:
osd_hosts: "{{ osd_hosts | default([]) + [ (item.stdout | from_json).crush_location.host ] }}" osd_hosts: "{{ osd_hosts | default([]) + [ (item.stdout | from_json).crush_location.host ] }}"
with_items: "{{ find_osd_hosts.results }}" with_items: "{{ find_osd_hosts.results }}"
- name: check if ceph admin key exists on the osd nodes - name: Check if ceph admin key exists on the osd nodes
stat: ansible.builtin.stat:
path: "/etc/ceph/{{ cluster }}.client.admin.keyring" path: "/etc/ceph/{{ cluster }}.client.admin.keyring"
register: ceph_admin_key register: ceph_admin_key
with_items: "{{ osd_hosts }}" with_items: "{{ osd_hosts }}"
@ -91,8 +91,8 @@
failed_when: false failed_when: false
when: not containerized_deployment | bool when: not containerized_deployment | bool
- name: fail when admin key is not present - name: Fail when admin key is not present
fail: ansible.builtin.fail:
msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done." msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
with_items: "{{ ceph_admin_key.results }}" with_items: "{{ ceph_admin_key.results }}"
when: when:
@ -100,8 +100,8 @@
- item.stat.exists == false - item.stat.exists == false
# NOTE(leseb): using '>' is the only way I could have the command working # NOTE(leseb): using '>' is the only way I could have the command working
- name: find osd device based on the id - name: Find osd device based on the id
shell: > ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
list | awk -v pattern=osd.{{ item.1 }} '$0 ~ pattern {print $1}' list | awk -v pattern=osd.{{ item.1 }} '$0 ~ pattern {print $1}'
@ -112,8 +112,8 @@
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
when: containerized_deployment | bool when: containerized_deployment | bool
- name: zapping osd(s) - container - name: Zapping osd(s) - container
shell: > ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
zap {{ item.1 }} zap {{ item.1 }}
@ -124,8 +124,8 @@
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
when: containerized_deployment | bool when: containerized_deployment | bool
- name: zapping osd(s) - non container - name: Zapping osd(s) - non container
command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }} ansible.builtin.command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}
run_once: true run_once: true
with_together: with_together:
- "{{ osd_hosts }}" - "{{ osd_hosts }}"
@ -133,8 +133,8 @@
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
when: not containerized_deployment | bool when: not containerized_deployment | bool
- name: destroying osd(s) - name: Destroying osd(s)
command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap ansible.builtin.command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap
run_once: true run_once: true
with_together: with_together:
- "{{ osd_hosts }}" - "{{ osd_hosts }}"
@ -142,8 +142,8 @@
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
when: not containerized_deployment | bool when: not containerized_deployment | bool
- name: replace osd(s) - prepare - non container - name: Replace osd(s) - prepare - non container
command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen) ansible.builtin.command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)
run_once: true run_once: true
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
with_together: with_together:
@ -151,8 +151,8 @@
- "{{ osd_to_replace_disks.results }}" - "{{ osd_to_replace_disks.results }}"
- "{{ osd_to_replace.split(',') }}" - "{{ osd_to_replace.split(',') }}"
- name: replace osd(s) - prepare - container - name: Replace osd(s) - prepare - container
shell: > ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
prepare {{ item.1 }} prepare {{ item.1 }}
@ -162,16 +162,16 @@
- "{{ osd_hosts }}" - "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}" - "{{ osd_to_replace_disks.results }}"
- name: replace osd(s) - activate - non container - name: Replace osd(s) - activate - non container
command: ceph-disk activate {{ item.1 }}1 ansible.builtin.command: ceph-disk activate {{ item.1 }}1
run_once: true run_once: true
delegate_to: "{{ item.0 }}" delegate_to: "{{ item.0 }}"
with_together: with_together:
- "{{ osd_hosts }}" - "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}" - "{{ osd_to_replace_disks.results }}"
- name: replace osd(s) - activate - container - name: Replace osd(s) - activate - container
shell: > ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
activate {{ item.1 }}1 activate {{ item.1 }}1
@ -181,10 +181,10 @@
- "{{ osd_hosts }}" - "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}" - "{{ osd_to_replace_disks.results }}"
- name: show ceph health - name: Show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
delegate_to: "{{ groups[mon_group_name][0] }}" delegate_to: "{{ groups[mon_group_name][0] }}"
- name: show ceph osd tree - name: Show ceph osd tree
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree" ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
delegate_to: "{{ groups[mon_group_name][0] }}" delegate_to: "{{ groups[mon_group_name][0] }}"


@ -37,5 +37,5 @@ pools:
# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ... # - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
keys: keys:
- { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" } - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
- { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" } - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }


@ -4,11 +4,11 @@ galaxy_info:
author: Sébastien Han author: Sébastien Han
description: Installs A Ceph Client description: Installs A Ceph Client
license: Apache license: Apache
min_ansible_version: 2.7 min_ansible_version: '2.7'
platforms: platforms:
- name: EL - name: EL
versions: versions:
- 7 - 'all'
galaxy_tags: galaxy_tags:
- system - system
dependencies: [] dependencies: []


@ -1,13 +1,13 @@
--- ---
- name: set_fact delegated_node - name: Set_fact delegated_node
set_fact: ansible.builtin.set_fact:
delegated_node: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else inventory_hostname }}" delegated_node: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else inventory_hostname }}"
- name: set_fact admin_key_presence - name: Set_fact admin_key_presence
set_fact: ansible.builtin.set_fact:
admin_key_presence: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}" admin_key_presence: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}"
- name: create cephx key(s) - name: Create cephx key(s)
ceph_key: ceph_key:
name: "{{ item.name }}" name: "{{ item.name }}"
caps: "{{ item.caps }}" caps: "{{ item.caps }}"
@ -30,8 +30,8 @@
- inventory_hostname == groups.get('_filtered_clients') | first - inventory_hostname == groups.get('_filtered_clients') | first
no_log: "{{ no_log_on_ceph_key_tasks }}" no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: slurp client cephx key(s) - name: Slurp client cephx key(s)
slurp: ansible.builtin.slurp:
src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring" src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
with_items: "{{ keys }}" with_items: "{{ keys }}"
register: slurp_client_keys register: slurp_client_keys
@ -42,16 +42,17 @@
- inventory_hostname == groups.get('_filtered_clients') | first - inventory_hostname == groups.get('_filtered_clients') | first
no_log: "{{ no_log_on_ceph_key_tasks }}" no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: pool related tasks - name: Pool related tasks
when: when:
- admin_key_presence | bool - admin_key_presence | bool
- inventory_hostname == groups.get('_filtered_clients', []) | first - inventory_hostname == groups.get('_filtered_clients', []) | first
block: block:
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: get_def_crush_rule_name.yml tasks_from: get_def_crush_rule_name.yml
- name: create ceph pool(s) - name: Create ceph pool(s)
ceph_pool: ceph_pool:
name: "{{ item.name }}" name: "{{ item.name }}"
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
@ -72,8 +73,8 @@
changed_when: false changed_when: false
delegate_to: "{{ delegated_node }}" delegate_to: "{{ delegated_node }}"
- name: get client cephx keys - name: Get client cephx keys
copy: ansible.builtin.copy:
dest: "{{ item.source }}" dest: "{{ item.source }}"
content: "{{ item.content | b64decode }}" content: "{{ item.content | b64decode }}"
mode: "{{ item.item.get('mode', '0600') }}" mode: "{{ item.item.get('mode', '0600') }}"
@ -82,4 +83,3 @@
with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}" with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}"
when: not item.get('skipped', False) when: not item.get('skipped', False)
no_log: "{{ no_log_on_ceph_key_tasks }}" no_log: "{{ no_log_on_ceph_key_tasks }}"
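Note: ansible.builtin.slurp always returns file content base64-encoded, which is why the copy task above has to decode each slurped keyring before writing it out on the client node:

    content: "{{ item.content | b64decode }}"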


@ -1,10 +1,10 @@
--- ---
- name: include pre_requisite.yml - name: Include pre_requisite.yml
include_tasks: pre_requisite.yml ansible.builtin.include_tasks: pre_requisite.yml
when: groups.get(mon_group_name, []) | length > 0 when: groups.get(mon_group_name, []) | length > 0
- name: include create_users_keys.yml - name: Include create_users_keys.yml
include_tasks: create_users_keys.yml ansible.builtin.include_tasks: create_users_keys.yml
when: when:
- user_config | bool - user_config | bool
- not rolling_update | default(False) | bool - not rolling_update | default(False) | bool


@ -1,7 +1,10 @@
--- ---
- name: copy ceph admin keyring - name: Copy ceph admin keyring
when:
- cephx | bool
- copy_admin_key | bool
block: block:
- name: get keys from monitors - name: Get keys from monitors
ceph_key: ceph_key:
name: client.admin name: client.admin
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
@ -15,14 +18,11 @@
run_once: true run_once: true
no_log: "{{ no_log_on_ceph_key_tasks }}" no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: copy ceph key(s) if needed - name: Copy ceph key(s) if needed
copy: ansible.builtin.copy:
dest: "/etc/ceph/{{ cluster }}.client.admin.keyring" dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
content: "{{ _admin_key.stdout + '\n' }}" content: "{{ _admin_key.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}" mode: "{{ ceph_keyring_permissions }}"
no_log: "{{ no_log_on_ceph_key_tasks }}" no_log: "{{ no_log_on_ceph_key_tasks }}"
when:
- cephx | bool
- copy_admin_key | bool


@ -4,11 +4,11 @@ galaxy_info:
author: Sébastien Han author: Sébastien Han
description: Installs Ceph description: Installs Ceph
license: Apache license: Apache
min_ansible_version: 2.7 min_ansible_version: '2.7'
platforms: platforms:
- name: EL - name: EL
versions: versions:
- 7 - 'all'
galaxy_tags: galaxy_tags:
- system - system
dependencies: [] dependencies: []


@ -1,11 +1,12 @@
--- ---
- name: configure cluster name - name: Configure cluster name
lineinfile: ansible.builtin.lineinfile:
dest: /etc/sysconfig/ceph dest: /etc/sysconfig/ceph
insertafter: EOF insertafter: EOF
create: yes create: true
line: "CLUSTER={{ cluster }}" line: "CLUSTER={{ cluster }}"
regexp: "^CLUSTER=" regexp: "^CLUSTER="
mode: "0644"
when: ansible_facts['os_family'] in ["RedHat", "Suse"] when: ansible_facts['os_family'] in ["RedHat", "Suse"]
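Note: with create: true the task creates /etc/sysconfig/ceph when it is missing, and the ^CLUSTER= regexp makes re-runs rewrite the existing line in place instead of appending duplicates. Assuming the default cluster name "ceph", the file ends up containing the single line:

    CLUSTER=ceph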
# NOTE(leseb): we are performing the following check # NOTE(leseb): we are performing the following check
@ -18,32 +19,34 @@
# - Jewel from latest Canonical 16.04 distro # - Jewel from latest Canonical 16.04 distro
# - All previous versions from Canonical # - All previous versions from Canonical
# - Infernalis from ceph.com # - Infernalis from ceph.com
- name: debian based systems - configure cluster name - name: Debian based systems - configure cluster name
when: ansible_facts['os_family'] == "Debian" when: ansible_facts['os_family'] == "Debian"
block: block:
- name: check /etc/default/ceph exist - name: Check /etc/default/ceph exist
stat: ansible.builtin.stat:
path: /etc/default/ceph path: /etc/default/ceph
register: etc_default_ceph register: etc_default_ceph
check_mode: no check_mode: false
- name: configure cluster name - name: Configure cluster name
when: etc_default_ceph.stat.exists when: etc_default_ceph.stat.exists
block: block:
- name: when /etc/default/ceph is not dir - name: When /etc/default/ceph is not dir
lineinfile: ansible.builtin.lineinfile:
dest: /etc/default/ceph dest: /etc/default/ceph
insertafter: EOF insertafter: EOF
create: yes create: true
regexp: "^CLUSTER=" regexp: "^CLUSTER="
line: "CLUSTER={{ cluster }}" line: "CLUSTER={{ cluster }}"
mode: "0644"
when: not etc_default_ceph.stat.isdir when: not etc_default_ceph.stat.isdir
- name: when /etc/default/ceph is dir - name: When /etc/default/ceph is dir
lineinfile: ansible.builtin.lineinfile:
dest: /etc/default/ceph/ceph dest: /etc/default/ceph/ceph
insertafter: EOF insertafter: EOF
create: yes create: true
regexp: "^CLUSTER=" regexp: "^CLUSTER="
line: "CLUSTER={{ cluster }}" line: "CLUSTER={{ cluster }}"
mode: "0644"
when: etc_default_ceph.stat.isdir when: etc_default_ceph.stat.isdir


@ -1,34 +1,36 @@
--- ---
- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for debian - name: Configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for debian
lineinfile: ansible.builtin.lineinfile:
dest: "{{ etc_default_ceph.stat.isdir | ternary('/etc/default/ceph/ceph', '/etc/default/ceph') }}" dest: "{{ etc_default_ceph.stat.isdir | ternary('/etc/default/ceph/ceph', '/etc/default/ceph') }}"
insertafter: EOF insertafter: EOF
create: yes create: true
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=" regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}" line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
mode: "0644"
when: when:
- ansible_facts['os_family'] == 'Debian' - ansible_facts['os_family'] == 'Debian'
- etc_default_ceph.stat.exists - etc_default_ceph.stat.exists
notify: notify:
- restart ceph mons - Restart ceph mons
- restart ceph mgrs - Restart ceph mgrs
- restart ceph osds - Restart ceph osds
- restart ceph mdss - Restart ceph mdss
- restart ceph rgws - Restart ceph rgws
- restart ceph rbdmirrors - Restart ceph rbdmirrors
- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat - name: Configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat
lineinfile: ansible.builtin.lineinfile:
dest: "/etc/sysconfig/ceph" dest: "/etc/sysconfig/ceph"
insertafter: EOF insertafter: EOF
create: yes create: true
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=" regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}" line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
mode: "0644"
when: ansible_facts['os_family'] == 'RedHat' when: ansible_facts['os_family'] == 'RedHat'
notify: notify:
- restart ceph mons - Restart ceph mons
- restart ceph mgrs - Restart ceph mgrs
- restart ceph osds - Restart ceph osds
- restart ceph mdss - Restart ceph mdss
- restart ceph rgws - Restart ceph rgws
- restart ceph rbdmirrors - Restart ceph rbdmirrors
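Note: notify entries are matched to handlers by their exact name, so the handler names have to be capitalized in lockstep with these notify strings; a stale lowercase reference would no longer resolve. A sketch of the matching handler's shape, with an assumed service unit name (the real handlers live in the ceph-handler role):

    handlers:
      - name: Restart ceph mons
        ansible.builtin.service:
          name: "ceph-mon@{{ ansible_facts['hostname'] }}"   # assumed unit name
          state: restarted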


@ -1,32 +1,32 @@
--- ---
- name: config repository for Red Hat based OS - name: Config repository for Red Hat based OS
when: ansible_facts['os_family'] == 'RedHat' when: ansible_facts['os_family'] == 'RedHat'
block: block:
- name: include installs/configure_redhat_repository_installation.yml - name: Include installs/configure_redhat_repository_installation.yml
include_tasks: installs/configure_redhat_repository_installation.yml ansible.builtin.include_tasks: installs/configure_redhat_repository_installation.yml
when: ceph_origin == 'repository' when: ceph_origin == 'repository'
- name: include installs/configure_redhat_local_installation.yml - name: Include installs/configure_redhat_local_installation.yml
include_tasks: installs/configure_redhat_local_installation.yml ansible.builtin.include_tasks: installs/configure_redhat_local_installation.yml
when: ceph_origin == 'local' when: ceph_origin == 'local'
- name: config repository for Debian based OS - name: Config repository for Debian based OS
when: ansible_facts['os_family'] == 'Debian' when: ansible_facts['os_family'] == 'Debian'
tags: package-install
block: block:
- name: include installs/configure_debian_repository_installation.yml - name: Include installs/configure_debian_repository_installation.yml
include_tasks: installs/configure_debian_repository_installation.yml ansible.builtin.include_tasks: installs/configure_debian_repository_installation.yml
when: ceph_origin == 'repository' when: ceph_origin == 'repository'
- name: update apt cache if cache_valid_time has expired - name: Update apt cache if cache_valid_time has expired
apt: ansible.builtin.apt:
update_cache: yes update_cache: true
cache_valid_time: 3600 cache_valid_time: 3600
register: result register: result
until: result is succeeded until: result is succeeded
tags: package-install
- name: include installs/configure_suse_repository_installation.yml - name: Include installs/configure_suse_repository_installation.yml
include_tasks: installs/configure_suse_repository_installation.yml ansible.builtin.include_tasks: installs/configure_suse_repository_installation.yml
when: when:
- ansible_facts['os_family'] == 'Suse' - ansible_facts['os_family'] == 'Suse'
- ceph_origin == 'repository' - ceph_origin == 'repository'


@ -1,6 +1,6 @@
--- ---
- name: create rbd client directory - name: Create rbd client directory
file: ansible.builtin.file:
path: "{{ item }}" path: "{{ item }}"
state: directory state: directory
owner: "{{ rbd_client_directory_owner }}" owner: "{{ rbd_client_directory_owner }}"


@ -1,16 +1,16 @@
--- ---
- name: include debian_community_repository.yml - name: Include debian_community_repository.yml
include_tasks: debian_community_repository.yml ansible.builtin.include_tasks: debian_community_repository.yml
when: ceph_repository == 'community' when: ceph_repository == 'community'
- name: include debian_dev_repository.yml - name: Include debian_dev_repository.yml
include_tasks: debian_dev_repository.yml ansible.builtin.include_tasks: debian_dev_repository.yml
when: ceph_repository == 'dev' when: ceph_repository == 'dev'
- name: include debian_custom_repository.yml - name: Include debian_custom_repository.yml
include_tasks: debian_custom_repository.yml ansible.builtin.include_tasks: debian_custom_repository.yml
when: ceph_repository == 'custom' when: ceph_repository == 'custom'
- name: include debian_uca_repository.yml - name: Include debian_uca_repository.yml
include_tasks: debian_uca_repository.yml ansible.builtin.include_tasks: debian_uca_repository.yml
when: ceph_repository == 'uca' when: ceph_repository == 'uca'


@ -1,43 +1,45 @@
--- ---
- name: make sure /tmp exists - name: Make sure /tmp exists
file: ansible.builtin.file:
path: /tmp path: /tmp
state: directory state: directory
mode: "0755"
when: use_installer | bool when: use_installer | bool
- name: use mktemp to create name for rundep - name: Use mktemp to create name for rundep
tempfile: ansible.builtin.tempfile:
path: /tmp path: /tmp
prefix: rundep. prefix: rundep.
register: rundep_location register: rundep_location
when: use_installer | bool when: use_installer | bool
- name: copy rundep - name: Copy rundep
copy: ansible.builtin.copy:
src: "{{ ansible_dir }}/rundep" src: "{{ ansible_dir }}/rundep"
dest: "{{ rundep_location.path }}" dest: "{{ rundep_location.path }}"
mode: preserve
when: use_installer | bool when: use_installer | bool
- name: install ceph dependencies - name: Install ceph dependencies
script: "{{ ansible_dir }}/rundep_installer.sh {{ rundep_location.path }}" ansible.builtin.script: "{{ ansible_dir }}/rundep_installer.sh {{ rundep_location.path }}"
when: use_installer | bool when: use_installer | bool
- name: ensure rsync is installed - name: Ensure rsync is installed
package: ansible.builtin.package:
name: rsync name: rsync
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
- name: synchronize ceph install - name: Synchronize ceph install
synchronize: ansible.posix.synchronize:
src: "{{ ceph_installation_dir }}/" src: "{{ ceph_installation_dir }}/"
dest: "/" dest: "/"
- name: create user group ceph - name: Create user group ceph
group: ansible.builtin.group:
name: 'ceph' name: 'ceph'
- name: create user ceph - name: Create user ceph
user: ansible.builtin.user:
name: 'ceph' name: 'ceph'


@ -1,22 +1,22 @@
--- ---
- name: include redhat_community_repository.yml - name: Include redhat_community_repository.yml
include_tasks: redhat_community_repository.yml ansible.builtin.include_tasks: redhat_community_repository.yml
when: ceph_repository == 'community' when: ceph_repository == 'community'
- name: include redhat_rhcs_repository.yml - name: Include redhat_rhcs_repository.yml
include_tasks: redhat_rhcs_repository.yml ansible.builtin.include_tasks: redhat_rhcs_repository.yml
when: ceph_repository == 'rhcs' when: ceph_repository == 'rhcs'
- name: include redhat_dev_repository.yml - name: Include redhat_dev_repository.yml
include_tasks: redhat_dev_repository.yml ansible.builtin.include_tasks: redhat_dev_repository.yml
when: ceph_repository == 'dev' when: ceph_repository == 'dev'
- name: include redhat_custom_repository.yml - name: Include redhat_custom_repository.yml
include_tasks: redhat_custom_repository.yml ansible.builtin.include_tasks: redhat_custom_repository.yml
when: ceph_repository == 'custom' when: ceph_repository == 'custom'
# Remove yum caches so yum doesn't get confused if we are reinstalling a different ceph version # Remove yum caches so yum doesn't get confused if we are reinstalling a different ceph version
- name: purge yum cache - name: Purge yum cache
command: yum clean all #noqa: [303] ansible.builtin.command: yum clean all # noqa: [303]
changed_when: false changed_when: false
when: ansible_facts['pkg_mgr'] == 'yum' when: ansible_facts['pkg_mgr'] == 'yum'


@ -1,4 +1,4 @@
--- ---
- name: include suse_obs_repository.yml - name: Include suse_obs_repository.yml
include_tasks: suse_obs_repository.yml ansible.builtin.include_tasks: suse_obs_repository.yml
when: ceph_repository == 'obs' when: ceph_repository == 'obs'


@ -1,20 +1,20 @@
--- ---
- name: install dependencies for apt modules - name: Install dependencies for apt modules
package: ansible.builtin.package:
name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common'] name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common']
update_cache: yes update_cache: true
register: result register: result
until: result is succeeded until: result is succeeded
- name: configure debian ceph community repository stable key - name: Configure debian ceph community repository stable key
apt_key: ansible.builtin.apt_key:
data: "{{ lookup('file', role_path+'/files/cephstable.asc') }}" data: "{{ lookup('file', role_path + '/files/cephstable.asc') }}"
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
- name: configure debian ceph stable community repository - name: Configure debian ceph stable community repository
apt_repository: ansible.builtin.apt_repository:
repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main" repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
state: present state: present
update_cache: yes update_cache: true


@ -1,14 +1,14 @@
--- ---
- name: configure debian custom apt key - name: Configure debian custom apt key
apt_key: ansible.builtin.apt_key:
url: "{{ ceph_custom_key }}" url: "{{ ceph_custom_key }}"
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
when: ceph_custom_key is defined when: ceph_custom_key is defined
- name: configure debian custom repository - name: Configure debian custom repository
apt_repository: ansible.builtin.apt_repository:
repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main" repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main"
state: present state: present
update_cache: yes update_cache: true


@ -1,12 +1,12 @@
--- ---
- name: fetch ceph debian development repository - name: Fetch ceph debian development repository
uri: ansible.builtin.uri:
url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo?arch={{ ansible_facts['architecture'] }}" url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo?arch={{ ansible_facts['architecture'] }}"
return_content: yes return_content: true
register: ceph_dev_deb_repo register: ceph_dev_deb_repo
- name: configure ceph debian development repository - name: Configure ceph debian development repository
apt_repository: ansible.builtin.apt_repository:
repo: "{{ ceph_dev_deb_repo.content }}" repo: "{{ ceph_dev_deb_repo.content }}"
state: present state: present
update_cache: yes update_cache: true


@ -1,12 +1,12 @@
--- ---
- name: add ubuntu cloud archive key package - name: Add ubuntu cloud archive key package
package: ansible.builtin.package:
name: ubuntu-cloud-keyring name: ubuntu-cloud-keyring
register: result register: result
until: result is succeeded until: result is succeeded
- name: add ubuntu cloud archive repository - name: Add ubuntu cloud archive repository
apt_repository: ansible.builtin.apt_repository:
repo: "deb {{ ceph_stable_repo_uca }} {{ ceph_stable_release_uca }} main" repo: "deb {{ ceph_stable_repo_uca }} {{ ceph_stable_release_uca }} main"
state: present state: present
update_cache: yes update_cache: true


@ -1,9 +1,9 @@
--- ---
- name: install ceph for debian - name: Install ceph for debian
apt: ansible.builtin.apt:
name: "{{ debian_ceph_pkgs | unique }}" name: "{{ debian_ceph_pkgs | unique }}"
update_cache: no update_cache: false
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}" default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
register: result register: result
until: result is succeeded until: result is succeeded

View File

@@ -1,7 +1,7 @@
--- ---
- name: install red hat storage ceph packages for debian - name: Install red hat storage ceph packages for debian
apt: ansible.builtin.apt:
pkg: "{{ debian_ceph_pkgs | unique }}" pkg: "{{ debian_ceph_pkgs | unique }}"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result register: result
until: result is succeeded until: result is succeeded
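
The whitespace added inside `{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}` corresponds to `jinja[spacing]`, which normalizes spacing around Jinja2 filter pipes and after argument commas. In context:

- name: Install red hat storage ceph packages for debian
  ansible.builtin.apt:
    pkg: "{{ debian_ceph_pkgs | unique }}"
    # jinja[spacing] rejects the cramped original:
    #   "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
    state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"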

View File

@@ -1,6 +1,6 @@
--- ---
- name: install ceph bundle - name: Install ceph bundle
swupd: community.general.swupd:
name: storage-cluster name: storage-cluster
state: present state: present
register: result register: result
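
Modules such as `swupd`, `rhsm_repository`, and `zypper_repository` do not ship with ansible-core, so their FQCNs carry the `community.general.` prefix, and the collection must be installable when the linter resolves them. A plausible `requirements.yml` entry covering the collections referenced in this diff (the repository's actual file is not shown here, so treat this as an assumption):

# requirements.yml (sketch; names taken from FQCNs in the diff, versions left unpinned)
collections:
  - name: community.general
  - name: openstack.config_template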

View File

@@ -1,20 +1,20 @@
- name: install dependencies - name: Install dependencies
apt: ansible.builtin.apt:
name: "{{ debian_package_dependencies }}" name: "{{ debian_package_dependencies }}"
state: present state: present
update_cache: yes update_cache: true
cache_valid_time: 3600 cache_valid_time: 3600
register: result register: result
until: result is succeeded until: result is succeeded
- name: include install_debian_packages.yml - name: Include install_debian_packages.yml
include_tasks: install_debian_packages.yml ansible.builtin.include_tasks: install_debian_packages.yml
when: when:
- (ceph_origin == 'repository' or ceph_origin == 'distro') - (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository != 'rhcs' - ceph_repository != 'rhcs'
- name: include install_debian_rhcs_packages.yml - name: Include install_debian_rhcs_packages.yml
include_tasks: install_debian_rhcs_packages.yml ansible.builtin.include_tasks: install_debian_rhcs_packages.yml
when: when:
- (ceph_origin == 'repository' or ceph_origin == 'distro') - (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository == 'rhcs' - ceph_repository == 'rhcs'

View File

@@ -1,23 +1,23 @@
--- ---
- name: install redhat dependencies - name: Install redhat dependencies
package: ansible.builtin.package:
name: "{{ redhat_package_dependencies }}" name: "{{ redhat_package_dependencies }}"
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
when: ansible_facts['distribution'] == 'RedHat' when: ansible_facts['distribution'] == 'RedHat'
- name: install centos dependencies - name: Install centos dependencies
yum: ansible.builtin.yum:
name: "{{ centos_package_dependencies }}" name: "{{ centos_package_dependencies }}"
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
when: ansible_facts['distribution'] == 'CentOS' when: ansible_facts['distribution'] == 'CentOS'
- name: install redhat ceph packages - name: Install redhat ceph packages
package: ansible.builtin.package:
name: "{{ redhat_ceph_pkgs | unique }}" name: "{{ redhat_ceph_pkgs | unique }}"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result register: result
until: result is succeeded until: result is succeeded

View File

@@ -1,14 +1,14 @@
--- ---
- name: install SUSE/openSUSE dependencies - name: Install SUSE/openSUSE dependencies
package: ansible.builtin.package:
name: "{{ suse_package_dependencies }}" name: "{{ suse_package_dependencies }}"
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
- name: install SUSE/openSUSE ceph packages - name: Install SUSE/openSUSE ceph packages
package: ansible.builtin.package:
name: "{{ suse_ceph_pkgs | unique }}" name: "{{ suse_ceph_pkgs | unique }}"
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result register: result
until: result is succeeded until: result is succeeded

View File

@@ -1,6 +1,6 @@
--- ---
- name: enable red hat storage tools repository - name: Enable red hat storage tools repository
rhsm_repository: community.general.rhsm_repository:
name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms" name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
when: when:
- mon_group_name in group_names - mon_group_name in group_names

View File

@@ -1,24 +1,24 @@
--- ---
- name: install yum plugin priorities - name: Install yum plugin priorities
package: ansible.builtin.package:
name: yum-plugin-priorities name: yum-plugin-priorities
register: result register: result
until: result is succeeded until: result is succeeded
tags: with_pkg tags: with_pkg
when: ansible_facts['distribution_major_version'] | int == 7 when: ansible_facts['distribution_major_version'] | int == 7
- name: configure red hat ceph community repository stable key - name: Configure red hat ceph community repository stable key
rpm_key: ansible.builtin.rpm_key:
key: "{{ ceph_stable_key }}" key: "{{ ceph_stable_key }}"
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
- name: configure red hat ceph stable community repository - name: Configure red hat ceph stable community repository
yum_repository: ansible.builtin.yum_repository:
name: ceph_stable name: ceph_stable
description: Ceph Stable $basearch repo description: Ceph Stable $basearch repo
gpgcheck: yes gpgcheck: true
state: present state: present
gpgkey: "{{ ceph_stable_key }}" gpgkey: "{{ ceph_stable_key }}"
baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch" baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch"
@@ -27,11 +27,11 @@
register: result register: result
until: result is succeeded until: result is succeeded
- name: configure red hat ceph stable noarch community repository - name: Configure red hat ceph stable noarch community repository
yum_repository: ansible.builtin.yum_repository:
name: ceph_stable_noarch name: ceph_stable_noarch
description: Ceph Stable noarch repo description: Ceph Stable noarch repo
gpgcheck: yes gpgcheck: true
state: present state: present
gpgkey: "{{ ceph_stable_key }}" gpgkey: "{{ ceph_stable_key }}"
baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch" baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch"

View File

@@ -1,15 +1,16 @@
--- ---
- name: configure red hat custom rpm key - name: Configure red hat custom rpm key
rpm_key: ansible.builtin.rpm_key:
key: "{{ ceph_custom_key }}" key: "{{ ceph_custom_key }}"
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
when: ceph_custom_key is defined when: ceph_custom_key is defined
- name: configure red hat custom repository - name: Configure red hat custom repository
get_url: ansible.builtin.get_url:
url: "{{ ceph_custom_repo }}" url: "{{ ceph_custom_repo }}"
dest: /etc/yum.repos.d dest: /etc/yum.repos.d
owner: root owner: root
group: root group: root
mode: "0644"

View File

@@ -1,21 +1,22 @@
--- ---
- name: fetch ceph red hat development repository - name: Fetch ceph red hat development repository
uri: ansible.builtin.uri:
# Use the centos repo since we don't currently have a dedicated red hat repo # Use the centos repo since we don't currently have a dedicated red hat repo
url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/centos/{{ ansible_facts['distribution_major_version'] }}/repo?arch={{ ansible_facts['architecture'] }}" url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/centos/{{ ansible_facts['distribution_major_version'] }}/repo?arch={{ ansible_facts['architecture'] }}"
return_content: yes return_content: true
register: ceph_dev_yum_repo register: ceph_dev_yum_repo
- name: configure ceph red hat development repository - name: Configure ceph red hat development repository
copy: ansible.builtin.copy:
content: "{{ ceph_dev_yum_repo.content }}" content: "{{ ceph_dev_yum_repo.content }}"
dest: /etc/yum.repos.d/ceph-dev.repo dest: /etc/yum.repos.d/ceph-dev.repo
owner: root owner: root
group: root group: root
backup: yes mode: "0644"
backup: true
- name: remove ceph_stable repositories - name: Remove ceph_stable repositories
yum_repository: ansible.builtin.yum_repository:
name: '{{ item }}' name: '{{ item }}'
file: ceph_stable file: ceph_stable
state: absent state: absent

View File

@@ -1,3 +1,3 @@
--- ---
- name: include prerequisite_rhcs_cdn_install.yml - name: Include prerequisite_rhcs_cdn_install.yml
include_tasks: prerequisite_rhcs_cdn_install.yml ansible.builtin.include_tasks: prerequisite_rhcs_cdn_install.yml

View File

@@ -1,8 +1,8 @@
--- ---
- name: configure openSUSE ceph OBS repository - name: Configure openSUSE ceph OBS repository
zypper_repository: community.general.zypper_repository:
name: "OBS:filesystems:ceph:{{ ceph_release }}" name: "OBS:filesystems:ceph:{{ ceph_release }}"
state: present state: present
repo: "{{ ceph_obs_repo }}" repo: "{{ ceph_obs_repo }}"
auto_import_keys: yes auto_import_keys: true
autorefresh: yes autorefresh: true

View File

@@ -1,71 +1,71 @@
--- ---
- name: include configure_repository.yml - name: Include configure_repository.yml
include_tasks: configure_repository.yml ansible.builtin.include_tasks: configure_repository.yml
tags: package-configure tags: package-configure
- name: include installs/install_redhat_packages.yml - name: Include installs/install_redhat_packages.yml
include_tasks: installs/install_redhat_packages.yml ansible.builtin.include_tasks: installs/install_redhat_packages.yml
when: when:
- ansible_facts['os_family'] == 'RedHat' - ansible_facts['os_family'] == 'RedHat'
- (ceph_origin == 'repository' or ceph_origin == 'distro') - (ceph_origin == 'repository' or ceph_origin == 'distro')
tags: package-install tags: package-install
- name: include installs/install_suse_packages.yml - name: Include installs/install_suse_packages.yml
include_tasks: installs/install_suse_packages.yml ansible.builtin.include_tasks: installs/install_suse_packages.yml
when: ansible_facts['os_family'] == 'Suse' when: ansible_facts['os_family'] == 'Suse'
tags: package-install tags: package-install
- name: include installs/install_on_debian.yml - name: Include installs/install_on_debian.yml
include_tasks: installs/install_on_debian.yml ansible.builtin.include_tasks: installs/install_on_debian.yml
tags: package-install tags: package-install
when: ansible_facts['os_family'] == 'Debian' when: ansible_facts['os_family'] == 'Debian'
- name: include_tasks installs/install_on_clear.yml - name: Include_tasks installs/install_on_clear.yml
include_tasks: installs/install_on_clear.yml ansible.builtin.include_tasks: installs/install_on_clear.yml
when: ansible_facts['os_family'] == 'ClearLinux' when: ansible_facts['os_family'] == 'ClearLinux'
tags: package-install tags: package-install
- name: get ceph version - name: Get ceph version
command: ceph --version ansible.builtin.command: ceph --version
changed_when: false changed_when: false
check_mode: no check_mode: false
register: ceph_version register: ceph_version
- name: set_fact ceph_version - name: Set_fact ceph_version
set_fact: ansible.builtin.set_fact:
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}" ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory # override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
- name: include release-rhcs.yml - name: Include release-rhcs.yml
include_tasks: release-rhcs.yml ansible.builtin.include_tasks: release-rhcs.yml
when: ceph_repository in ['rhcs', 'dev'] when: ceph_repository in ['rhcs', 'dev']
or or
ceph_origin == 'distro' ceph_origin == 'distro'
tags: always tags: always
- name: set_fact ceph_release - override ceph_release with ceph_stable_release - name: Set_fact ceph_release - override ceph_release with ceph_stable_release
set_fact: ansible.builtin.set_fact:
ceph_release: "{{ ceph_stable_release }}" ceph_release: "{{ ceph_stable_release }}"
when: when:
- ceph_origin == 'repository' - ceph_origin == 'repository'
- ceph_repository not in ['dev', 'rhcs', 'custom'] - ceph_repository not in ['dev', 'rhcs', 'custom']
tags: always tags: always
- name: include create_rbd_client_dir.yml - name: Include create_rbd_client_dir.yml
include_tasks: create_rbd_client_dir.yml ansible.builtin.include_tasks: create_rbd_client_dir.yml
- name: include configure_cluster_name.yml - name: Include configure_cluster_name.yml
include_tasks: configure_cluster_name.yml ansible.builtin.include_tasks: configure_cluster_name.yml
- name: include configure_memory_allocator.yml - name: Include configure_memory_allocator.yml
include_tasks: configure_memory_allocator.yml ansible.builtin.include_tasks: configure_memory_allocator.yml
when: when:
- (ceph_tcmalloc_max_total_thread_cache | int) > 0 - (ceph_tcmalloc_max_total_thread_cache | int) > 0
- (ceph_origin == 'repository' or ceph_origin == 'distro') - (ceph_origin == 'repository' or ceph_origin == 'distro')
- name: include selinux.yml - name: Include selinux.yml
include_tasks: selinux.yml ansible.builtin.include_tasks: selinux.yml
when: when:
- ansible_facts['os_family'] == 'RedHat' - ansible_facts['os_family'] == 'RedHat'
- inventory_hostname in groups.get(nfs_group_name, []) - inventory_hostname in groups.get(nfs_group_name, [])
or inventory_hostname in groups.get(rgwloadbalancer_group_name, []) or inventory_hostname in groups.get(rgwloadbalancer_group_name, [])

View File

@@ -1,45 +1,45 @@
--- ---
- name: set_fact ceph_release jewel - name: Set_fact ceph_release jewel
set_fact: ansible.builtin.set_fact:
ceph_release: jewel ceph_release: jewel
when: ceph_version.split('.')[0] is version('10', '==') when: ceph_version.split('.')[0] is version('10', '==')
- name: set_fact ceph_release kraken - name: Set_fact ceph_release kraken
set_fact: ansible.builtin.set_fact:
ceph_release: kraken ceph_release: kraken
when: ceph_version.split('.')[0] is version('11', '==') when: ceph_version.split('.')[0] is version('11', '==')
- name: set_fact ceph_release luminous - name: Set_fact ceph_release luminous
set_fact: ansible.builtin.set_fact:
ceph_release: luminous ceph_release: luminous
when: ceph_version.split('.')[0] is version('12', '==') when: ceph_version.split('.')[0] is version('12', '==')
- name: set_fact ceph_release mimic - name: Set_fact ceph_release mimic
set_fact: ansible.builtin.set_fact:
ceph_release: mimic ceph_release: mimic
when: ceph_version.split('.')[0] is version('13', '==') when: ceph_version.split('.')[0] is version('13', '==')
- name: set_fact ceph_release nautilus - name: Set_fact ceph_release nautilus
set_fact: ansible.builtin.set_fact:
ceph_release: nautilus ceph_release: nautilus
when: ceph_version.split('.')[0] is version('14', '==') when: ceph_version.split('.')[0] is version('14', '==')
- name: set_fact ceph_release octopus - name: Set_fact ceph_release octopus
set_fact: ansible.builtin.set_fact:
ceph_release: octopus ceph_release: octopus
when: ceph_version.split('.')[0] is version('15', '==') when: ceph_version.split('.')[0] is version('15', '==')
- name: set_fact ceph_release pacific - name: Set_fact ceph_release pacific
set_fact: ansible.builtin.set_fact:
ceph_release: pacific ceph_release: pacific
when: ceph_version.split('.')[0] is version('16', '==') when: ceph_version.split('.')[0] is version('16', '==')
- name: set_fact ceph_release quincy - name: Set_fact ceph_release quincy
set_fact: ansible.builtin.set_fact:
ceph_release: quincy ceph_release: quincy
when: ceph_version.split('.')[0] is version('17', '==') when: ceph_version.split('.')[0] is version('17', '==')
- name: set_fact ceph_release reef - name: Set_fact ceph_release reef
set_fact: ansible.builtin.set_fact:
ceph_release: reef ceph_release: reef
when: ceph_version.split('.')[0] is version('18', '==') when: ceph_version.split('.')[0] is version('18', '==')

View File

@@ -1,17 +1,17 @@
--- ---
- name: if selinux is not disabled - name: If selinux is not disabled
when: ansible_facts['selinux']['status'] == 'enabled' when: ansible_facts['selinux']['status'] == 'enabled'
block: block:
- name: install policycoreutils-python - name: Install policycoreutils-python
package: ansible.builtin.package:
name: policycoreutils-python name: policycoreutils-python
state: present state: present
register: result register: result
until: result is succeeded until: result is succeeded
when: ansible_facts['distribution_major_version'] == '7' when: ansible_facts['distribution_major_version'] == '7'
- name: install python3-policycoreutils on RHEL 8 - name: Install python3-policycoreutils on RHEL 8
package: ansible.builtin.package:
name: python3-policycoreutils name: python3-policycoreutils
state: present state: present
register: result register: result

View File

@@ -4,11 +4,11 @@ galaxy_info:
author: Guillaume Abrioux author: Guillaume Abrioux
description: Handles ceph-ansible initial configuration description: Handles ceph-ansible initial configuration
license: Apache license: Apache
min_ansible_version: 2.7 min_ansible_version: '2.7'
platforms: platforms:
- name: EL - name: EL
versions: versions:
- 7 - 'all'
galaxy_tags: galaxy_tags:
- system - system
dependencies: [] dependencies: []
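
The `meta/main.yml` edits, quoting `min_ansible_version` and replacing the stale EL `- 7` pin with `'all'`, line up with the `schema[meta]` check, which validates role metadata against the Galaxy schema: `min_ansible_version` must be a string, and platform versions must be values the schema recognizes. A compliant stanza might look like this (author and description are placeholders):

galaxy_info:
  author: Example Author
  description: Example role
  license: Apache
  min_ansible_version: '2.7'   # a string, not a float
  platforms:
    - name: EL
      versions:
        - 'all'                # a version value the Galaxy schema accepts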

View File

@@ -1,11 +1,11 @@
--- ---
- name: create ceph initial directories - name: Create ceph initial directories
file: ansible.builtin.file:
path: "{{ item }}" path: "{{ item }}"
state: directory state: directory
owner: "{{ ceph_uid }}" owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}" group: "{{ ceph_uid }}"
mode: 0755 mode: "0755"
loop: loop:
- /etc/ceph - /etc/ceph
- /var/lib/ceph/ - /var/lib/ceph/
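
Quoting file modes (`mode: 0755` becoming `mode: "0755"`) addresses `risky-octal`: a bare numeric mode is easy to get wrong, since YAML reads `0755` as an octal integer but `755` as decimal 755, which maps to a very different permission set. The rule therefore requires a quoted octal string or a symbolic mode:

# risky-octal: bare numeric modes are ambiguous
- name: Create a directory
  ansible.builtin.file:
    path: /etc/ceph
    state: directory
    mode: 0755

# Accepted: quoted octal (a symbolic form such as "u=rwx,g=rx,o=rx" also passes)
- name: Create a directory
  ansible.builtin.file:
    path: /etc/ceph
    state: directory
    mode: "0755"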

View File

@@ -1,30 +1,33 @@
--- ---
- name: include create_ceph_initial_dirs.yml - name: Include create_ceph_initial_dirs.yml
include_tasks: create_ceph_initial_dirs.yml ansible.builtin.include_tasks: create_ceph_initial_dirs.yml
when: containerized_deployment | bool when: containerized_deployment | bool
- name: include_tasks rgw_systemd_environment_file.yml - name: Include_tasks rgw_systemd_environment_file.yml
include_tasks: rgw_systemd_environment_file.yml ansible.builtin.include_tasks: rgw_systemd_environment_file.yml
when: inventory_hostname in groups.get(rgw_group_name, []) when: inventory_hostname in groups.get(rgw_group_name, [])
- name: config file operations related to OSDs - name: Config file operations related to OSDs
when: when:
- inventory_hostname in groups.get(osd_group_name, []) - inventory_hostname in groups.get(osd_group_name, [])
# the rolling_update.yml playbook sets num_osds to the number of currently # the rolling_update.yml playbook sets num_osds to the number of currently
# running osds # running osds
- not rolling_update | bool - not rolling_update | bool
block: block:
- name: reset num_osds - name: Reset num_osds
set_fact: ansible.builtin.set_fact:
num_osds: 0 num_osds: 0
- name: count number of osds for lvm scenario - name: Count number of osds for lvm scenario
set_fact: ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + (lvm_volumes | length | int) }}" num_osds: "{{ num_osds | int + (lvm_volumes | length | int) }}"
when: lvm_volumes | default([]) | length > 0 when: lvm_volumes | default([]) | length > 0
- block: - name: Ceph-volume pre-requisites tasks
- name: look up for ceph-volume rejected devices when:
- devices | default([]) | length > 0
block:
- name: Look up for ceph-volume rejected devices
ceph_volume: ceph_volume:
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
action: "inventory" action: "inventory"
@@ -35,17 +38,17 @@
CEPH_CONTAINER_BINARY: "{{ container_binary }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}"
PYTHONIOENCODING: utf-8 PYTHONIOENCODING: utf-8
- name: set_fact rejected_devices - name: Set_fact rejected_devices
set_fact: ansible.builtin.set_fact:
_rejected_devices: "{{ _rejected_devices | default([]) + [item.path] }}" _rejected_devices: "{{ _rejected_devices | default([]) + [item.path] }}"
with_items: "{{ rejected_devices.stdout | default('{}') | from_json }}" with_items: "{{ rejected_devices.stdout | default('{}') | from_json }}"
when: "'Used by ceph-disk' in item.rejected_reasons" when: "'Used by ceph-disk' in item.rejected_reasons"
- name: set_fact _devices - name: Set_fact _devices
set_fact: ansible.builtin.set_fact:
_devices: "{{ devices | difference(_rejected_devices | default([])) }}" _devices: "{{ devices | difference(_rejected_devices | default([])) }}"
- name: run 'ceph-volume lvm batch --report' to see how many osds are to be created - name: Run 'ceph-volume lvm batch --report' to see how many osds are to be created
ceph_volume: ceph_volume:
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
objectstore: "{{ osd_objectstore }}" objectstore: "{{ osd_objectstore }}"
@@ -62,23 +65,21 @@
PYTHONIOENCODING: utf-8 PYTHONIOENCODING: utf-8
when: _devices | default([]) | length > 0 when: _devices | default([]) | length > 0
- name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' (legacy report) - name: Set_fact num_osds from the output of 'ceph-volume lvm batch --report' (legacy report)
set_fact: ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json).osds | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}" num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json).osds | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}"
when: when:
- (lvm_batch_report.stdout | default('{}') | from_json) is mapping - (lvm_batch_report.stdout | default('{}') | from_json) is mapping
- (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool - (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool
- name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' (new report) - name: Set_fact num_osds from the output of 'ceph-volume lvm batch --report' (new report)
set_fact: ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json) | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}" num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json) | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}"
when: when:
- (lvm_batch_report.stdout | default('{}') | from_json) is not mapping - (lvm_batch_report.stdout | default('{}') | from_json) is not mapping
- (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool - (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool
when:
- devices | default([]) | length > 0
- name: run 'ceph-volume lvm list' to see how many osds have already been created - name: Run 'ceph-volume lvm list' to see how many osds have already been created
ceph_volume: ceph_volume:
action: "list" action: "list"
register: lvm_list register: lvm_list
@@ -89,31 +90,31 @@
PYTHONIOENCODING: utf-8 PYTHONIOENCODING: utf-8
changed_when: false changed_when: false
- name: set_fact num_osds (add existing osds) - name: Set_fact num_osds (add existing osds)
set_fact: ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + (lvm_list.stdout | default('{}') | from_json | dict2items | map(attribute='value') | flatten | map(attribute='devices') | sum(start=[]) | difference(lvm_volumes | default([]) | map(attribute='data')) | length | int) }}" num_osds: "{{ num_osds | int + (lvm_list.stdout | default('{}') | from_json | dict2items | map(attribute='value') | flatten | map(attribute='devices') | sum(start=[]) | difference(lvm_volumes | default([]) | map(attribute='data')) | length | int) }}"
- name: set osd related config facts - name: Set osd related config facts
when: inventory_hostname in groups.get(osd_group_name, []) when: inventory_hostname in groups.get(osd_group_name, [])
block: block:
- name: set_fact _osd_memory_target, override from ceph_conf_overrides - name: Set_fact _osd_memory_target, override from ceph_conf_overrides
set_fact: ansible.builtin.set_fact:
_osd_memory_target: "{{ item }}" _osd_memory_target: "{{ item }}"
loop: loop:
- "{{ ceph_conf_overrides.get('osd', {}).get('osd memory target', '') }}" - "{{ ceph_conf_overrides.get('osd', {}).get('osd memory target', '') }}"
- "{{ ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') }}" - "{{ ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') }}"
when: item when: item
- name: set_fact _osd_memory_target - name: Set_fact _osd_memory_target
set_fact: ansible.builtin.set_fact:
_osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}" _osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
when: when:
- _osd_memory_target is undefined - _osd_memory_target is undefined
- num_osds | default(0) | int > 0 - num_osds | default(0) | int > 0
- ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > (osd_memory_target | float) - ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > (osd_memory_target | float)
- name: create ceph conf directory - name: Create ceph conf directory
file: ansible.builtin.file:
path: "/etc/ceph" path: "/etc/ceph"
state: directory state: directory
owner: "ceph" owner: "ceph"
@@ -121,13 +122,13 @@
mode: "{{ ceph_directories_mode }}" mode: "{{ ceph_directories_mode }}"
when: not containerized_deployment | bool when: not containerized_deployment | bool
- name: import_role ceph-facts - name: Import_role ceph-facts
import_role: ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: set_radosgw_address.yml tasks_from: set_radosgw_address.yml
when: inventory_hostname in groups.get(rgw_group_name, []) when: inventory_hostname in groups.get(rgw_group_name, [])
- name: "generate {{ cluster }}.conf configuration file" - name: Generate Ceph file
openstack.config_template.config_template: openstack.config_template.config_template:
src: "ceph.conf.j2" src: "ceph.conf.j2"
dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf" dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
@@ -136,10 +137,10 @@
mode: "0644" mode: "0644"
config_type: ini config_type: ini
notify: notify:
- restart ceph mons - Restart ceph mons
- restart ceph osds - Restart ceph osds
- restart ceph mdss - Restart ceph mdss
- restart ceph rgws - Restart ceph rgws
- restart ceph mgrs - Restart ceph mgrs
- restart ceph rbdmirrors - Restart ceph rbdmirrors
- restart ceph rbd-target-api-gw - Restart ceph rbd-target-api-gw
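
This hunk bundles several distinct fixes. The bare `- block:` gains a `name:` (recent ansible-lint releases flag unnamed blocks under the `name` checks) and its `when:` is hoisted above `block:` to satisfy `key-order[task]`, which wants `block`/`rescue`/`always` last. The task name that embedded `{{ cluster }}` mid-string is rewritten because `name[template]` only permits Jinja at the very end of a name. And the `notify:` strings are recapitalized together with the handlers, since notify matches handler names literally. A condensed sketch:

# key-order[task]: name first, conditions before block
- name: Config file operations related to OSDs
  when: inventory_hostname in groups.get(osd_group_name, [])
  block:
    - name: Reset num_osds
      ansible.builtin.set_fact:
        num_osds: 0

# name[template]: no Jinja in the middle of a task name
- name: Generate Ceph file
  openstack.config_template.config_template:
    src: ceph.conf.j2
    dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
    mode: "0644"
    config_type: ini
  notify: Restart ceph mons   # must match the renamed handler exactly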

View File

@@ -1,6 +1,6 @@
--- ---
- name: create rados gateway instance directories - name: Create rados gateway instance directories
file: ansible.builtin.file:
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: directory state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@@ -8,8 +8,8 @@
mode: "{{ ceph_directories_mode | default('0755') }}" mode: "{{ ceph_directories_mode | default('0755') }}"
with_items: "{{ rgw_instances }}" with_items: "{{ rgw_instances }}"
- name: generate environment file - name: Generate environment file
copy: ansible.builtin.copy:
dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile" dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
owner: "root" owner: "root"
group: "root" group: "root"
@@ -19,4 +19,4 @@
with_items: "{{ rgw_instances }}" with_items: "{{ rgw_instances }}"
when: when:
- containerized_deployment | bool - containerized_deployment | bool
- rgw_instances is defined - rgw_instances is defined

View File

@@ -4,11 +4,11 @@ galaxy_info:
author: Sébastien Han author: Sébastien Han
description: Installs Ceph description: Installs Ceph
license: Apache license: Apache
min_ansible_version: 2.7 min_ansible_version: '2.7'
platforms: platforms:
- name: EL - name: EL
versions: versions:
- 7 - 'all'
galaxy_tags: galaxy_tags:
- system - system
dependencies: [] dependencies: []

View File

@@ -1,6 +1,6 @@
--- ---
- name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image" - name: Pulling Ceph container image
command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false changed_when: false
register: docker_image register: docker_image
until: docker_image.rc == 0 until: docker_image.rc == 0
@@ -12,8 +12,8 @@
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
NO_PROXY: "{{ ceph_docker_no_proxy }}" NO_PROXY: "{{ ceph_docker_no_proxy }}"
- name: "pulling alertmanager/prometheus/grafana container images" - name: Pulling alertmanager/prometheus/grafana container images
command: "{{ timeout_command }} {{ container_binary }} pull {{ item }}" ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ item }}"
changed_when: false changed_when: false
register: monitoring_images register: monitoring_images
until: monitoring_images.rc == 0 until: monitoring_images.rc == 0
@@ -31,8 +31,8 @@
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
NO_PROXY: "{{ ceph_docker_no_proxy }}" NO_PROXY: "{{ ceph_docker_no_proxy }}"
- name: "pulling node-exporter container image" - name: Pulling node-exporter container image
command: "{{ timeout_command }} {{ container_binary }} pull {{ node_exporter_container_image }}" ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ node_exporter_container_image }}"
changed_when: false changed_when: false
register: node_exporter_image register: node_exporter_image
until: node_exporter_image.rc == 0 until: node_exporter_image.rc == 0
@@ -54,27 +54,29 @@
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
NO_PROXY: "{{ ceph_docker_no_proxy }}" NO_PROXY: "{{ ceph_docker_no_proxy }}"
- name: export local ceph dev image - name: Export local ceph dev image
command: > ansible.builtin.command: >
{{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" {{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
"{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
delegate_to: localhost delegate_to: localhost
changed_when: false
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image) when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
run_once: true run_once: true
- name: copy ceph dev image file - name: Copy ceph dev image file
copy: ansible.builtin.copy:
src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
mode: "0644"
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool) when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
- name: load ceph dev image - name: Load ceph dev image
command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" ansible.builtin.command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
changed_when: false
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool) when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
- name: remove tmp ceph dev image file - name: Remove tmp ceph dev image file
file: ansible.builtin.file:
name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
state: absent state: absent
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool) when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
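
The `changed_when: false` lines added to the image export/load commands (and later to the dashboard `ceph config` calls) satisfy `no-changed-when`: a `command`/`shell` task must declare its change semantics, otherwise every run reports "changed" and idempotence reporting becomes meaningless. A sketch, with the tar path shortened for readability:

- name: Load ceph dev image
  ansible.builtin.command: "{{ container_binary }} load -i /tmp/dev-image.tar"   # hypothetical path
  changed_when: false   # the commit opts to report these steps as never changed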

View File

@@ -1,38 +1,39 @@
--- ---
- name: generate systemd ceph-mon target file - name: Generate systemd ceph-mon target file
copy: ansible.builtin.copy:
src: ceph.target src: ceph.target
dest: /etc/systemd/system/ceph.target dest: /etc/systemd/system/ceph.target
mode: "0644"
- name: enable ceph.target - name: Enable ceph.target
service: ansible.builtin.service:
name: ceph.target name: ceph.target
enabled: yes enabled: true
daemon_reload: yes daemon_reload: true
- name: include prerequisites.yml - name: Include prerequisites.yml
include_tasks: prerequisites.yml ansible.builtin.include_tasks: prerequisites.yml
- name: include registry.yml - name: Include registry.yml
include_tasks: registry.yml ansible.builtin.include_tasks: registry.yml
when: ceph_docker_registry_auth | bool when: ceph_docker_registry_auth | bool
- name: include fetch_image.yml - name: Include fetch_image.yml
include_tasks: fetch_image.yml ansible.builtin.include_tasks: fetch_image.yml
tags: fetch_container_image tags: fetch_container_image
- name: get ceph version - name: Get ceph version
command: > ansible.builtin.command: >
{{ container_binary }} run --rm --net=host --entrypoint /usr/bin/ceph {{ container_binary }} run --rm --net=host --entrypoint /usr/bin/ceph
{{ ceph_client_docker_registry }}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }} {{ ceph_client_docker_registry }}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }}
--version --version
changed_when: false changed_when: false
check_mode: no check_mode: false
register: ceph_version register: ceph_version
- name: set_fact ceph_version ceph_version.stdout.split - name: Set_fact ceph_version ceph_version.stdout.split
set_fact: ansible.builtin.set_fact:
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}" ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
- name: include release.yml - name: Include release.yml
include_tasks: release.yml ansible.builtin.include_tasks: release.yml

View File

@@ -1,52 +1,52 @@
--- ---
- name: lvmetad tasks related - name: Lvmetad tasks related
when: when:
- inventory_hostname in groups.get(osd_group_name, []) - inventory_hostname in groups.get(osd_group_name, [])
- lvmetad_disabled | default(False) | bool - lvmetad_disabled | default(False) | bool
- ansible_facts['os_family'] == 'RedHat' - ansible_facts['os_family'] == 'RedHat'
- ansible_facts['distribution_major_version'] | int == 7 - ansible_facts['distribution_major_version'] | int == 7
block: block:
- name: stop lvmetad - name: Stop lvmetad
service: ansible.builtin.service:
name: lvm2-lvmetad name: lvm2-lvmetad
state: stopped state: stopped
- name: disable and mask lvmetad service - name: Disable and mask lvmetad service
service: ansible.builtin.systemd:
name: lvm2-lvmetad name: lvm2-lvmetad
enabled: no enabled: false
masked: yes masked: true
- name: remove ceph udev rules - name: Remove ceph udev rules
file: ansible.builtin.file:
path: "{{ item }}" path: "{{ item }}"
state: absent state: absent
with_items: with_items:
- /usr/lib/udev/rules.d/95-ceph-osd.rules - /usr/lib/udev/rules.d/95-ceph-osd.rules
- /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules - /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules
- name: ensure tmpfiles.d is present - name: Ensure tmpfiles.d is present
lineinfile: ansible.builtin.lineinfile:
path: /etc/tmpfiles.d/ceph-common.conf path: /etc/tmpfiles.d/ceph-common.conf
line: "d /run/ceph 0770 root root -" line: "d /run/ceph 0770 root root -"
owner: root owner: root
group: root group: root
mode: 0644 mode: "0644"
state: present state: present
create: yes create: true
- name: restore certificates selinux context - name: Restore certificates selinux context
when: when:
- ansible_facts['os_family'] == 'RedHat' - ansible_facts['os_family'] == 'RedHat'
- inventory_hostname in groups.get(mon_group_name, []) - inventory_hostname in groups.get(mon_group_name, [])
or inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(rgw_group_name, [])
command: /usr/sbin/restorecon -RF /etc/pki/ca-trust/extracted ansible.builtin.command: /usr/sbin/restorecon -RF /etc/pki/ca-trust/extracted
changed_when: false changed_when: false
- name: install python3 on osd nodes - name: Install python3 on osd nodes
package: ansible.builtin.package:
name: python3 name: python3
state: present state: present
when: when:
- inventory_hostname in groups.get(osd_group_name, []) - inventory_hostname in groups.get(osd_group_name, [])
- ansible_facts['os_family'] == 'RedHat' - ansible_facts['os_family'] == 'RedHat'
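
The disable-and-mask task swaps `service:` for `ansible.builtin.systemd:`, which is more than an FQCN rewrite: `masked` is a parameter of the `systemd` module only, not of the generic `service` module, so ansible-lint's argument validation (the `args[module]` check) flags it on the old form. Sketch:

# args[module]: 'masked' is not an ansible.builtin.service parameter
- name: Disable and mask lvmetad service
  ansible.builtin.systemd:
    name: lvm2-lvmetad
    enabled: false
    masked: true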

View File

@@ -1,11 +1,11 @@
--- ---
- name: container registry authentication - name: Container registry authentication
command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry }}' ansible.builtin.command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry }}'
args: args:
stdin: '{{ ceph_docker_registry_password }}' stdin: '{{ ceph_docker_registry_password }}'
stdin_add_newline: no stdin_add_newline: false
changed_when: false changed_when: false
environment: environment:
HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}" HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
NO_PROXY: "{{ ceph_docker_no_proxy }}" NO_PROXY: "{{ ceph_docker_no_proxy }}"

View File

@ -1,45 +1,45 @@
--- ---
- name: set_fact ceph_release jewel - name: Set_fact ceph_release jewel
set_fact: ansible.builtin.set_fact:
ceph_release: jewel ceph_release: jewel
when: ceph_version.split('.')[0] is version('10', '==') when: ceph_version.split('.')[0] is version('10', '==')
- name: set_fact ceph_release kraken - name: Set_fact ceph_release kraken
set_fact: ansible.builtin.set_fact:
ceph_release: kraken ceph_release: kraken
when: ceph_version.split('.')[0] is version('11', '==') when: ceph_version.split('.')[0] is version('11', '==')
- name: set_fact ceph_release luminous - name: Set_fact ceph_release luminous
set_fact: ansible.builtin.set_fact:
ceph_release: luminous ceph_release: luminous
when: ceph_version.split('.')[0] is version('12', '==') when: ceph_version.split('.')[0] is version('12', '==')
- name: set_fact ceph_release mimic - name: Set_fact ceph_release mimic
set_fact: ansible.builtin.set_fact:
ceph_release: mimic ceph_release: mimic
when: ceph_version.split('.')[0] is version('13', '==') when: ceph_version.split('.')[0] is version('13', '==')
- name: set_fact ceph_release nautilus - name: Set_fact ceph_release nautilus
set_fact: ansible.builtin.set_fact:
ceph_release: nautilus ceph_release: nautilus
when: ceph_version.split('.')[0] is version('14', '==') when: ceph_version.split('.')[0] is version('14', '==')
- name: set_fact ceph_release octopus - name: Set_fact ceph_release octopus
set_fact: ansible.builtin.set_fact:
ceph_release: octopus ceph_release: octopus
when: ceph_version.split('.')[0] is version('15', '==') when: ceph_version.split('.')[0] is version('15', '==')
- name: set_fact ceph_release pacific - name: Set_fact ceph_release pacific
set_fact: ansible.builtin.set_fact:
ceph_release: pacific ceph_release: pacific
when: ceph_version.split('.')[0] is version('16', '==') when: ceph_version.split('.')[0] is version('16', '==')
- name: set_fact ceph_release quincy - name: Set_fact ceph_release quincy
set_fact: ansible.builtin.set_fact:
ceph_release: quincy ceph_release: quincy
when: ceph_version.split('.')[0] is version('17', '==') when: ceph_version.split('.')[0] is version('17', '==')
- name: set_fact ceph_release reef - name: Set_fact ceph_release reef
set_fact: ansible.builtin.set_fact:
ceph_release: reef ceph_release: reef
when: ceph_version.split('.')[0] is version('18', '==') when: ceph_version.split('.')[0] is version('18', '==')

View File

@@ -4,14 +4,14 @@ galaxy_info:
author: Guillaume Abrioux author: Guillaume Abrioux
description: Handles container installation prerequisites description: Handles container installation prerequisites
license: Apache license: Apache
min_ansible_version: 2.7 min_ansible_version: '2.7'
platforms: platforms:
- name: Ubuntu - name: Ubuntu
versions: versions:
- xenial - xenial
- name: EL - name: EL
versions: versions:
- 7 - 'all'
galaxy_tags: galaxy_tags:
- system - system
dependencies: [] dependencies: []

View File

@@ -1,4 +1,4 @@
--- ---
- name: include pre_requisites/prerequisites.yml - name: Include pre_requisites/prerequisites.yml
include_tasks: pre_requisites/prerequisites.yml ansible.builtin.include_tasks: pre_requisites/prerequisites.yml
when: not is_atomic | bool when: not is_atomic | bool

View File

@@ -1,31 +1,31 @@
--- ---
- name: uninstall old docker versions - name: Uninstall old docker versions
package: ansible.builtin.package:
name: ['docker', 'docker-engine', 'docker.io', 'containerd', 'runc'] name: ['docker', 'docker-engine', 'docker.io', 'containerd', 'runc']
state: absent state: absent
when: container_package_name == 'docker-ce' when: container_package_name == 'docker-ce'
- name: allow apt to use a repository over https (debian) - name: Allow apt to use a repository over https (debian)
package: ansible.builtin.package:
name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common'] name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common']
update_cache: yes update_cache: true
register: result register: result
until: result is succeeded until: result is succeeded
- name: add docker's gpg key - name: Add docker's gpg key
apt_key: ansible.builtin.apt_key:
url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg" url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg"
register: result register: result
until: result is succeeded until: result is succeeded
when: container_package_name == 'docker-ce' when: container_package_name == 'docker-ce'
- name: add docker repository - name: Add docker repository
apt_repository: ansible.builtin.apt_repository:
repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable" repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable"
when: container_package_name == 'docker-ce' when: container_package_name == 'docker-ce'
- name: add podman ppa repository - name: Add podman ppa repository
apt_repository: ansible.builtin.apt_repository:
repo: "ppa:projectatomic/ppa" repo: "ppa:projectatomic/ppa"
when: when:
- container_package_name == 'podman' - container_package_name == 'podman'

View File

@@ -1,54 +1,55 @@
--- ---
- name: include specific variables - name: Include specific variables
include_vars: "{{ item }}" ansible.builtin.include_vars: "{{ item }}"
with_first_found: with_first_found:
- "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml" - "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml"
- "{{ ansible_facts['os_family'] }}.yml" - "{{ ansible_facts['os_family'] }}.yml"
when: container_package_name is undefined and container_service_name is undefined when: container_package_name is undefined and container_service_name is undefined
- name: debian based systems tasks - name: Debian based systems tasks
include_tasks: debian_prerequisites.yml ansible.builtin.include_tasks: debian_prerequisites.yml
when: when:
- ansible_facts['os_family'] == 'Debian' - ansible_facts['os_family'] == 'Debian'
tags: with_pkg tags: with_pkg
- name: install container packages - name: Install container packages
package: ansible.builtin.package:
name: '{{ container_package_name }}' name: '{{ container_package_name }}'
update_cache: true update_cache: true
register: result register: result
until: result is succeeded until: result is succeeded
tags: with_pkg tags: with_pkg
- name: install lvm2 package - name: Install lvm2 package
package: ansible.builtin.package:
name: lvm2 name: lvm2
register: result register: result
until: result is succeeded until: result is succeeded
tags: with_pkg tags: with_pkg
when: inventory_hostname in groups.get(osd_group_name, []) when: inventory_hostname in groups.get(osd_group_name, [])
- name: extra configuration for docker - name: Extra configuration for docker
when: container_service_name == 'docker' when: container_service_name == 'docker'
block: block:
- name: create the systemd docker override directory - name: Create the systemd docker override directory
file: ansible.builtin.file:
path: /etc/systemd/system/docker.service.d path: /etc/systemd/system/docker.service.d
state: directory state: directory
mode: "0755"
when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined
- name: create the systemd docker override file - name: Create the systemd docker override file
template: ansible.builtin.template:
src: docker-proxy.conf.j2 src: docker-proxy.conf.j2
dest: /etc/systemd/system/docker.service.d/proxy.conf dest: /etc/systemd/system/docker.service.d/proxy.conf
mode: 0600 mode: "0600"
owner: root owner: root
group: root group: root
register: proxy_created register: proxy_created
when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined
- name: remove docker proxy configuration - name: Remove docker proxy configuration
file: ansible.builtin.file:
path: /etc/systemd/system/docker.service.d/proxy.conf path: /etc/systemd/system/docker.service.d/proxy.conf
state: absent state: absent
register: proxy_removed register: proxy_removed
@@ -60,17 +61,17 @@
# have an immediate effect and not wait the end of the play. # have an immediate effect and not wait the end of the play.
# using flush_handlers via the meta action plugin isn't enough too because # using flush_handlers via the meta action plugin isn't enough too because
# it flushes all handlers and not only the one notified in this role. # it flushes all handlers and not only the one notified in this role.
- name: restart docker - name: Restart docker
systemd: ansible.builtin.systemd:
name: "{{ container_service_name }}" name: "{{ container_service_name }}"
state: restarted state: restarted
daemon_reload: yes daemon_reload: true
when: proxy_created.changed | bool or proxy_removed.changed | bool when: proxy_created.changed | bool or proxy_removed.changed | bool
- name: start container service - name: Start container service
service: ansible.builtin.service:
name: '{{ container_service_name }}' name: '{{ container_service_name }}'
state: started state: started
enabled: yes enabled: true
tags: tags:
with_pkg with_pkg

View File

@@ -4,12 +4,11 @@ galaxy_info:
author: Guillaume Abrioux author: Guillaume Abrioux
description: Deploy ceph-crash description: Deploy ceph-crash
license: Apache license: Apache
min_ansible_version: 2.7 min_ansible_version: '2.7'
platforms: platforms:
- name: EL - name: EL
versions: versions:
- 7 - 'all'
- 8
galaxy_tags: galaxy_tags:
- system - system
dependencies: [] dependencies: []

View File

@@ -1,8 +1,8 @@
--- ---
- name: create and copy client.crash keyring - name: Create and copy client.crash keyring
when: cephx | bool when: cephx | bool
block: block:
- name: create client.crash keyring - name: Create client.crash keyring
ceph_key: ceph_key:
name: "client.crash" name: "client.crash"
caps: caps:
@@ -10,7 +10,7 @@
mgr: 'allow profile crash' mgr: 'allow profile crash'
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
dest: "{{ ceph_conf_key_directory }}" dest: "{{ ceph_conf_key_directory }}"
import_key: True import_key: true
mode: "{{ ceph_keyring_permissions }}" mode: "{{ ceph_keyring_permissions }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@@ -18,10 +18,10 @@
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
run_once: True run_once: true
no_log: "{{ no_log_on_ceph_key_tasks }}" no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: get keys from monitors - name: Get keys from monitors
ceph_key: ceph_key:
name: client.crash name: client.crash
cluster: "{{ cluster }}" cluster: "{{ cluster }}"
@@ -35,8 +35,8 @@
run_once: true run_once: true
no_log: "{{ no_log_on_ceph_key_tasks }}" no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: copy ceph key(s) if needed - name: Copy ceph key(s) if needed
copy: ansible.builtin.copy:
dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.client.crash.keyring" dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.client.crash.keyring"
content: "{{ _crash_keys.stdout + '\n' }}" content: "{{ _crash_keys.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@@ -44,24 +44,24 @@
mode: "{{ ceph_keyring_permissions }}" mode: "{{ ceph_keyring_permissions }}"
no_log: "{{ no_log_on_ceph_key_tasks }}" no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: start ceph-crash daemon - name: Start ceph-crash daemon
when: containerized_deployment | bool when: containerized_deployment | bool
block: block:
- name: create /var/lib/ceph/crash/posted - name: Create /var/lib/ceph/crash/posted
file: ansible.builtin.file:
path: /var/lib/ceph/crash/posted path: /var/lib/ceph/crash/posted
state: directory state: directory
mode: '0755' mode: '0755'
owner: "{{ ceph_uid }}" owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}" group: "{{ ceph_uid }}"
- name: include_tasks systemd.yml - name: Include_tasks systemd.yml
include_tasks: systemd.yml ansible.builtin.include_tasks: systemd.yml
- name: start the ceph-crash service - name: Start the ceph-crash service
systemd: ansible.builtin.systemd:
name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
state: started state: started
enabled: yes enabled: true
masked: no masked: false
daemon_reload: yes daemon_reload: true

View File

@@ -1,9 +1,9 @@
--- ---
- name: generate systemd unit file for ceph-crash container - name: Generate systemd unit file for ceph-crash container
template: ansible.builtin.template:
src: "{{ role_path }}/templates/ceph-crash.service.j2" src: "{{ role_path }}/templates/ceph-crash.service.j2"
dest: /etc/systemd/system/ceph-crash@.service dest: /etc/systemd/system/ceph-crash@.service
owner: "root" owner: "root"
group: "root" group: "root"
mode: "0644" mode: "0644"
notify: restart ceph crash notify: Restart ceph crash

View File

@@ -4,11 +4,11 @@ galaxy_info:
author: Boris Ranto author: Boris Ranto
description: Configures Ceph Dashboard description: Configures Ceph Dashboard
license: Apache license: Apache
min_ansible_version: 2.4 min_ansible_version: '2.4'
platforms: platforms:
- name: EL - name: EL
versions: versions:
- 7 - 'all'
galaxy_tags: galaxy_tags:
- system - system
dependencies: [] dependencies: []

View File

@@ -1,36 +1,38 @@
--- ---
- import_role: - name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts name: ceph-facts
tasks_from: container_binary.yml tasks_from: container_binary.yml
delegate_to: "{{ groups[mon_group_name][0] }}" delegate_to: "{{ groups[mon_group_name][0] }}"
delegate_facts: true delegate_facts: true
- name: set_fact container_exec_cmd - name: Set_fact container_exec_cmd
set_fact: ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool when: containerized_deployment | bool
- name: set_fact container_run_cmd - name: Set_fact container_run_cmd
set_fact: ansible.builtin.set_fact:
ceph_cmd: "{{ hostvars[groups[mon_group_name][0]]['container_binary'] + ' run --interactive --net=host --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}" ceph_cmd: "{{ hostvars[groups[mon_group_name][0]]['container_binary'] + ' run --interactive --net=host --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
- name: get current mgr backend - ipv4 - name: Get current mgr backend - ipv4
set_fact: ansible.builtin.set_fact:
dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(dashboard_network.split(',')) | first }}" dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(dashboard_network.split(',')) | first }}"
when: ip_version == 'ipv4' when: ip_version == 'ipv4'
loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}" loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}"
delegate_to: "{{ item }}" delegate_to: "{{ item }}"
delegate_facts: True delegate_facts: true
- name: get current mgr backend - ipv6 - name: Get current mgr backend - ipv6
set_fact: ansible.builtin.set_fact:
dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(dashboard_network.split(',')) | last }}" dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(dashboard_network.split(',')) | last }}"
when: ip_version == 'ipv6' when: ip_version == 'ipv6'
loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}" loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}"
delegate_to: "{{ item }}" delegate_to: "{{ item }}"
delegate_facts: True delegate_facts: true
- include_role: - name: Include ceph-facts role
ansible.builtin.include_role:
name: ceph-facts name: ceph-facts
tasks_from: set_radosgw_address.yml tasks_from: set_radosgw_address.yml
loop: "{{ groups.get(rgw_group_name, []) }}" loop: "{{ groups.get(rgw_group_name, []) }}"
@@ -39,100 +41,103 @@
     loop_var: ceph_dashboard_call_item
   when: inventory_hostname in groups.get(rgw_group_name, [])

-- name: disable SSL for dashboard
+- name: Disable SSL for dashboard
   when: dashboard_protocol == "http"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   block:
-    - name: get SSL status for dashboard
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get mgr mgr/dashboard/ssl"
+    - name: Get SSL status for dashboard
+      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get mgr mgr/dashboard/ssl"
       changed_when: false
       register: current_ssl_for_dashboard

-    - name: disable SSL for dashboard
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl false"
-      when: current_ssl_for_dashboard.stdout == "true"
+    - name: Disable SSL for dashboard
+      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl false"
+      changed_when: false
+      when: current_ssl_for_dashboard.stdout == "true"

-- name: with SSL for dashboard
+- name: With SSL for dashboard
   when: dashboard_protocol == "https"
   block:
-    - name: enable SSL for dashboard
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl true"
+    - name: Enable SSL for dashboard
+      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl true"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
+      changed_when: false

-    - name: copy dashboard SSL certificate file
-      copy:
+    - name: Copy dashboard SSL certificate file
+      ansible.builtin.copy:
         src: "{{ dashboard_crt }}"
         dest: "/etc/ceph/ceph-dashboard.crt"
         owner: root
         group: root
-        mode: 0440
+        mode: "0440"
         remote_src: "{{ dashboard_tls_external | bool }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       when: dashboard_crt | length > 0

-    - name: copy dashboard SSL certificate key
-      copy:
+    - name: Copy dashboard SSL certificate key
+      ansible.builtin.copy:
         src: "{{ dashboard_key }}"
         dest: "/etc/ceph/ceph-dashboard.key"
         owner: root
         group: root
-        mode: 0440
+        mode: "0440"
         remote_src: "{{ dashboard_tls_external | bool }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       when: dashboard_key | length > 0

-    - name: generate and copy self-signed certificate
+    - name: Generate and copy self-signed certificate
       when: dashboard_key | length == 0 or dashboard_crt | length == 0
       run_once: true
       block:
-        - name: set_fact subj_alt_names
-          set_fact:
+        - name: Set_fact subj_alt_names
+          ansible.builtin.set_fact:
             subj_alt_names: >
-              {% for host in groups[mgr_group_name] | default(groups[mon_group_name]) -%}
-              DNS:{{ hostvars[host]['ansible_facts']['hostname'] }},DNS:{{ hostvars[host]['ansible_facts']['fqdn'] }},IP:{{ hostvars[host]['dashboard_server_addr'] }}{% if not loop.last %},{% endif %}
-              {%- endfor -%}
+              {% for host in groups[mgr_group_name] | default(groups[mon_group_name]) -%} DNS:{{ hostvars[host]['ansible_facts']['hostname'] }},DNS:{{ hostvars[host]['ansible_facts']['fqdn'] }},IP:{{ hostvars[host]['dashboard_server_addr'] }}{% if not loop.last %},{% endif %}{%- endfor -%}

-        - name: create tempfile for openssl certificate and key generation
-          tempfile:
+        - name: Create tempfile for openssl certificate and key generation
+          ansible.builtin.tempfile:
             state: file
           register: openssl_config_file

-        - name: copy the openssl configuration file
-          copy:
+        - name: Copy the openssl configuration file
+          ansible.builtin.copy:
             src: "{{ '/etc/pki/tls/openssl.cnf' if ansible_facts['os_family'] == 'RedHat' else '/etc/ssl/openssl.cnf' }}"
             dest: '{{ openssl_config_file.path }}'
             remote_src: true
+            mode: "0644"

-        - name: add subjectAltName to the openssl configuration
-          ini_file:
+        - name: Add subjectAltName to the openssl configuration
+          community.general.ini_file:
             path: '{{ openssl_config_file.path }}'
             section: v3_ca
             option: subjectAltName
             value: '{{ subj_alt_names | trim }}'
+            mode: "0644"

-        - name: generate a Self Signed OpenSSL certificate for dashboard
-          shell: |
+        - name: Generate a Self Signed OpenSSL certificate for dashboard
+          ansible.builtin.shell: |
             test -f /etc/ceph/ceph-dashboard.key -a -f /etc/ceph/ceph-dashboard.crt || \
             openssl req -new -nodes -x509 -subj '/O=IT/CN={{ dashboard_certificate_cn }}/' -config {{ openssl_config_file.path }} -days 3650 -keyout /etc/ceph/ceph-dashboard.key -out /etc/ceph/ceph-dashboard.crt -extensions v3_ca
+          changed_when: false

-        - name: remove the openssl tempfile
-          file:
+        - name: Remove the openssl tempfile
+          ansible.builtin.file:
             path: '{{ openssl_config_file.path }}'
             state: absent

-        - name: slurp self-signed generated certificate for dashboard
-          slurp:
+        - name: Slurp self-signed generated certificate for dashboard
+          ansible.builtin.slurp:
             src: "/etc/ceph/{{ item }}"
-          run_once: True
+          run_once: true
           with_items:
             - 'ceph-dashboard.key'
             - 'ceph-dashboard.crt'
           register: slurp_self_signed_crt

-        - name: copy self-signed generated certificate on mons
-          copy:
+        - name: Copy self-signed generated certificate on mons
+          ansible.builtin.copy:
             dest: "{{ item.0.source }}"
             content: "{{ item.0.content | b64decode }}"
             owner: "{{ ceph_uid }}"
@@ -143,39 +148,39 @@
       - "{{ slurp_self_signed_crt.results }}"
       - "{{ groups[mon_group_name] }}"

-- name: import dashboard certificate file
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt"
+- name: Import dashboard certificate file
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true

-- name: import dashboard certificate key
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/key -i /etc/ceph/ceph-dashboard.key"
+- name: Import dashboard certificate key
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/key -i /etc/ceph/ceph-dashboard.key"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true

-- name: "set the dashboard port ({{ dashboard_port }})"
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/server_port {{ dashboard_port }}"
+- name: Set the dashboard port
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/server_port {{ dashboard_port }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true

-- name: "set the dashboard SSL port ({{ dashboard_port }})"
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl_server_port {{ dashboard_port }}"
+- name: Set the dashboard SSL port
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl_server_port {{ dashboard_port }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   changed_when: false
   failed_when: false # Do not fail if the option does not exist, it only exists post-14.2.0

-- name: config the current dashboard backend
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[item]['ansible_facts']['hostname'] }}/server_addr {{ hostvars[item]['dashboard_server_addr'] }}"
+- name: Config the current dashboard backend
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[item]['ansible_facts']['hostname'] }}/server_addr {{ hostvars[item]['dashboard_server_addr'] }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
   run_once: true
   with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'

-- name: disable mgr dashboard module (restart)
+- name: Disable mgr dashboard module (restart)
   ceph_mgr_module:
     name: dashboard
     cluster: "{{ cluster }}"
@@ -186,7 +191,7 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true

-- name: enable mgr dashboard module (restart)
+- name: Enable mgr dashboard module (restart)
   ceph_mgr_module:
     name: dashboard
     cluster: "{{ cluster }}"
@@ -197,7 +202,7 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true

-- name: create dashboard admin user
+- name: Create dashboard admin user
   ceph_dashboard_user:
     name: "{{ dashboard_admin_user }}"
     cluster: "{{ cluster }}"
@@ -209,30 +214,30 @@
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"

-- name: disable unused dashboard features
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard feature disable {{ item }}"
+- name: Disable unused dashboard features
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard feature disable {{ item }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   changed_when: false
   with_items: "{{ dashboard_disabled_features }}"

-- name: set grafana api user
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-username {{ grafana_admin_user }}"
+- name: Set grafana api user
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-username {{ grafana_admin_user }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   changed_when: false

-- name: set grafana api password
-  command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-grafana-api-password -i -"
+- name: Set grafana api password
+  ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-grafana-api-password -i -"
   args:
     stdin: "{{ grafana_admin_password }}"
-    stdin_add_newline: no
+    stdin_add_newline: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   changed_when: false

-- name: disable ssl verification for grafana
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-ssl-verify False"
+- name: Disable ssl verification for grafana
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-ssl-verify False"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   changed_when: false
@@ -240,101 +245,102 @@
     - dashboard_protocol == "https"
     - dashboard_grafana_api_no_ssl_verify | bool

-- name: set alertmanager host
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host http://{{ grafana_server_addrs | first }}:{{ alertmanager_port }}"
+- name: Set alertmanager host
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host http://{{ grafana_server_addrs | first }}:{{ alertmanager_port }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   changed_when: false

-- name: set prometheus host
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host http://{{ grafana_server_addrs | first }}:{{ prometheus_port }}"
+- name: Set prometheus host
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host http://{{ grafana_server_addrs | first }}:{{ prometheus_port }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   changed_when: false

-- include_tasks: configure_grafana_layouts.yml
+- name: Include grafana layout tasks
+  ansible.builtin.include_tasks: configure_grafana_layouts.yml
   with_items: '{{ grafana_server_addrs }}'
   vars:
     grafana_server_addr: '{{ item }}'

-- name: config monitoring api url vip
+- name: Config monitoring api url vip
   run_once: true
   block:
-    - name: config grafana api url vip
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_frontend_vip }}:{{ grafana_port }}"
+    - name: Config grafana api url vip
+      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_frontend_vip }}:{{ grafana_port }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       changed_when: false
       when: dashboard_frontend_vip is defined and dashboard_frontend_vip | length > 0

-    - name: config alertmanager api url
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ alertmanager_frontend_vip }}:{{ alertmanager_port }}"
+    - name: Config alertmanager api url
+      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ alertmanager_frontend_vip }}:{{ alertmanager_port }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       changed_when: false
       when: alertmanager_frontend_vip is defined and alertmanager_frontend_vip | length > 0

-    - name: config prometheus api url
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host {{ dashboard_protocol }}://{{ prometheus_frontend_vip }}:{{ prometheus_port }}"
+    - name: Config prometheus api url
+      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host {{ dashboard_protocol }}://{{ prometheus_frontend_vip }}:{{ prometheus_port }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       changed_when: false
       when: prometheus_frontend_vip is defined and prometheus_frontend_vip | length > 0

-- name: dashboard object gateway management frontend
+- name: Dashboard object gateway management frontend
   when: groups.get(rgw_group_name, []) | length > 0
   run_once: true
   delegate_to: "{{ groups[mon_group_name][0] }}"
   block:
-    - name: set the rgw credentials
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-credentials"
+    - name: Set the rgw credentials
+      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-credentials"
       changed_when: false
       register: result
       until: result is succeeded
       retries: 5

-    - name: set the rgw admin resource
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}"
+    - name: Set the rgw admin resource
+      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}"
       changed_when: false
       when: dashboard_rgw_api_admin_resource | length > 0

-    - name: disable ssl verification for rgw
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-ssl-verify False"
+    - name: Disable ssl verification for rgw
+      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-ssl-verify False"
       changed_when: false
       when:
         - dashboard_rgw_api_no_ssl_verify | bool
         - radosgw_frontend_ssl_certificate | length > 0

-- name: dashboard iscsi management
+- name: Dashboard iscsi management
   when: groups.get(iscsi_gw_group_name, []) | length > 0
   run_once: true
   block:
-    - name: disable iscsi api ssl verification
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-iscsi-api-ssl-verification false"
+    - name: Disable iscsi api ssl verification
+      ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-iscsi-api-ssl-verification false"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
       when:
         - api_secure | default(false) | bool
         - generate_crt | default(false) | bool

-    - name: add iscsi gateways - ipv4
-      command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
+    - name: Add iscsi gateways - ipv4
+      ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
       args:
         stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(igw_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
-        stdin_add_newline: no
+        stdin_add_newline: false
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items: "{{ groups[iscsi_gw_group_name] }}"
       when: ip_version == 'ipv4'

-    - name: add iscsi gateways - ipv6
-      command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
+    - name: Add iscsi gateways - ipv6
+      ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
       args:
         stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(igw_network.split(',')) | last | ansible.utils.ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
-        stdin_add_newline: no
+        stdin_add_newline: false
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items: "{{ groups[iscsi_gw_group_name] }}"
       when: ip_version == 'ipv6'

-- name: disable mgr dashboard module (restart)
+- name: Disable mgr dashboard module (restart)
   ceph_mgr_module:
     name: dashboard
     cluster: "{{ cluster }}"
@@ -345,7 +351,7 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true

-- name: enable mgr dashboard module (restart)
+- name: Enable mgr dashboard module (restart)
   ceph_mgr_module:
     name: dashboard
     cluster: "{{ cluster }}"

View File

@@ -1,12 +1,12 @@
 ---
-- name: set grafana url
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}"
+- name: Set grafana url
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   changed_when: false

-- name: inject grafana dashboard layouts
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update"
+- name: Inject grafana dashboard layouts
+  ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   changed_when: false
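These two tasks run once per Grafana server; the caller in configure_dashboard.yml (seen above) loops over grafana_server_addrs and passes each address in through vars, which is how grafana_server_addr resolves inside this file:

# calling convention, as used by configure_dashboard.yml
- name: Include grafana layout tasks
  ansible.builtin.include_tasks: configure_grafana_layouts.yml
  with_items: '{{ grafana_server_addrs }}'
  vars:
    grafana_server_addr: '{{ item }}'  # visible to the included tasks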

View File

@@ -1,8 +1,8 @@
 ---
-- name: include configure_dashboard.yml
-  include_tasks: configure_dashboard.yml
+- name: Include configure_dashboard.yml
+  ansible.builtin.include_tasks: configure_dashboard.yml

-- name: print dashboard URL
-  debug:
+- name: Print dashboard URL
+  ansible.builtin.debug:
     msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
   run_once: true

View File

@@ -66,7 +66,7 @@ adopt_label_group_names:
 # If configure_firewall is true, then ansible will try to configure the
 # appropriate firewalling rules so that Ceph daemons can communicate
 # with each others.
-configure_firewall: True
+configure_firewall: true

 # Open ports on corresponding nodes if firewall is installed on it
 ceph_mon_firewall_zone: public
@@ -112,7 +112,7 @@ ntp_daemon_type: chronyd
 # This variable determines if ceph packages can be updated. If False, the
 # package resources will use "state=present". If True, they will use
 # "state=latest".
-upgrade_ceph_packages: False
+upgrade_ceph_packages: false

 ceph_use_distro_backports: false # DEBIAN ONLY
 ceph_directories_mode: "0755"
@@ -163,7 +163,7 @@ libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubun
 # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
+# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"

 # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@@ -221,7 +221,7 @@ ceph_iscsi_config_dev: true # special repo for deploying iSCSI gateways
 # a URL to the .repo file to be installed on the targets. For deb,
 # ceph_custom_repo should be the URL to the repo base.
 #
-#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
+# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
 ceph_custom_repo: https://server.domain.com/ceph-custom-repo
@@ -230,14 +230,14 @@ ceph_custom_repo: https://server.domain.com/ceph-custom-repo
 # Enabled when ceph_repository == 'local'
 #
 # Path to DESTDIR of the ceph install
-#ceph_installation_dir: "/path/to/ceph_installation/"
+# ceph_installation_dir: "/path/to/ceph_installation/"
 # Whether or not to use installer script rundep_installer.sh
 # This script takes in rundep and installs the packages line by line onto the machine
 # If this is set to false then it is assumed that the machine ceph is being copied onto will already have
 # all runtime dependencies installed
-#use_installer: false
+# use_installer: false
 # Root directory for ceph-ansible
-#ansible_dir: "/path/to/ceph-ansible"
+# ansible_dir: "/path/to/ceph-ansible"

 ######################
@@ -320,12 +320,12 @@ monitor_address_block: subnet
 ip_version: ipv4

 mon_host_v1:
-  enabled: True
+  enabled: true
   suffix: ':6789'
 mon_host_v2:
   suffix: ':3300'

-enable_ceph_volume_debug: False
+enable_ceph_volume_debug: false

 ##########
 # CEPHFS #
@@ -397,7 +397,7 @@ email_address: foo@bar.com
 ## Testing mode
 # enable this mode _only_ when you have a single node
 # if you don't want it keep the option commented
-#common_single_host_mode: true
+# common_single_host_mode: true

 ## Handlers - restarting daemons after a config change
 # if for whatever reasons the content of your ceph configuration changes
@@ -519,16 +519,16 @@ ceph_docker_image: "ceph/daemon-base"
 ceph_docker_image_tag: latest-main
 ceph_docker_registry: quay.io
 ceph_docker_registry_auth: false
-#ceph_docker_registry_username:
-#ceph_docker_registry_password:
-#ceph_docker_http_proxy:
-#ceph_docker_https_proxy:
+# ceph_docker_registry_username:
+# ceph_docker_registry_password:
+# ceph_docker_http_proxy:
+# ceph_docker_https_proxy:
 ceph_docker_no_proxy: "localhost,127.0.0.1"
 ## Client only docker image - defaults to {{ ceph_docker_image }}
 ceph_client_docker_image: "{{ ceph_docker_image }}"
 ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
 ceph_client_docker_registry: "{{ ceph_docker_registry }}"
-containerized_deployment: False
+containerized_deployment: false
 container_binary:
 timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
@@ -555,7 +555,7 @@ openstack_config: false
 #   name: "images"
 #   rule_name: "my_replicated_rule"
 #   application: "rbd"
-#   pg_autoscale_mode: False
+#   pg_autoscale_mode: false
 #   pg_num: 16
 #   pgp_num: 16
 #   target_size_ratio: 0.2
@@ -605,7 +605,7 @@ openstack_keys:
 #############
 # DASHBOARD #
 #############
-dashboard_enabled: True
+dashboard_enabled: true
 # Choose http or https
 # For https, you should set dashboard.crt/key and grafana.crt/key
 # If you define the dashboard_crt and dashboard_key variables, but leave them as '',
@@ -617,7 +617,7 @@ dashboard_network: "{{ public_network }}"
 dashboard_admin_user: admin
 dashboard_admin_user_ro: false
 # This variable must be set with a strong custom password when dashboard_enabled is True
-#dashboard_admin_password: p@ssw0rd
+# dashboard_admin_password: p@ssw0rd
 # We only need this for SSL (https) connections
 dashboard_crt: ''
 dashboard_key: ''
@@ -626,7 +626,7 @@ dashboard_tls_external: false
 dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
 dashboard_rgw_api_user_id: ceph-dashboard
 dashboard_rgw_api_admin_resource: ''
-dashboard_rgw_api_no_ssl_verify: False
+dashboard_rgw_api_no_ssl_verify: false
 dashboard_frontend_vip: ''
 dashboard_disabled_features: []
 prometheus_frontend_vip: ''
@@ -635,7 +635,7 @@ node_exporter_container_image: "docker.io/prom/node-exporter:v0.17.0"
 node_exporter_port: 9100
 grafana_admin_user: admin
 # This variable must be set with a strong custom password when dashboard_enabled is True
-#grafana_admin_password: admin
+# grafana_admin_password: admin
 # We only need this for SSL (https) connections
 grafana_crt: ''
 grafana_key: ''
@@ -667,7 +667,7 @@ grafana_dashboard_files:
 grafana_plugins:
   - vonage-status-panel
   - grafana-piechart-panel
-grafana_allow_embedding: True
+grafana_allow_embedding: true
 grafana_port: 3000
 grafana_network: "{{ public_network }}"
 grafana_conf_overrides: {}
@@ -683,7 +683,7 @@ prometheus_port: 9092
 prometheus_conf_overrides: {}
 # Uncomment out this variable if you need to customize the retention period for prometheus storage.
 # set it to '30d' if you want to retain 30 days of data.
-#prometheus_storage_tsdb_retention_time: 15d
+# prometheus_storage_tsdb_retention_time: 15d
 alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2"
 alertmanager_container_cpu_period: 100000
 alertmanager_container_cpu_cores: 2
@@ -741,11 +741,11 @@ gateway_ip_list: 0.0.0.0
 #
 # Example:
 #
-#rbd_devices:
+# rbd_devices:
 # - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
 # - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
 # - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
 # - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
 rbd_devices: {}

 # client_connections defines the client ACL's to restrict client access to specific LUNs
@@ -759,20 +759,19 @@ rbd_devices: {}
 #
 # Example:
 #
-#client_connections:
+# client_connections:
 # - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
 # - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
 client_connections: {}

-no_log_on_ceph_key_tasks: True
+no_log_on_ceph_key_tasks: true

 ###############
 # DEPRECATION #
 ###############

-
 ######################################################
 # VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
 # *DO NOT* MODIFY THEM #
@@ -780,4 +779,4 @@ no_log_on_ceph_key_tasks: True

 container_exec_cmd:
 docker: false
 ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
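The defaults above move wholesale from True/False/no to lowercase true/false (yaml[truthy] rule) and from #var to # var in comments (yaml[comments] rule), with file modes kept as quoted strings so the octal values survive YAML parsing. User overrides should follow the same conventions; a minimal group_vars sketch with placeholder values:

# group_vars/all.yml (sketch, values are placeholders)
configure_firewall: true
containerized_deployment: false
dashboard_enabled: true
dashboard_admin_password: "replace-with-a-strong-password"
grafana_admin_password: "replace-with-a-strong-password"
ceph_directories_mode: "0755"  # quoted so the octal mode stays a string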

View File

@@ -4,14 +4,14 @@ galaxy_info:
   author: Sébastien Han
   description: Handles ceph-ansible default vars for all roles
   license: Apache
-  min_ansible_version: 2.7
+  min_ansible_version: '2.7'
   platforms:
     - name: Ubuntu
       versions:
         - xenial
     - name: EL
       versions:
-        - 7
+        - 'all'
   galaxy_tags:
     - system
 dependencies: []

Some files were not shown because too many files have changed in this diff.