drop iscsigw support

This service is no longer maintained.
Let's drop its support within ceph-ansible.

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
pull/7481/head
Guillaume Abrioux 2024-02-16 01:16:55 +01:00
parent 05c4d17d9a
commit dc75923367
124 changed files with 5 additions and 2356 deletions


@ -66,7 +66,6 @@ It means if you are pushing a patch modifying one of these files:
- `./roles/ceph-nfs/defaults/main.yml`
- `./roles/ceph-client/defaults/main.yml`
- `./roles/ceph-common/defaults/main.yml`
- `./roles/ceph-iscsi-gw/defaults/main.yml`
- `./roles/ceph-mon/defaults/main.yml`
- `./roles/ceph-rgw/defaults/main.yml`
- `./roles/ceph-container-common/defaults/main.yml`

Vagrantfile

@ -24,7 +24,6 @@ NNFSS = settings['nfs_vms']
GRAFANA = settings['grafana_server_vms']
NRBD_MIRRORS = settings['rbd_mirror_vms']
CLIENTS = settings['client_vms']
NISCSI_GWS = settings['iscsi_gw_vms']
MGRS = settings['mgr_vms']
PUBLIC_SUBNET = settings['public_subnet']
CLUSTER_SUBNET = settings['cluster_subnet']
@ -67,7 +66,6 @@ ansible_provision = proc do |ansible|
'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" },
'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" },
'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" },
'iscsigws' => (0..NISCSI_GWS - 1).map { |j| "#{LABEL_PREFIX}iscsi_gw#{j}" },
'mgrs' => (0..MGRS - 1).map { |j| "#{LABEL_PREFIX}mgr#{j}" },
'monitoring' => (0..GRAFANA - 1).map { |j| "#{LABEL_PREFIX}grafana#{j}" }
}
@ -560,50 +558,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
end
end
(0..NISCSI_GWS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}iscsi-gw#{i}" do |iscsi_gw|
iscsi_gw.vm.hostname = "#{LABEL_PREFIX}iscsi-gw#{i}"
if ASSIGN_STATIC_IP && !IPV6
iscsi_gw.vm.network :private_network,
:ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
end
# Virtualbox
iscsi_gw.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
end
# VMware
iscsi_gw.vm.provider :vmware_fusion do |v|
v.vmx['memsize'] = "#{MEMORY}"
end
# Libvirt
iscsi_gw.vm.provider :libvirt do |lv,override|
lv.memory = MEMORY
lv.random_hostname = true
if IPV6 then
override.vm.network :private_network,
:libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
:libvirt__ipv6_prefix => "64",
:libvirt__dhcp_enabled => false,
:libvirt__forward_mode => "veryisolated",
:libvirt__network_name => "ipv6-public-network",
:ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
:netmask => "64"
end
end
# Parallels
iscsi_gw.vm.provider "parallels" do |prl|
prl.name = "iscsi-gw#{i}"
prl.memory = "#{MEMORY}"
end
iscsi_gw.vm.provider :linode do |provider|
provider.label = iscsi_gw.vm.hostname
end
end
end
(0..NOSDS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
osd.vm.hostname = "#{LABEL_PREFIX}osd#{i}"


@ -10,7 +10,6 @@ rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS


@ -27,7 +27,6 @@ rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant


@ -11,7 +11,6 @@ rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
# SUBNET TO USE FOR THE VMS
# Use whatever private subnet your Openstack VMs are given


@ -8,7 +8,6 @@
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- "{{ nfs_group_name|default('nfss') }}"
- "{{ iscsi_gw_group_name|default('iscsigws') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
gather_facts: false
become: true


@ -50,7 +50,6 @@ to follow (most of them are 1 line settings).
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
For a deployment that needs 1 MON and 1 OSD, the list would look like:


@ -66,8 +66,6 @@ for role in "$basedir"/roles/ceph-*; do
output="ceph-fetch-keys.yml.sample"
elif [[ $rolename == "ceph-rbd-mirror" ]]; then
output="rbdmirrors.yml.sample"
elif [[ $rolename == "ceph-iscsi-gw" ]]; then
output="iscsigws.yml.sample"
elif [[ $rolename == "ceph-rgw-loadbalancer" ]]; then
output="rgwloadbalancers.yml.sample"
else


@ -54,7 +54,6 @@ dummy:
#nfs_group_name: nfss
#rbdmirror_group_name: rbdmirrors
#client_group_name: clients
#iscsi_gw_group_name: iscsigws
#mgr_group_name: mgrs
#rgwloadbalancer_group_name: rgwloadbalancers
#monitoring_group_name: monitoring
@ -66,7 +65,6 @@ dummy:
# - "{{ nfs_group_name }}"
# - "{{ rbdmirror_group_name }}"
# - "{{ client_group_name }}"
# - "{{ iscsi_gw_group_name }}"
# - "{{ mgr_group_name }}"
# - "{{ rgwloadbalancer_group_name }}"
# - "{{ monitoring_group_name }}"
@ -84,7 +82,6 @@ dummy:
#ceph_mds_firewall_zone: public
#ceph_nfs_firewall_zone: public
#ceph_rbdmirror_firewall_zone: public
#ceph_iscsi_firewall_zone: public
#ceph_dashboard_firewall_zone: public
#ceph_rgwloadbalancer_firewall_zone: public
@ -218,8 +215,6 @@ dummy:
# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous
#nfs_ganesha_flavor: "ceph_main"
#ceph_iscsi_config_dev: true # special repo for deploying iSCSI gateways
# REPOSITORY: CUSTOM
#
@ -703,75 +698,6 @@ dummy:
#alertmanager_cluster_port: 9094
#alertmanager_conf_overrides: {}
#alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}"
# igw
#
# `igw_network` variable is intended for allowing dashboard deployment with iSCSI node not residing in the same subnet than what is defined in `public_network`.
# For example:
# If the ceph public network is 2a00:8a60:1:c301::/64 and the iSCSI Gateway resides
# at a dedicated gateway network (2a00:8a60:1:c300::/64) (With routing between those networks).
# It means "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }}" will be empty.
# As a consequence, this prevent from deploying dashboard with iSCSI node when it reside in a subnet different than `public_network`.
# Using `igw_network` make it possible, set it with the subnet used by your iSCSI node.
#igw_network: "{{ public_network }}"
##################################
# DEPRECIATED iSCSI TARGET SETUP #
##################################
# WARNING #
# The following values are depreciated. To setup targets, gateways, LUNs, and
# clients you should use gwcli or dashboard. If the following values are set,
# the old ceph-iscsi-config/ceph-iscsi-cli packages will be used.
# Specify the iqn for ALL gateways. This iqn is shared across the gateways, so an iscsi
# client sees the gateway group as a single storage subsystem.
#gateway_iqn: ""
# gateway_ip_list provides a list of the IP Addrresses - one per gateway - that will be used
# as an iscsi target portal ip. The list must be comma separated - and the order determines
# the sequence of TPG's within the iscsi target across each gateway. Once set, additional
# gateways can be added, but the order must *not* be changed.
#gateway_ip_list: 0.0.0.0
# rbd_devices defines the images that should be created and exported from the iscsi gateways.
# If the rbd does not exist, it will be created for you. In addition you may increase the
# size of rbd's by changing the size parameter and rerunning the playbook. A size value lower
# than the current size of the rbd is ignored.
#
# the 'host' parameter defines which of the gateway nodes should handle the physical
# allocation/expansion or removal of the rbd
# to remove an image, simply use a state of 'absent'. This will first check the rbd is not allocated
# to any client, and the remove it from LIO and then delete the rbd image
#
# NB. this variable definition can be commented out to bypass LUN management
#
# Example:
#
# rbd_devices:
# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
#rbd_devices: {}
# client_connections defines the client ACL's to restrict client access to specific LUNs
# The settings are as follows;
# - image_list is a comma separated list of rbd images of the form <pool name>.<rbd_image_name>
# - chap supplies the user and password the client will use for authentication of the
# form <user>/<password>
# - status shows the intended state of this client definition - 'present' or 'absent'
#
# NB. this definition can be commented out to skip client (nodeACL) management
#
# Example:
#
# client_connections:
# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
#client_connections: {}
#no_log_on_ceph_key_tasks: true


@ -1,58 +0,0 @@
---
# Variables here are applicable to all host groups NOT roles
# This sample file generated by generate_group_vars_sample.sh
# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:
# You can override vars by using host or group vars
###########
# GENERAL #
###########
# Whether or not to generate secure certificate to iSCSI gateway nodes
#generate_crt: false
#iscsi_conf_overrides: {}
#iscsi_pool_name: rbd
# iscsi_pool_size: 3
#copy_admin_key: true
##################
# RBD-TARGET-API #
##################
# Optional settings related to the CLI/API service
#api_user: admin
#api_password: admin
#api_port: 5000
#api_secure: false
#loop_delay: 1
# set the variable below with a comma separated list of IPs
# in order to restrict the access to the iSCSI API
# trusted_ip_list: 192.168.122.1
##########
# DOCKER #
##########
# Resource limitation
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
# TCMU_RUNNER resource limitation
#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_tcmu_runner_docker_cpu_limit: 1
# RBD_TARGET_GW resource limitation
#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_target_gw_docker_cpu_limit: 1
# RBD_TARGET_API resource limitation
#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_target_api_docker_cpu_limit: 1
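
For context, a user-facing override of these (now removed) defaults typically lived in group_vars/iscsigws.yml. A minimal sketch follows; the keys come from the sample file above, and every value is illustrative only.

# Hypothetical group_vars/iscsigws.yml override of the removed defaults above.
# All values are examples, not recommendations.
generate_crt: true
iscsi_pool_name: rbd
api_user: admin
api_password: changeme
api_port: 5000
api_secure: true
trusted_ip_list: 192.168.122.1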


@ -54,7 +54,6 @@ dummy:
#nfs_group_name: nfss
#rbdmirror_group_name: rbdmirrors
#client_group_name: clients
#iscsi_gw_group_name: iscsigws
#mgr_group_name: mgrs
#rgwloadbalancer_group_name: rgwloadbalancers
#monitoring_group_name: monitoring
@ -66,7 +65,6 @@ dummy:
# - "{{ nfs_group_name }}"
# - "{{ rbdmirror_group_name }}"
# - "{{ client_group_name }}"
# - "{{ iscsi_gw_group_name }}"
# - "{{ mgr_group_name }}"
# - "{{ rgwloadbalancer_group_name }}"
# - "{{ monitoring_group_name }}"
@ -84,7 +82,6 @@ dummy:
#ceph_mds_firewall_zone: public
#ceph_nfs_firewall_zone: public
#ceph_rbdmirror_firewall_zone: public
#ceph_iscsi_firewall_zone: public
#ceph_dashboard_firewall_zone: public
#ceph_rgwloadbalancer_firewall_zone: public
@ -218,8 +215,6 @@ ceph_rhcs_version: 5
# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous
#nfs_ganesha_flavor: "ceph_main"
ceph_iscsi_config_dev: false
# REPOSITORY: CUSTOM
#
@ -703,75 +698,6 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert
#alertmanager_cluster_port: 9094
#alertmanager_conf_overrides: {}
#alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}"
# igw
#
# `igw_network` variable is intended for allowing dashboard deployment with iSCSI node not residing in the same subnet than what is defined in `public_network`.
# For example:
# If the ceph public network is 2a00:8a60:1:c301::/64 and the iSCSI Gateway resides
# at a dedicated gateway network (2a00:8a60:1:c300::/64) (With routing between those networks).
# It means "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }}" will be empty.
# As a consequence, this prevent from deploying dashboard with iSCSI node when it reside in a subnet different than `public_network`.
# Using `igw_network` make it possible, set it with the subnet used by your iSCSI node.
#igw_network: "{{ public_network }}"
##################################
# DEPRECIATED iSCSI TARGET SETUP #
##################################
# WARNING #
# The following values are depreciated. To setup targets, gateways, LUNs, and
# clients you should use gwcli or dashboard. If the following values are set,
# the old ceph-iscsi-config/ceph-iscsi-cli packages will be used.
# Specify the iqn for ALL gateways. This iqn is shared across the gateways, so an iscsi
# client sees the gateway group as a single storage subsystem.
#gateway_iqn: ""
# gateway_ip_list provides a list of the IP Addrresses - one per gateway - that will be used
# as an iscsi target portal ip. The list must be comma separated - and the order determines
# the sequence of TPG's within the iscsi target across each gateway. Once set, additional
# gateways can be added, but the order must *not* be changed.
#gateway_ip_list: 0.0.0.0
# rbd_devices defines the images that should be created and exported from the iscsi gateways.
# If the rbd does not exist, it will be created for you. In addition you may increase the
# size of rbd's by changing the size parameter and rerunning the playbook. A size value lower
# than the current size of the rbd is ignored.
#
# the 'host' parameter defines which of the gateway nodes should handle the physical
# allocation/expansion or removal of the rbd
# to remove an image, simply use a state of 'absent'. This will first check the rbd is not allocated
# to any client, and the remove it from LIO and then delete the rbd image
#
# NB. this variable definition can be commented out to bypass LUN management
#
# Example:
#
# rbd_devices:
# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
#rbd_devices: {}
# client_connections defines the client ACL's to restrict client access to specific LUNs
# The settings are as follows;
# - image_list is a comma separated list of rbd images of the form <pool name>.<rbd_image_name>
# - chap supplies the user and password the client will use for authentication of the
# form <user>/<password>
# - status shows the intended state of this client definition - 'present' or 'absent'
#
# NB. this definition can be commented out to skip client (nodeACL) management
#
# Example:
#
# client_connections:
# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
#client_connections: {}
#no_log_on_ceph_key_tasks: true


@ -42,7 +42,6 @@
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- "{{ nfs_group_name|default('nfss') }}"
- "{{ iscsi_gw_group_name|default('iscsigws') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
become: true
any_errors_fatal: true
@ -158,7 +157,6 @@
inventory_hostname in groups.get(rgw_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, []) or
inventory_hostname in groups.get(iscsi_gw_group_name, []) or
inventory_hostname in groups.get(nfs_group_name, [])
- name: Configure repository for installing cephadm
@ -448,9 +446,7 @@
inventory_hostname in groups.get(mds_group_name, []) or
inventory_hostname in groups.get(rgw_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, []) or
inventory_hostname in groups.get(iscsi_gw_group_name, [])
inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: Store existing rbd mirror peers in monitor config store
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
@ -636,69 +632,6 @@
- /etc/systemd/system/ceph-mgr@.service.d
- /etc/systemd/system/ceph-mgr.target
- name: Stop and remove legacy iscsigw daemons
hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
serial: 1
become: true
gather_facts: false
any_errors_fatal: true
tasks:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: Stop and disable iscsigw systemd services
ansible.builtin.service:
name: '{{ item }}'
state: stopped
enabled: false
failed_when: false
with_items:
- rbd-target-api
- rbd-target-gw
- tcmu-runner
- name: Reset failed iscsigw systemd units
ansible.builtin.command: 'systemctl reset-failed {{ item }}' # noqa command-instead-of-module
changed_when: false
failed_when: false
with_items:
- rbd-target-api
- rbd-target-gw
- tcmu-runner
when: containerized_deployment | bool
- name: Remove iscsigw systemd unit files
ansible.builtin.file:
path: '/etc/systemd/system/{{ item }}.service'
state: absent
with_items:
- rbd-target-api
- rbd-target-gw
- tcmu-runner
when: containerized_deployment | bool
- name: Redeploy iscsigw daemons
hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
become: true
gather_facts: false
any_errors_fatal: true
tasks:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: Update the placement of iscsigw hosts
ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply iscsi {{ iscsi_pool_name | default('rbd') }} {{ api_user | default('admin') }} {{ api_password | default('admin') }} {{ trusted_ip_list | default('192.168.122.1') }} --placement='{{ groups.get(iscsi_gw_group_name, []) | length }} label:{{ iscsi_gw_group_name }}'"
run_once: true
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: Set osd flags
hosts: "{{ osd_group_name|default('osds') }}"
become: true
@ -1434,7 +1367,6 @@
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- "{{ nfs_group_name|default('nfss') }}"
- "{{ iscsi_gw_group_name|default('iscsigws') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
become: true
gather_facts: false


@ -8,7 +8,6 @@
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- "{{ nfs_group_name|default('nfss') }}"
- "{{ iscsi_gw_group_name|default('iscsigws') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
become: true
gather_facts: false
@ -219,7 +218,6 @@
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- "{{ nfs_group_name|default('nfss') }}"
- "{{ iscsi_gw_group_name|default('iscsigws') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
become: true
gather_facts: false


@ -14,7 +14,6 @@
- nfss
- rbdmirrors
- clients
- iscsigws
- mgrs
- monitoring
@ -63,7 +62,6 @@
- "{{ rgw_group_name | default('rgws') }}"
- "{{ nfs_group_name | default('nfss') }}"
- "{{ mgr_group_name | default('mgrs') }}"
- "{{ iscsi_gw_group_name | default('iscsigws') }}"
- "{{ rbdmirror_group_name | default('rbdmirrors') }}"
- "{{ monitoring_group_name | default('monitoring') }}"
gather_facts: false
@ -117,7 +115,6 @@
inventory_hostname in groups.get(rgw_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, []) or
inventory_hostname in groups.get(iscsi_gw_group_name, []) or
inventory_hostname in groups.get(nfs_group_name, [])
- name: Pulling alertmanager/grafana/prometheus images from docker daemon
@ -150,12 +147,6 @@
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mon_group_name, [])
- name: Import ceph-iscsi-gw role
ansible.builtin.import_role:
name: ceph-iscsi-gw
tasks_from: systemd.yml
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: Import ceph-mds role
ansible.builtin.import_role:
name: ceph-mds


@ -9,7 +9,6 @@
- rbdmirrors
- clients
- mgrs
- iscsigws
gather_facts: false
become: true


@ -149,7 +149,6 @@
- clients
- mgrs
- monitoring
- iscsigws
become: true
tasks:
- name: Import ceph-defaults role
@ -1065,9 +1064,6 @@
paths: /run
patterns:
- "ceph-*.service-cid"
- "rbd-target-api.service-cid"
- "rbd-target-gw.service-cid"
- "tcmu-runner.service-cid"
- "node_exporter.service-cid"
- "prometheus.service-cid"
- "grafana-server.service-cid"


@ -1,97 +0,0 @@
---
- name: Confirm removal of the iSCSI gateway configuration
hosts: localhost
vars_prompt:
- name: purge_config # noqa: name[casing]
prompt: Which configuration elements should be purged? (all, lio or abort)
default: 'abort'
private: false
tasks:
- name: Exit playbook if user aborted the purge
ansible.builtin.fail:
msg: >
"You have aborted the purge of the iSCSI gateway configuration"
when: purge_config == 'abort'
- name: Set_fact igw_purge_type
ansible.builtin.set_fact:
igw_purge_type: "{{ purge_config }}"
- name: Stopping the gateways
hosts:
- iscsigws
become: true
tasks:
- name: Stopping and disabling iscsi daemons
ansible.builtin.service:
name: "{{ item }}"
state: stopped
enabled: false
with_items:
- rbd-target-gw
- rbd-target-api
- tcmu-runner
- name: Removing the gateway configuration
hosts:
- iscsigws
become: true
vars:
igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
tasks:
- name: Igw_purge | deleting configured rbd devices
igw_purge:
mode: "disks"
when: igw_purge_type == 'all'
run_once: true
- name: Igw_purge | purging the gateway configuration
igw_purge:
mode: "gateway"
run_once: true
- name: Restart and enable iscsi daemons
when: igw_purge_type == 'lio'
ansible.builtin.service:
name: "{{ item }}"
state: started
enabled: true
with_items:
- tcmu-runner
- rbd-target-api
- rbd-target-gw
- name: Remove the gateways from the ceph dashboard
hosts: mons
become: true
tasks:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: Iscsi gateways with ceph dashboard
when: dashboard_enabled | bool
run_once: true
block:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- name: Set_fact container_exec_cmd
ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: Get iscsi gateway list
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-list -f json"
changed_when: false
register: gateways
- name: Remove iscsi gateways
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-rm {{ item }}"
with_items: '{{ (gateways.stdout | from_json)["gateways"] }}'
changed_when: false


@ -57,7 +57,6 @@
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- "{{ nfs_group_name|default('nfss') }}"
- "{{ client_group_name|default('clients') }}"
- "{{ iscsi_gw_group_name|default('iscsigws') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
tags: always
any_errors_fatal: true
@ -1075,62 +1074,6 @@
ansible.builtin.import_role:
name: ceph-nfs
- name: Upgrade ceph iscsi gateway node
vars:
upgrade_ceph_packages: true
hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
tags: iscsigws
serial: 1
become: true
gather_facts: false
tasks:
# failed_when: false is here so that if we upgrade
# from a version of ceph that does not have iscsi gws
# then this task will not fail
- name: Stop ceph iscsi services
ansible.builtin.systemd:
name: '{{ item }}'
state: stopped
enabled: false
masked: true
failed_when: false
with_items:
- rbd-target-api
- rbd-target-gw
- tcmu-runner
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
- name: Import ceph-common role
ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- name: Import ceph-config role
ansible.builtin.import_role:
name: ceph-config
- name: Import ceph-iscsi-gw role
ansible.builtin.import_role:
name: ceph-iscsi-gw
- name: Upgrade ceph client node
vars:
upgrade_ceph_packages: true
@ -1245,7 +1188,6 @@
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- "{{ nfs_group_name|default('nfss') }}"
- "{{ iscsi_gw_group_name|default('iscsigws') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
tags: monitoring
gather_facts: false


@ -718,64 +718,6 @@
ansible.builtin.import_role:
name: ceph-nfs
- name: Switching from non-containerized to containerized iscsigws
hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
vars:
containerized_deployment: true
iscsi_gw_group_name: iscsigws
become: true
serial: 1
pre_tasks:
- name: Import ceph-defaults role
ansible.builtin.import_role:
name: ceph-defaults
- name: Stop iscsigw services
ansible.builtin.service:
name: "{{ item }}"
state: stopped
enabled: false
with_items:
- tcmu-runner
- rbd-target-gw
- rbd-target-api
- name: Remove old systemd unit files
ansible.builtin.file:
path: "/usr/lib/systemd/system/{{ item }}.service"
state: absent
with_items:
- tcmu-runner
- rbd-target-gw
- rbd-target-api
tasks:
- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
- name: Import ceph-handler role
ansible.builtin.import_role:
name: ceph-handler
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- name: Set proper ownership on ceph directories
ansible.builtin.command: "find /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
- name: Import ceph-container-engine role
ansible.builtin.import_role:
name: ceph-container-engine
- name: Import ceph-container-common role
ansible.builtin.import_role:
name: ceph-container-common
- name: Import ceph-iscsi-gw role
ansible.builtin.import_role:
name: ceph-iscsi-gw
- name: Switching from non-containerized to containerized ceph-crash
hosts:


@ -33,7 +33,6 @@
- rbdmirrors
- clients
- mgrs
- iscsi-gw
become: true
tasks:
- name: Import ceph-defaults role


@ -1,135 +0,0 @@
#!/usr/bin/env python
__author__ = 'pcuzner@redhat.com'
DOCUMENTATION = """
---
module: igw_client
short_description: Manage iscsi gateway client definitions
description:
- This module calls the 'client' configuration management module installed
on the iscsi gateway node to handle the definition of iscsi clients on the
gateway(s). This definition will setup iscsi authentication (e.g. chap),
and mask the required rbd images to the client.
The 'client' configuration module is provided by ceph-iscsi-config
rpm which is installed on the gateway nodes.
To support module debugging, this module logs to
/var/log/ansible-module-igw_config.log on the target machine(s).
option:
client_iqn:
description:
- iqn of the client machine which should be connected or removed from the
iscsi gateway environment
required: true
image_list:
description:
- comma separated string providing the rbd images that this
client definition should have. The rbd images provided must use the
following format <pool_name>.<rbd_image_name>
e.g. rbd.disk1,rbd.disk2
required: true
chap:
description:
- chap credentials for the client to authenticate to the gateways
to gain access to the exported rbds (LUNs). The credentials is a string
value of the form 'username/password'. The iscsi client must then use
these settings to gain access to any LUN resources.
required: true
state:
description:
- desired state for this client - absent or present
required: true
requirements: ['ceph-iscsi-config']
author:
- 'Paul Cuzner'
"""
import os # noqa: E402
import logging # noqa: E402
from logging.handlers import RotatingFileHandler # noqa: E402
from ansible.module_utils.basic import * # noqa: E402,F403
from ceph_iscsi_config.client import GWClient # noqa: E402
import ceph_iscsi_config.settings as settings # noqa: E402
# the main function is called ansible_main to allow the call stack
# to be checked to determine whether the call to the ceph_iscsi_config
# modules is from ansible or not
def ansible_main():
fields = {
"client_iqn": {"required": True, "type": "str"},
"image_list": {"required": True, "type": "str"},
"chap": {"required": True, "type": "str"},
"state": {
"required": True,
"choices": ['present', 'absent'],
"type": "str"
},
}
module = AnsibleModule(argument_spec=fields, # noqa: F405
supports_check_mode=False)
client_iqn = module.params['client_iqn']
if module.params['image_list']:
image_list = module.params['image_list'].split(',')
else:
image_list = []
chap = module.params['chap']
desired_state = module.params['state']
logger.info("START - Client configuration started : {}".format(client_iqn))
# The client is defined using the GWClient class. This class handles
# client attribute updates, rados configuration object updates and LIO
# settings. Since the logic is external to this custom module, clients
# can be created/deleted by other methods in the same manner.
client = GWClient(logger, client_iqn, image_list, chap)
if client.error:
module.fail_json(msg=client.error_msg)
client.manage(desired_state)
if client.error:
module.fail_json(msg=client.error_msg)
logger.info("END - Client configuration complete - {} "
"changes made".format(client.change_count))
changes_made = True if client.change_count > 0 else False
module.exit_json(changed=changes_made,
meta={"msg": "Client definition completed {} "
"changes made".format(client.change_count)})
if __name__ == '__main__':
module_name = os.path.basename(__file__).replace('ansible_module_', '')
logger = logging.getLogger(os.path.basename(module_name))
logger.setLevel(logging.DEBUG)
handler = RotatingFileHandler('/var/log/ansible-module-igw_config.log',
maxBytes=5242880,
backupCount=7)
log_fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s : '
'%(message)s')
handler.setFormatter(log_fmt)
logger.addHandler(handler)
# initialise global variables used by all called modules
# e.g. ceph conffile, keyring etc
settings.init()
ansible_main()
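
For reference, a playbook task consuming the removed igw_client module would have looked roughly like the sketch below. It is illustrative only: the argument names follow the argument_spec above, and the IQN, image list and CHAP values reuse the client_connections sample quoted in the removed group_vars defaults.

# Illustrative sketch only -- igw_client is removed by this change.
- name: Define an iscsi client ACL (example)
  igw_client:
    client_iqn: "iqn.1994-05.com.redhat:rh7-iscsi-client"
    image_list: "rbd.ansible1,rbd.ansible2"
    chap: "rh7-iscsi-client/redhat"
    state: present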


@ -1,145 +0,0 @@
#!/usr/bin/env python
__author__ = 'pcuzner@redhat.com'
DOCUMENTATION = """
---
module: igw_gateway
short_description: Manage the iscsi gateway definition
description:
- This module calls the 'gateway' configuration management module installed
on the iscsi gateway node(s) to handle the definition of iscsi gateways.
The module will configure;
* the iscsi target and target portal group (TPG)
* rbd maps to the gateway and registration of those rbds as LUNs to the
kernels LIO subsystem
The actual configuration modules are provided by ceph-iscsi-config rpm
which is installed on the gateway nodes.
To support module debugging, this module logs to
/var/log/ansible-module-igw_config.log on the target machine(s).
option:
gateway_iqn:
description:
- iqn that all gateway nodes will use to present a common system image
name to iscsi clients
required: true
gateway_ip_list:
description:
- comma separated string providing the IP addresses that will be used
as iSCSI portal IPs to accept iscsi client connections. Each IP address
should equate to an IP on a gateway node - typically dedicated to iscsi
traffic. The order of the IP addresses determines the TPG sequence
within the target definition - so once defined, new gateways can be
added but *must* be added to the end of this list to preserve the tpg
sequence
e.g. 192.168.122.101,192.168.122.103
required: true
mode:
description:
- mode in which to run the gateway module. Two modes are supported
target ... define the iscsi target iqn, tpg's and portals
map ...... map luns to the tpg's, and also define the ALUA path setting
for each LUN (activeOptimized/activenonoptimized)
required: true
requirements: ['ceph-iscsi-config']
author:
- 'Paul Cuzner'
"""
import os # noqa: E402
import logging # noqa: E402
from logging.handlers import RotatingFileHandler # noqa: E402
from ansible.module_utils.basic import * # noqa: E402,F403
import ceph_iscsi_config.settings as settings # noqa: E402
from ceph_iscsi_config.common import Config # noqa: E402
from ceph_iscsi_config.gateway import GWTarget # noqa: E402
from ceph_iscsi_config.utils import valid_ip # noqa: E402
# the main function is called ansible_main to allow the call stack
# to be checked to determine whether the call to the ceph_iscsi_config
# modules is from ansible or not
def ansible_main():
# Configures the gateway on the host. All images defined are added to
# the default tpg for later allocation to clients
fields = {"gateway_iqn": {"required": True, "type": "str"},
"gateway_ip_list": {"required": True}, # "type": "list"},
"mode": {
"required": True,
"choices": ['target', 'map']
}
}
module = AnsibleModule(argument_spec=fields, # noqa: F405
supports_check_mode=False)
cfg = Config(logger)
if cfg.config['version'] > 3:
module.fail_json(msg="Unsupported iscsigws.yml/iscsi-gws.yml setting "
"detected. Remove depreciated iSCSI target, LUN, "
"client, and gateway settings from "
"iscsigws.yml/iscsi-gws.yml. See "
"iscsigws.yml.sample for list of supported "
"settings")
gateway_iqn = module.params['gateway_iqn']
gateway_ip_list = module.params['gateway_ip_list'].split(',')
mode = module.params['mode']
if not valid_ip(gateway_ip_list):
module.fail_json(msg="Invalid gateway IP address(es) provided - port "
"22 check failed ({})".format(gateway_ip_list))
logger.info("START - GATEWAY configuration started - mode {}".format(mode))
gateway = GWTarget(logger, gateway_iqn, gateway_ip_list)
if gateway.error:
logger.critical("(ansible_main) Gateway init failed - "
"{}".format(gateway.error_msg))
module.fail_json(msg="iSCSI gateway initialisation failed "
"({})".format(gateway.error_msg))
gateway.manage(mode)
if gateway.error:
logger.critical("(main) Gateway creation or load failed, "
"unable to continue")
module.fail_json(msg="iSCSI gateway creation/load failure "
"({})".format(gateway.error_msg))
logger.info("END - GATEWAY configuration complete")
module.exit_json(changed=gateway.changes_made,
meta={"msg": "Gateway setup complete"})
if __name__ == '__main__':
module_name = os.path.basename(__file__).replace('ansible_module_', '')
logger = logging.getLogger(os.path.basename(module_name))
logger.setLevel(logging.DEBUG)
handler = RotatingFileHandler('/var/log/ansible-module-igw_config.log',
maxBytes=5242880,
backupCount=7)
log_fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s : '
'%(message)s')
handler.setFormatter(log_fmt)
logger.addHandler(handler)
# initialise global variables used by all called modules
# e.g. ceph conffile, keyring etc
settings.init()
ansible_main()
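
Similarly, the removed igw_gateway module was driven by a task along these lines. A sketch only: the IQN is a made-up placeholder, the portal IPs reuse the example addresses from the DOCUMENTATION block above, and mode must be either target or map.

# Illustrative sketch only -- igw_gateway is removed by this change.
# gateway_iqn is a hypothetical placeholder value.
- name: Create the shared iscsi target and TPGs (example)
  igw_gateway:
    gateway_iqn: "iqn.2003-01.com.example.iscsi-gw:ceph-igw"
    gateway_ip_list: "192.168.122.101,192.168.122.103"
    mode: target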


@ -1,168 +0,0 @@
#!/usr/bin/env python
__author__ = 'pcuzner@redhat.com'
DOCUMENTATION = """
---
module: igw_lun
short_description: Manage ceph rbd images to present as iscsi LUNs to clients
description:
- This module calls the 'lun' configuration management module installed
on the iscsi gateway node(s). The lun module handles the creation and resize # noqa: E501
of rbd images, and then maps these rbd devices to the gateway node(s) to be
exposed through the kernel's LIO target.
To support module debugging, this module logs to /var/log/ansible-module-igw_config.log # noqa: E501
on the target machine(s).
option:
pool:
description:
- The ceph pool where the image should exist or be created in.
NOTE - The pool *must* exist prior to the Ansible run.
required: true
image:
description:
- this is the rbd image name to create/resize - if the rbd does not exist it
is created for you with the settings optimised for exporting over iscsi.
required: true
size:
description:
- The size of the rbd image to create/resize. The size is numeric suffixed by
G or T (GB or TB). Increasing the size of a LUN is supported, but if a size
is provided that is smaller that the current size, the request is simply ignored.
e.g. 100G
required: true
host:
description:
- the host variable defines the name of the gateway node that will be
the allocation host for this rbd image. RBD creation and resize can
only be performed by one gateway, the other gateways in the
configuration will wait for the operation to complete.
required: true
features:
description:
- placeholder to potentially allow different rbd features to be set at
allocation time by Ansible. NOT CURRENTLY USED
required: false
state:
description:
- desired state for this LUN - absent or present. For a state='absent'
request, the lun module will verify that the rbd image is not allocated to
a client. As long as the rbd image is not in use, the LUN definition will be
removed from LIO, unmapped from all gateways AND DELETED.
USE WITH CARE!
required: true
requirements: ['ceph-iscsi-config']
author:
- 'Paul Cuzner'
"""
import os # noqa: E402
import logging # noqa: E402
from logging.handlers import RotatingFileHandler # noqa: E402
from ansible.module_utils.basic import * # noqa: E402,F403
from ceph_iscsi_config.lun import LUN # noqa: E402
from ceph_iscsi_config.utils import valid_size # noqa: E402
import ceph_iscsi_config.settings as settings # noqa: E402
# the main function is called ansible_main to allow the call stack
# to be checked to determine whether the call to the ceph_iscsi_config
# modules is from ansible or not
def ansible_main():
# Define the fields needs to create/map rbd's the the host(s)
# NB. features and state are reserved/unused
fields = {
"pool": {"required": False, "default": "rbd", "type": "str"},
"image": {"required": True, "type": "str"},
"size": {"required": True, "type": "str"},
"host": {"required": True, "type": "str"},
"features": {"required": False, "type": "str"},
"state": {
"required": False,
"default": "present",
"choices": ['present', 'absent'],
"type": "str"
},
}
# not supporting check mode currently
module = AnsibleModule(argument_spec=fields, # noqa: F405
supports_check_mode=False)
pool = module.params["pool"]
image = module.params['image']
size = module.params['size']
allocating_host = module.params['host']
desired_state = module.params['state']
################################################
# Validate the parameters passed from Ansible #
################################################
if not valid_size(size):
logger.critical("image '{}' has an invalid size specification '{}' "
"in the ansible configuration".format(image,
size))
module.fail_json(msg="(main) Unable to use the size parameter '{}' "
"for image '{}' from the playbook - "
"must be a number suffixed by M,G "
"or T".format(size,
image))
# define a lun object and perform some initial parameter validation
lun = LUN(logger, pool, image, size, allocating_host)
if lun.error:
module.fail_json(msg=lun.error_msg)
logger.info("START - LUN configuration started for {}/{}".format(pool,
image))
# attempt to create/allocate the LUN for LIO
lun.manage(desired_state)
if lun.error:
module.fail_json(msg=lun.error_msg)
if lun.num_changes == 0:
logger.info("END - No changes needed")
else:
logger.info("END - {} configuration changes "
"made".format(lun.num_changes))
module.exit_json(changed=(lun.num_changes > 0),
meta={"msg": "Configuration updated"})
if __name__ == '__main__':
module_name = os.path.basename(__file__).replace('ansible_module_', '')
logger = logging.getLogger(os.path.basename(module_name))
logger.setLevel(logging.DEBUG)
handler = RotatingFileHandler('/var/log/ansible-module-igw_config.log',
maxBytes=5242880,
backupCount=7)
log_fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s : '
'%(message)s')
handler.setFormatter(log_fmt)
logger.addHandler(handler)
# initialise global variables used by all called modules
# e.g. ceph conffile, keyring etc
settings.init()
ansible_main()
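
The removed igw_lun module followed the same pattern; a hedged sketch, with pool, image, size and host mirroring the rbd_devices example from the removed group_vars defaults.

# Illustrative sketch only -- igw_lun is removed by this change.
- name: Create and map an rbd image as a LUN (example)
  igw_lun:
    pool: rbd
    image: ansible1
    size: 30G
    host: ceph-1
    state: present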


@ -1,161 +0,0 @@
#!/usr/bin/env python
DOCUMENTATION = """
---
module: igw_purge
short_description: Provide a purge capability to remove an iSCSI gateway
environment
description:
- This module handles the removal of a gateway configuration from a ceph
environment.
The playbook that calls this module prompts the user for the type of purge
to perform.
The purge options are;
all ... purge all LIO configuration *and* delete all defined rbd images
lio ... purge only the LIO configuration (rbd's are left intact)
USE WITH CAUTION
To support module debugging, this module logs to
/var/log/ansible-module-igw_config.log on each target machine(s).
option:
mode:
description:
- the mode defines the type of purge requested
gateway ... remove the LIO configuration only
disks ... remove the rbd disks defined to the gateway
required: true
requirements: ['ceph-iscsi-config', 'python-rtslib']
author:
- 'Paul Cuzner'
"""
import os # noqa: E402
import logging # noqa: E402
import socket # noqa: E402,F401
import rados # noqa: E402
import rbd # noqa: E402
from logging.handlers import RotatingFileHandler # noqa: E402
from ansible.module_utils.basic import * # noqa: E402,F403
import ceph_iscsi_config.settings as settings # noqa: E402
from ceph_iscsi_config.common import Config # noqa: E402
from ceph_iscsi_config.lun import RBDDev # noqa: E402
__author__ = 'pcuzner@redhat.com'
def delete_images(cfg):
changes_made = False
for disk_name, disk in cfg.config['disks'].items():
image = disk['image']
logger.debug("Deleing image {}".format(image))
backstore = disk.get('backstore')
if backstore is None:
# ceph iscsi-config based.
rbd_dev = RBDDev(image, 0, disk['pool'])
else:
# ceph-iscsi based.
rbd_dev = RBDDev(image, 0, backstore, disk['pool'])
try:
rbd_dev.delete()
except rbd.ImageNotFound:
# Just log and ignore. If we crashed while purging we could delete
# the image but not removed it from the config
logger.debug("Image already deleted.")
except rbd.ImageHasSnapshots:
logger.error("Image still has snapshots.")
# Older versions of ceph-iscsi-config do not have a error_msg
# string.
if not rbd_dev.error_msg:
rbd_dev.error_msg = "Image has snapshots."
if rbd_dev.error:
if rbd_dev.error_msg:
logger.error("Could not remove {}. Error: {}. Manually run the " # noqa: E501
"rbd command line tool to delete.".
format(image, rbd_dev.error_msg))
else:
logger.error("Could not remove {}. Manually run the rbd "
"command line tool to delete.".format(image))
else:
changes_made = True
return changes_made
def delete_gateway_config(cfg, module):
ioctx = cfg._open_ioctx()
try:
size, mtime = ioctx.stat(cfg.config_name)
except rados.ObjectNotFound:
logger.debug("gateway.conf already removed.")
return False
try:
ioctx.remove_object(cfg.config_name)
except Exception as err:
module.fail_json(msg="Gateway config object failed: {}".format(err))
return True
def ansible_main():
fields = {"mode": {"required": True,
"type": "str",
"choices": ["gateway", "disks"]
}
}
module = AnsibleModule(argument_spec=fields, # noqa: F405
supports_check_mode=False)
run_mode = module.params['mode']
changes_made = False
logger.info("START - GATEWAY configuration PURGE started, run mode "
"is {}".format(run_mode))
cfg = Config(logger)
#
# Purge gateway configuration, if the config has gateways
if run_mode == 'gateway':
changes_made = delete_gateway_config(cfg, module)
elif run_mode == 'disks' and len(cfg.config['disks'].keys()) > 0:
#
# Remove the disks on this host, that have been registered in the
# config object
changes_made = delete_images(cfg)
logger.info("END - GATEWAY configuration PURGE complete")
module.exit_json(changed=changes_made,
meta={"msg": "Purge of iSCSI settings ({}) "
"complete".format(run_mode)})
if __name__ == '__main__':
module_name = os.path.basename(__file__).replace('ansible_module_', '')
logger = logging.getLogger(os.path.basename(module_name))
logger.setLevel(logging.DEBUG)
handler = RotatingFileHandler('/var/log/ansible-module-igw_config.log',
maxBytes=5242880,
backupCount=7)
log_fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s : '
'%(message)s')
handler.setFormatter(log_fmt)
logger.addHandler(handler)
settings.init()
ansible_main()


@ -29,7 +29,6 @@ class CallbackModule(CallbackBase):
'installer_phase_ceph_nfs',
'installer_phase_ceph_rbdmirror',
'installer_phase_ceph_client',
'installer_phase_ceph_iscsi_gw',
'installer_phase_ceph_rgw_loadbalancer',
'installer_phase_ceph_dashboard',
'installer_phase_ceph_grafana',
@ -71,10 +70,6 @@ class CallbackModule(CallbackBase):
'title': 'Install Ceph Client',
'playbook': 'roles/ceph-client/tasks/main.yml'
},
'installer_phase_ceph_iscsi_gw': {
'title': 'Install Ceph iSCSI Gateway',
'playbook': 'roles/ceph-iscsi-gw/tasks/main.yml'
},
'installer_phase_ceph_rgw_loadbalancer': {
'title': 'Install Ceph RGW LoadBalancer',
'playbook': 'roles/ceph-rgw-loadbalancer/tasks/main.yml'


@ -1,6 +1,5 @@
ceph_repository: rhcs
ceph_origin: repository
ceph_iscsi_config_dev: false
ceph_rhcs_version: 5
containerized_deployment: true
ceph_docker_image: "rhceph/rhceph-5-rhel8"


@ -9,7 +9,6 @@
or rgw_group_name in group_names
or mds_group_name in group_names
or nfs_group_name in group_names
or iscsi_gw_group_name in group_names
or client_group_name in group_names
or rbdmirror_group_name in group_names
or monitoring_group_name in group_names


@ -143,4 +143,3 @@
- Restart ceph rgws
- Restart ceph mgrs
- Restart ceph rbdmirrors
- Restart ceph rbd-target-api-gw


@ -47,7 +47,6 @@
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, []) or
inventory_hostname in groups.get(nfs_group_name, []) or
inventory_hostname in groups.get(iscsi_gw_group_name, []) or
inventory_hostname in groups.get(monitoring_group_name, [])
environment:
HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"


@ -308,38 +308,6 @@
- dashboard_rgw_api_no_ssl_verify | bool
- radosgw_frontend_ssl_certificate | length > 0
- name: Dashboard iscsi management
when: groups.get(iscsi_gw_group_name, []) | length > 0
run_once: true
block:
- name: Disable iscsi api ssl verification
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-iscsi-api-ssl-verification false"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- api_secure | default(false) | bool
- generate_crt | default(false) | bool
- name: Add iscsi gateways - ipv4
ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
args:
stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(igw_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
stdin_add_newline: false
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ groups[iscsi_gw_group_name] }}"
when: ip_version == 'ipv4'
- name: Add iscsi gateways - ipv6
ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
args:
stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(igw_network.split(',')) | last | ansible.utils.ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
stdin_add_newline: false
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ groups[iscsi_gw_group_name] }}"
when: ip_version == 'ipv6'
- name: Disable mgr dashboard module (restart)
ceph_mgr_module:
name: dashboard


@ -46,7 +46,6 @@ mds_group_name: mdss
nfs_group_name: nfss
rbdmirror_group_name: rbdmirrors
client_group_name: clients
iscsi_gw_group_name: iscsigws
mgr_group_name: mgrs
rgwloadbalancer_group_name: rgwloadbalancers
monitoring_group_name: monitoring
@ -58,7 +57,6 @@ adopt_label_group_names:
- "{{ nfs_group_name }}"
- "{{ rbdmirror_group_name }}"
- "{{ client_group_name }}"
- "{{ iscsi_gw_group_name }}"
- "{{ mgr_group_name }}"
- "{{ rgwloadbalancer_group_name }}"
- "{{ monitoring_group_name }}"
@ -76,7 +74,6 @@ ceph_rgw_firewall_zone: public
ceph_mds_firewall_zone: public
ceph_nfs_firewall_zone: public
ceph_rbdmirror_firewall_zone: public
ceph_iscsi_firewall_zone: public
ceph_dashboard_firewall_zone: public
ceph_rgwloadbalancer_firewall_zone: public
@ -210,8 +207,6 @@ nfs_ganesha_dev: false # use development repos for nfs-ganesha
# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous
nfs_ganesha_flavor: "ceph_main"
ceph_iscsi_config_dev: true # special repo for deploying iSCSI gateways
# REPOSITORY: CUSTOM
#
@ -695,75 +690,6 @@ alertmanager_port: 9093
alertmanager_cluster_port: 9094
alertmanager_conf_overrides: {}
alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}"
# igw
#
# `igw_network` variable is intended for allowing dashboard deployment with iSCSI node not residing in the same subnet than what is defined in `public_network`.
# For example:
# If the ceph public network is 2a00:8a60:1:c301::/64 and the iSCSI Gateway resides
# at a dedicated gateway network (2a00:8a60:1:c300::/64) (With routing between those networks).
# It means "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }}" will be empty.
# As a consequence, this prevent from deploying dashboard with iSCSI node when it reside in a subnet different than `public_network`.
# Using `igw_network` make it possible, set it with the subnet used by your iSCSI node.
igw_network: "{{ public_network }}"
##################################
# DEPRECIATED iSCSI TARGET SETUP #
##################################
# WARNING #
# The following values are depreciated. To setup targets, gateways, LUNs, and
# clients you should use gwcli or dashboard. If the following values are set,
# the old ceph-iscsi-config/ceph-iscsi-cli packages will be used.
# Specify the iqn for ALL gateways. This iqn is shared across the gateways, so an iscsi
# client sees the gateway group as a single storage subsystem.
gateway_iqn: ""
# gateway_ip_list provides a list of the IP Addrresses - one per gateway - that will be used
# as an iscsi target portal ip. The list must be comma separated - and the order determines
# the sequence of TPG's within the iscsi target across each gateway. Once set, additional
# gateways can be added, but the order must *not* be changed.
gateway_ip_list: 0.0.0.0
# rbd_devices defines the images that should be created and exported from the iscsi gateways.
# If the rbd does not exist, it will be created for you. In addition you may increase the
# size of rbd's by changing the size parameter and rerunning the playbook. A size value lower
# than the current size of the rbd is ignored.
#
# the 'host' parameter defines which of the gateway nodes should handle the physical
# allocation/expansion or removal of the rbd
# to remove an image, simply use a state of 'absent'. This will first check the rbd is not allocated
# to any client, and the remove it from LIO and then delete the rbd image
#
# NB. this variable definition can be commented out to bypass LUN management
#
# Example:
#
# rbd_devices:
# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
rbd_devices: {}
# client_connections defines the client ACL's to restrict client access to specific LUNs
# The settings are as follows;
# - image_list is a comma separated list of rbd images of the form <pool name>.<rbd_image_name>
# - chap supplies the user and password the client will use for authentication of the
# form <user>/<password>
# - status shows the intended state of this client definition - 'present' or 'absent'
#
# NB. this definition can be commented out to skip client (nodeACL) management
#
# Example:
#
# client_connections:
# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
client_connections: {}
no_log_on_ceph_key_tasks: true


@ -225,11 +225,6 @@
ansible.builtin.include_tasks: set_radosgw_address.yml
when: inventory_hostname in groups.get(rgw_group_name, [])
- name: Set_fact use_new_ceph_iscsi package or old ceph-iscsi-config/cli
ansible.builtin.set_fact:
use_new_ceph_iscsi: "{{ (gateway_ip_list == '0.0.0.0' and gateway_iqn | length == 0 and client_connections | length == 0 and rbd_devices | length == 0) | bool | ternary(true, false) }}"
when: iscsi_gw_group_name in group_names
- name: Set_fact ceph_run_cmd
ansible.builtin.set_fact:
ceph_run_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"


@ -54,16 +54,6 @@
when: mgr_group_name in group_names
listen: "Restart ceph mgrs"
- name: Tcmu-runner handler
ansible.builtin.include_tasks: handler_tcmu_runner.yml
when: iscsi_gw_group_name in group_names
listen: "Restart ceph tcmu-runner"
- name: Rbd-target-api and rbd-target-gw handler
ansible.builtin.include_tasks: handler_rbd_target_api_gw.yml
when: iscsi_gw_group_name in group_names
listen: "Restart ceph rbd-target-api-gw"
- name: Ceph crash handler
ansible.builtin.include_tasks: handler_crash.yml
listen: "Restart ceph crash"


@ -55,30 +55,6 @@
check_mode: false
when: inventory_hostname in groups.get(nfs_group_name, [])
- name: Check for a tcmu-runner container
ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'"
register: ceph_tcmu_runner_stat
changed_when: false
failed_when: false
check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: Check for a rbd-target-api container
ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'"
register: ceph_rbd_target_api_stat
changed_when: false
failed_when: false
check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: Check for a rbd-target-gw container
ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'"
register: ceph_rbd_target_gw_stat
changed_when: false
failed_when: false
check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: Check for a ceph-crash container
ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'"
register: ceph_crash_container_stat


@ -205,30 +205,6 @@
check_mode: false
when: inventory_hostname in groups.get(nfs_group_name, [])
- name: Check for a tcmu-runner
ansible.builtin.command: "pgrep tcmu-runner"
register: ceph_tcmu_runner_stat
changed_when: false
failed_when: false
check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: Check for a rbd-target-api
ansible.builtin.command: "pgrep rbd-target-api"
register: ceph_rbd_target_api_stat
changed_when: false
failed_when: false
check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: Check for a rbd-target-gw
ansible.builtin.command: "pgrep name=rbd-target-gw"
register: ceph_rbd_target_gw_stat
changed_when: false
failed_when: false
check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: Check for a ceph-crash process
ansible.builtin.command: pgrep ceph-crash
changed_when: false


@ -1,40 +0,0 @@
---
- name: Set _rbd_target_api_handler_called before restart
ansible.builtin.set_fact:
_rbd_target_api_handler_called: true
- name: Restart rbd-target-api
ansible.builtin.service:
name: rbd-target-api
state: restarted
when:
- ceph_rbd_target_api_stat.get('rc') == 0
- hostvars[item]['_rbd_target_api_handler_called'] | default(False) | bool
- ceph_rbd_target_api_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
run_once: true
- name: Set _rbd_target_api_handler_called after restart
ansible.builtin.set_fact:
_rbd_target_api_handler_called: false
- name: Set _rbd_target_gw_handler_called before restart
ansible.builtin.set_fact:
_rbd_target_gw_handler_called: true
- name: Restart rbd-target-gw
ansible.builtin.service:
name: rbd-target-gw
state: restarted
when:
- ceph_rbd_target_gw_stat.get('rc') == 0
- hostvars[item]['_rbd_target_gw_handler_called'] | default(False) | bool
- ceph_rbd_target_gw_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
run_once: true
- name: Set _rbd_target_gw_handler_called after restart
ansible.builtin.set_fact:
_rbd_target_gw_handler_called: false


@ -1,20 +0,0 @@
---
- name: Set _tcmu_runner_handler_called before restart
ansible.builtin.set_fact:
_tcmu_runner_handler_called: true
- name: Restart tcmu-runner
ansible.builtin.service:
name: tcmu-runner
state: restarted
when:
- ceph_tcmu_runner_stat.get('rc') == 0
- hostvars[item]['_tcmu_runner_handler_called'] | default(False) | bool
- ceph_tcmu_runner_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
run_once: true
- name: Set _tcmu_runner_handler_called after restart
ansible.builtin.set_fact:
_tcmu_runner_handler_called: false


@ -219,51 +219,6 @@
- rbdmirror_group_name is defined
- rbdmirror_group_name in group_names
- name: Open ceph networks on iscsi
ansible.posix.firewalld:
zone: "{{ ceph_iscsi_firewall_zone }}"
source: "{{ item }}"
permanent: true
immediate: true
state: enabled
with_items: "{{ public_network.split(',') }}"
when:
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
- name: Open iscsi target ports
ansible.posix.firewalld:
port: "3260/tcp"
zone: "{{ ceph_iscsi_firewall_zone }}"
permanent: true
immediate: true
state: enabled
when:
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
- name: Open iscsi api ports
ansible.posix.firewalld:
port: "{{ api_port | default(5000) }}/tcp"
zone: "{{ ceph_iscsi_firewall_zone }}"
permanent: true
immediate: true
state: enabled
when:
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
- name: Open iscsi/prometheus port
ansible.posix.firewalld:
port: "9287/tcp"
zone: "{{ ceph_iscsi_firewall_zone }}"
permanent: true
immediate: true
state: enabled
when:
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
- name: Open dashboard ports
ansible.builtin.include_tasks: dashboard_firewall.yml
when: dashboard_enabled | bool


@ -34,8 +34,7 @@
inventory_hostname in groups.get(mds_group_name, []) or
inventory_hostname in groups.get(rgw_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, []) or
inventory_hostname in groups.get(iscsi_gw_group_name, [])
inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: Add logrotate configuration
ansible.builtin.template:
@ -51,5 +50,4 @@
inventory_hostname in groups.get(mds_group_name, []) or
inventory_hostname in groups.get(rgw_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, []) or
inventory_hostname in groups.get(iscsi_gw_group_name, [])
inventory_hostname in groups.get(rbdmirror_group_name, [])


@ -10,16 +10,3 @@
notifempty
su root root
}
/var/log/tcmu-runner/*.log {
rotate 7
daily
compress
sharedscripts
postrotate
killall -q -1 tcmu-runner || pkill -1 -x "tcmu-runner" || true
endscript
missingok
notifempty
su root root
}


@ -1,13 +0,0 @@
Copyright 2016 Paul Cuzner pcuzner at redhat dot com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,3 +0,0 @@
# Ansible role: ceph-iscsi
Documentation is available at http://docs.ceph.com/ceph-ansible/.


@ -1,49 +0,0 @@
---
# You can override vars by using host or group vars
###########
# GENERAL #
###########
# Whether or not to generate a self-signed SSL certificate for the iSCSI gateway nodes
generate_crt: false
iscsi_conf_overrides: {}
iscsi_pool_name: rbd
# iscsi_pool_size: 3
copy_admin_key: true
##################
# RBD-TARGET-API #
##################
# Optional settings related to the CLI/API service
api_user: admin
api_password: admin
api_port: 5000
api_secure: false
loop_delay: 1
# Set the variable below to a comma-separated list of IPs
# in order to restrict access to the iSCSI API
# trusted_ip_list: 192.168.122.1
##########
# DOCKER #
##########
# Resource limitation
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
# TCMU_RUNNER resource limitation
ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_tcmu_runner_docker_cpu_limit: 1
# RBD_TARGET_GW resource limitation
ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_rbd_target_gw_docker_cpu_limit: 1
# RBD_TARGET_API resource limitation
ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_rbd_target_api_docker_cpu_limit: 1
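For reference, these defaults were meant to be overridden from the inventory rather than edited inside the role. A minimal group_vars sketch, using only the variable names defined above (the values are invented for illustration):
# group_vars/iscsigws.yml (hypothetical example)
generate_crt: true                             # have the role create self-signed SSL files and copy them to /etc/ceph
api_user: admin
api_password: changeme                         # replace the shipped default password
api_secure: true                               # needs iscsi-gateway.crt/.key present on every gateway node
trusted_ip_list: 192.168.122.1,192.168.122.2   # comma-separated list restricting access to the API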


@ -1,14 +0,0 @@
---
galaxy_info:
company: Red Hat
author: Paul Cuzner
description: Installs Ceph iSCSI Gateways
license: Apache
min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- 'all'
galaxy_tags:
- system
dependencies: []


@ -1,72 +0,0 @@
---
- name: Get keys from monitors
ceph_key:
name: client.admin
cluster: "{{ cluster }}"
output_format: plain
state: info
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: _admin_key
delegate_to: "{{ groups.get(mon_group_name)[0] }}"
run_once: true
when:
- cephx | bool
- copy_admin_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: Copy ceph key(s) if needed
ansible.builtin.copy:
dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
content: "{{ _admin_key.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
when:
- cephx | bool
- copy_admin_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
- name: Add mgr ip address to trusted list with dashboard - ipv4
ansible.builtin.set_fact:
trusted_ip_list: '{{ trusted_ip_list | default("") }}{{ "," if trusted_ip_list is defined else "" }}{{ hostvars[item]["ansible_facts"]["all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
when:
- dashboard_enabled | bool
- ip_version == 'ipv4'
- name: Add mgr ip address to trusted list with dashboard - ipv6
ansible.builtin.set_fact:
trusted_ip_list: '{{ trusted_ip_list | default("") }}{{ "," if trusted_ip_list is defined else "" }}{{ hostvars[item]["ansible_facts"]["all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
when:
- dashboard_enabled | bool
- ip_version == 'ipv6'
- name: Deploy gateway settings, used by the ceph_iscsi_config modules
openstack.config_template.config_template:
src: "{{ role_path }}/templates/iscsi-gateway.cfg.j2"
dest: /etc/ceph/iscsi-gateway.cfg
config_type: ini
config_overrides: '{{ iscsi_conf_overrides }}'
mode: "0600"
notify: Restart ceph rbd-target-api-gw
- name: Set_fact container_exec_cmd
ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: containerized_deployment | bool
- name: Create iscsi pool
ceph_pool:
name: "{{ iscsi_pool_name }}"
cluster: "{{ cluster }}"
size: "{{ iscsi_pool_size | default(omit) }}"
application: "rbd"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"


@ -1,33 +0,0 @@
---
- name: Create /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}
ansible.builtin.file:
path: "/var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}"
state: directory
owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}"
mode: "{{ ceph_directories_mode }}"
- name: Create rbd target log directories
ansible.builtin.file:
path: '/var/log/{{ item }}'
state: directory
mode: "0755"
with_items:
- rbd-target-api
- rbd-target-gw
- tcmu-runner
- name: Include_tasks systemd.yml
ansible.builtin.include_tasks: systemd.yml
- name: Systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers
ansible.builtin.systemd:
name: "{{ item }}"
state: started
enabled: true
masked: false
daemon_reload: true
with_items:
- tcmu-runner
- rbd-target-gw
- rbd-target-api


@ -1,90 +0,0 @@
---
- name: Create a temporary directory
ansible.builtin.tempfile:
state: directory
register: iscsi_ssl_tmp_dir
delegate_to: localhost
run_once: true
- name: Set_fact crt_files
ansible.builtin.set_fact:
crt_files:
- "iscsi-gateway.crt"
- "iscsi-gateway.key"
- "iscsi-gateway.pem"
- "iscsi-gateway-pub.key"
- name: Check for existing crt file(s) in monitor key/value store
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get iscsi/ssl/{{ item }}"
with_items: "{{ crt_files }}"
changed_when: false
failed_when: crt_files_exist.rc not in [0, 22]
run_once: true
delegate_to: "{{ groups.get(mon_group_name)[0] }}"
register: crt_files_exist
- name: Set_fact crt_files_missing
ansible.builtin.set_fact:
crt_files_missing: "{{ crt_files_exist.results | selectattr('rc', 'equalto', 0) | map(attribute='rc') | list | length != crt_files | length }}"
- name: Generate ssl crt/key files
when: crt_files_missing
block:
- name: Create ssl crt/key files
ansible.builtin.command: >
openssl req -newkey rsa:2048 -nodes -keyout {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key
-x509 -days 365 -out {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt
-subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_facts['hostname'] }}"
delegate_to: localhost
run_once: true
changed_when: false
with_items: "{{ crt_files_exist.results }}"
- name: Create pem # noqa: no-changed-when
ansible.builtin.shell: >
cat {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt
{{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key > {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.pem
delegate_to: localhost
run_once: true
register: pem
with_items: "{{ crt_files_exist.results }}"
- name: Create public key from pem
ansible.builtin.shell: >
openssl x509 -inform pem -in {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.pem
-pubkey -noout > {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway-pub.key
delegate_to: localhost
run_once: true
when: pem.changed
tags: skip_ansible_lint
- name: Slurp ssl crt/key files
ansible.builtin.slurp:
src: "{{ iscsi_ssl_tmp_dir.path }}/{{ item }}"
register: iscsi_ssl_files_content
with_items: "{{ crt_files }}"
run_once: true
delegate_to: localhost
- name: Store ssl crt/key files
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key put iscsi/ssl/{{ item.item }} {{ item.content }}"
run_once: true
delegate_to: "{{ groups.get(mon_group_name)[0] }}"
with_items: "{{ iscsi_ssl_files_content.results }}"
changed_when: false
- name: Copy crt file(s) to gateway nodes
ansible.builtin.copy:
content: "{{ item.stdout | b64decode }}"
dest: "/etc/ceph/{{ item.item }}"
owner: root
group: root
mode: "0400"
changed_when: false
with_items: "{{ crt_files_exist.results if not crt_files_missing else iscsi_ssl_files_content.results }}"
when: not crt_files_missing
- name: Clean temporary directory
ansible.builtin.file:
path: "{{ iscsi_ssl_tmp_dir.path }}"
state: absent


@ -1,28 +0,0 @@
---
- name: Include common.yml
ansible.builtin.include_tasks: common.yml
- name: Include non-container/prerequisites.yml
ansible.builtin.include_tasks: non-container/prerequisites.yml
when: not containerized_deployment | bool
# deploy_ssl_keys uses the Ansible controller to create self-signed crt/key/pub files
# and transfers them to the /etc/ceph directory on each gateway node. SSL certs are used by
# the API for https support.
- name: Include deploy_ssl_keys.yml
ansible.builtin.include_tasks: deploy_ssl_keys.yml
when: generate_crt | bool
- name: Include non-container/configure_iscsi.yml
ansible.builtin.include_tasks: non-container/configure_iscsi.yml
when:
- not containerized_deployment | bool
- not use_new_ceph_iscsi | bool
- name: Include non-container/postrequisites.yml
ansible.builtin.include_tasks: non-container/postrequisites.yml
when: not containerized_deployment | bool
- name: Include containerized.yml
ansible.builtin.include_tasks: containerized.yml
when: containerized_deployment | bool


@ -1,33 +0,0 @@
---
- name: Igw_gateway (tgt) | configure iscsi target (gateway)
igw_gateway:
mode: "target"
gateway_iqn: "{{ gateway_iqn }}"
gateway_ip_list: "{{ gateway_ip_list }}"
register: target
- name: Igw_lun | configure luns (create/map rbds and add to lio)
igw_lun:
pool: "{{ item.pool }}"
image: "{{ item.image }}"
size: "{{ item.size }}"
host: "{{ item.host }}"
state: "{{ item.state }}"
with_items: "{{ rbd_devices }}"
register: images
- name: Igw_gateway (map) | map luns to the iscsi target
igw_gateway:
mode: "map"
gateway_iqn: "{{ gateway_iqn }}"
gateway_ip_list: "{{ gateway_ip_list }}"
register: luns
- name: Igw_client | configure client connectivity
igw_client:
client_iqn: "{{ item.client }}"
image_list: "{{ item.image_list }}"
chap: "{{ item.chap }}"
state: "{{ item.status }}"
with_items: "{{ client_connections }}"
register: clients


@ -1,9 +0,0 @@
- name: Start rbd-target-api and rbd-target-gw
ansible.builtin.systemd:
name: "{{ item }}"
state: started
enabled: true
masked: false
with_items:
- rbd-target-api
- rbd-target-gw


@ -1,90 +0,0 @@
---
- name: Red hat based systems tasks
when: ansible_facts['os_family'] == 'RedHat'
block:
- name: Set_fact common_pkgs
ansible.builtin.set_fact:
common_pkgs:
- tcmu-runner
- targetcli
- name: Set_fact base iscsi pkgs if new style ceph-iscsi
ansible.builtin.set_fact:
iscsi_base:
- ceph-iscsi
when: use_new_ceph_iscsi | bool
- name: Set_fact base iscsi pkgs if using older ceph-iscsi-config
ansible.builtin.set_fact:
iscsi_base:
- ceph-iscsi-cli
- ceph-iscsi-config
when: not use_new_ceph_iscsi | bool
- name: When ceph_iscsi_config_dev is true
when:
- ceph_origin == 'repository'
- ceph_repository in ['dev', 'community']
- ceph_iscsi_config_dev | bool
block:
- name: Ceph-iscsi dependency repositories
ansible.builtin.get_url:
url: "https://shaman.ceph.com/api/repos/tcmu-runner/main/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/repo?arch={{ ansible_facts['architecture'] }}"
dest: '/etc/yum.repos.d/tcmu-runner-dev.repo'
force: true
mode: "0644"
register: result
until: result is succeeded
- name: Ceph-iscsi development repository
ansible.builtin.get_url:
url: "https://shaman.ceph.com/api/repos/{{ item }}/main/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/repo"
dest: '/etc/yum.repos.d/{{ item }}-dev.repo'
force: true
mode: "0644"
register: result
until: result is succeeded
with_items: '{{ iscsi_base }}'
when: ceph_repository == 'dev'
- name: Ceph-iscsi stable repository
ansible.builtin.get_url:
url: "https://download.ceph.com/ceph-iscsi/{{ '3' if use_new_ceph_iscsi | bool else '2' }}/rpm/el{{ ansible_facts['distribution_major_version'] }}/ceph-iscsi.repo"
dest: /etc/yum.repos.d/ceph-iscsi.repo
force: true
mode: "0644"
register: result
until: result is succeeded
when: ceph_repository == 'community'
- name: Install ceph iscsi package
ansible.builtin.package:
name: "{{ common_pkgs + iscsi_base }}"
state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
- name: Check the status of the target.service override
ansible.builtin.stat:
path: /etc/systemd/system/target.service
register: target
- name: Mask the target service - preventing manual start
ansible.builtin.systemd:
name: target
masked: true
enabled: false
when:
- target.stat.exists
- not target.stat.islnk
# Only start tcmu-runner, so configure_iscsi.yml can create disks.
# We must start rbd-target-gw/api after configure_iscsi.yml to avoid
# races where they are both trying to set up the same object during
# a rolling update.
- name: Start tcmu-runner
ansible.builtin.systemd:
name: tcmu-runner
state: started
enabled: true
masked: false


@ -1,15 +0,0 @@
---
- name: Generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw
ansible.builtin.template:
src: "{{ role_path }}/templates/{{ item }}.service.j2"
dest: /etc/systemd/system/{{ item }}.service
owner: "root"
group: "root"
mode: "0644"
with_items:
- tcmu-runner
- rbd-target-gw
- rbd-target-api
notify:
- Restart ceph tcmu-runner
- Restart ceph rbd-target-api-gw


@ -1,30 +0,0 @@
# This is seed configuration used by the ceph_iscsi_config modules
# when handling configuration tasks for iscsi gateway(s)
#
# {{ ansible_managed }}
[config]
cluster_name = {{ cluster }}
pool = {{ iscsi_pool_name }}
# API settings.
# The API supports a number of options that allow you to tailor it to your
# local environment. If you want to run the API under https, you will need to
# create a cert/key pair that is valid on every iSCSI gateway node, i.e. one that is
# not locked to a specific node. SSL cert and key files *must* be called
# 'iscsi-gateway.crt' and 'iscsi-gateway.key' and placed in the '/etc/ceph/' directory
# on *each* gateway node. With the SSL files in place, you can use 'api_secure = true'
# to switch to https mode.
# To support the API, the bare minimum settings are:
api_secure = {{ api_secure }}
# Optional settings related to the CLI/API service
api_user = {{ api_user }}
api_password = {{ api_password }}
api_port = {{ api_port }}
loop_delay = {{ loop_delay }}
{% if trusted_ip_list is defined %}
trusted_ip_list = {{ trusted_ip_list }}
{% endif %}
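Since the task shown earlier deploys this template through openstack.config_template with config_type: ini and config_overrides: '{{ iscsi_conf_overrides }}', extra key/value pairs could be injected into the rendered [config] section from the inventory. A hedged sketch of the override structure, assuming config_template's usual section-to-key/value merge semantics (the keys are taken from this template purely for illustration):
iscsi_conf_overrides:
  config:
    loop_delay: 2      # rendered as 'loop_delay = 2' under [config]
    api_secure: true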


@ -1,58 +0,0 @@
[Unit]
Description=RBD Target API Service
{% if container_binary == 'docker' %}
After=docker.service network-online.target local-fs.target time-sync.target
Requires=docker.service
{% else %}
After=network-online.target local-fs.target time-sync.target
{% endif %}
Wants=network-online.target local-fs.target time-sync.target
[Service]
EnvironmentFile=-/etc/environment
{% if container_binary == 'podman' %}
ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage rbd-target-api
ExecStartPre=-/usr/bin/mkdir -p /var/log/rbd-target-api
{% else %}
ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-api
{% endif %}
ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-api
ExecStartPre=-/usr/bin/sh -c "if ! grep -qs /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }} /proc/mounts; then mount -t configfs none /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}; fi"
ExecStart=/usr/bin/{{ container_binary }} run --rm \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
{% endif %}
--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
--memory={{ ceph_rbd_target_api_docker_memory_limit }} \
--cpus={{ ceph_rbd_target_api_docker_cpu_limit }} \
-v /etc/localtime:/etc/localtime:ro \
--privileged \
--net=host \
-v /dev:/dev \
-v /dev/log:/dev/log \
-v /lib/modules:/lib/modules \
-v /etc/ceph:/etc/ceph \
-v /var/log/rbd-target-api:/var/log/rbd-target-api:z \
-v /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}:/sys/kernel/config \
--name=rbd-target-api \
--entrypoint=/usr/bin/rbd-target-api \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% else %}
ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-api
{% endif %}
ExecStopPost=-/usr/bin/sh -c "if grep -qs /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }} /proc/mounts; then umount /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}; fi"
KillMode=none
Restart=always
RestartSec=10s
TimeoutStartSec=120
TimeoutStopSec=15
{% if container_binary == 'podman' %}
Type=forking
PIDFile=/%t/%n-pid
{% endif %}
[Install]
WantedBy=multi-user.target


@ -1,58 +0,0 @@
[Unit]
Description=RBD Target Gateway Service
{% if container_binary == 'docker' %}
After=docker.service network-online.target local-fs.target time-sync.target
Requires=docker.service
{% else %}
After=network-online.target local-fs.target time-sync.target
{% endif %}
Wants=network-online.target local-fs.target time-sync.target
[Service]
EnvironmentFile=-/etc/environment
{% if container_binary == 'podman' %}
ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage rbd-target-gw
ExecStartPre=-/usr/bin/mkdir -p /var/log/rbd-target-gw
{% else %}
ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-gw
{% endif %}
ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-gw
ExecStartPre=-/usr/bin/sh -c "if ! grep -qs /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }} /proc/mounts; then mount -t configfs none /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}; fi"
ExecStart=/usr/bin/{{ container_binary }} run --rm \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
{% endif %}
--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
--memory={{ ceph_rbd_target_gw_docker_memory_limit }} \
--cpus={{ ceph_rbd_target_gw_docker_cpu_limit }} \
-v /etc/localtime:/etc/localtime:ro \
--privileged \
--net=host \
-v /dev:/dev \
-v /dev/log:/dev/log \
-v /lib/modules:/lib/modules \
-v /etc/ceph:/etc/ceph \
-v /var/log/rbd-target-gw:/var/log/rbd-target-gw:z \
-v /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}:/sys/kernel/config \
--name=rbd-target-gw \
--entrypoint=/usr/bin/rbd-target-gw \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% else %}
ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-gw
{% endif %}
ExecStopPost=-/usr/bin/sh -c "if grep -qs /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }} /proc/mounts; then umount /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}; fi"
KillMode=none
Restart=always
RestartSec=10s
TimeoutStartSec=120
TimeoutStopSec=15
{% if container_binary == 'podman' %}
Type=forking
PIDFile=/%t/%n-pid
{% endif %}
[Install]
WantedBy=multi-user.target


@ -1,58 +0,0 @@
[Unit]
Description=TCMU Runner
{% if container_binary == 'docker' %}
After=docker.service network-online.target local-fs.target time-sync.target
Requires=docker.service
{% else %}
After=network-online.target local-fs.target time-sync.target
{% endif %}
Wants=network-online.target local-fs.target time-sync.target
[Service]
EnvironmentFile=-/etc/environment
{% if container_binary == 'podman' %}
ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage tcmu-runner
ExecStartPre=-/usr/bin/mkdir -p /var/log/tcmu-runner
{% else %}
ExecStartPre=-/usr/bin/{{ container_binary }} stop tcmu-runner
{% endif %}
ExecStartPre=-/usr/bin/{{ container_binary }} rm tcmu-runner
ExecStartPre=-/usr/bin/sh -c "if ! grep -qs /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }} /proc/mounts; then mount -t configfs none /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}; fi"
ExecStart=/usr/bin/{{ container_binary }} run --rm \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
{% endif %}
--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
--memory={{ ceph_tcmu_runner_docker_memory_limit }} \
--cpus={{ ceph_tcmu_runner_docker_cpu_limit }} \
-v /etc/localtime:/etc/localtime:ro \
--privileged \
--net=host \
-v /dev:/dev \
-v /lib/modules:/lib/modules \
-v /etc/ceph:/etc/ceph \
-v /var/log/tcmu-runner:/var/log/tcmu-runner:z \
-v /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}:/sys/kernel/config \
--name=tcmu-runner \
--entrypoint=tcmu-runner \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
--tcmu-log-dir /var/log/tcmu-runner
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% else %}
ExecStopPost=-/usr/bin/{{ container_binary }} stop tcmu-runner
{% endif %}
ExecStopPost=-/usr/bin/sh -c "if grep -qs /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }} /proc/mounts; then umount /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}; fi"
KillMode=none
Restart=always
RestartSec=10s
TimeoutStartSec=120
TimeoutStopSec=15
{% if container_binary == 'podman' %}
Type=forking
PIDFile=/%t/%n-pid
{% endif %}
[Install]
WantedBy=multi-user.target


@ -31,15 +31,6 @@ scrape_configs:
labels:
instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
{% endfor %}
{% if iscsi_gw_group_name in groups %}
- job_name: 'iscsi-gws'
static_configs:
{% for host in groups[iscsi_gw_group_name] %}
- targets: ['{{ host }}:9287']
labels:
instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
{% endfor %}
{% endif %}
alerting:
alertmanagers:
- scheme: http


@ -1,42 +0,0 @@
---
- name: Fail on unsupported distribution for iscsi gateways
ansible.builtin.fail:
msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora"
when: ansible_facts['distribution'] not in ['RedHat', 'CentOS', 'Fedora', 'AlmaLinux', 'Rocky']
- name: Make sure gateway_ip_list is configured
ansible.builtin.fail:
msg: "you must set a list of IPs (comma separated) for gateway_ip_list"
when:
- gateway_ip_list == '0.0.0.0'
- not containerized_deployment | bool
- not use_new_ceph_iscsi | bool
- name: Make sure gateway_iqn is configured
ansible.builtin.fail:
msg: "you must set a iqn for the iSCSI target"
when:
- gateway_iqn | length == 0
- not containerized_deployment | bool
- not use_new_ceph_iscsi | bool
- name: Fail if unsupported chap configuration
ansible.builtin.fail:
msg: "Mixing clients with CHAP enabled and disabled is not supported."
with_items: "{{ client_connections }}"
when:
- item.status is defined
- item.status == "present"
- item.chap
- " '' in client_connections | selectattr('status', 'match', 'present') | map(attribute='chap') | list"
- name: Fail on unsupported distribution version for iscsi gateways
ansible.builtin.command: "grep -q {{ item }}=m {% if is_atomic | bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_facts['kernel'] }}"
register: iscsi_kernel
changed_when: false
failed_when: iscsi_kernel.rc != 0
loop:
- CONFIG_TARGET_CORE
- CONFIG_TCM_USER2
- CONFIG_ISCSI_TARGET
when: ansible_facts['distribution'] in ['RedHat', 'CentOS']


@ -137,10 +137,6 @@
- inventory_hostname in groups.get(rgw_group_name, [])
- rgw_create_pools is defined
- name: Include check_iscsi.yml
ansible.builtin.include_tasks: check_iscsi.yml
when: iscsi_gw_group_name in group_names
- name: Include check_nfs.yml
ansible.builtin.include_tasks: check_nfs.yml
when: inventory_hostname in groups.get(nfs_group_name, [])


@ -18,7 +18,6 @@
- nfss
- rbdmirrors
- clients
- iscsigws
- mgrs
- monitoring
@ -457,46 +456,6 @@
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts:
- iscsigws
gather_facts: false
any_errors_fatal: true
become: True
tasks:
# pre-tasks for following imports -
- name: set ceph iscsi gateway install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_iscsi_gw:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-iscsi-gw
# post-tasks for preceding imports -
post_tasks:
- name: set ceph iscsi gw install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_iscsi_gw:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- import_playbook: dashboard.yml
when:
- dashboard_enabled | bool


@ -20,7 +20,6 @@
- rbdmirrors
- clients
- mgrs
- iscsigws
- monitoring
- rgwloadbalancers
@ -440,45 +439,6 @@
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts:
- iscsigws
gather_facts: false
become: True
any_errors_fatal: true
pre_tasks:
- name: set ceph iscsi gateway install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_iscsi_gw:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-iscsi-gw
post_tasks:
- name: set ceph iscsi gw install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_iscsi_gw:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts:
- rgwloadbalancers
gather_facts: false


@ -167,8 +167,8 @@ def node(host, request):
if request.node.get_closest_marker('rbdmirror_secondary') and not ceph_rbd_mirror_remote_user: # noqa E501
pytest.skip('Not a valid test for a non-secondary rbd-mirror node')
if request.node.get_closest_marker('ceph_crash') and sanitized_group_names in [['nfss'], ['iscsigws'], ['clients'], ['monitoring']]:
pytest.skip('Not a valid test for nfs, client or iscsigw nodes')
if request.node.get_closest_marker('ceph_crash') and sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]:
pytest.skip('Not a valid test for nfs or client nodes')
if request.node.get_closest_marker("no_docker") and docker:
pytest.skip(
@ -213,8 +213,6 @@ def pytest_collection_modifyitems(session, config, items):
item.add_marker(pytest.mark.rgws)
elif "nfs" in test_path:
item.add_marker(pytest.mark.nfss)
elif "iscsi" in test_path:
item.add_marker(pytest.mark.iscsigws)
elif "grafana" in test_path:
item.add_marker(pytest.mark.grafanas)
else:


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 1
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 1
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS


@ -8,7 +8,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS


@ -29,8 +29,5 @@ client1
[rbdmirrors]
rbd-mirror0
[iscsigws]
iscsi-gw0
[monitoring]
mon0


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 2
iscsi_gw_vms: 1
mgr_vms: 1
# SUBNETS TO USE FOR THE VMS


@ -29,8 +29,5 @@ client1
[rbdmirrors]
rbd-mirror0
[iscsigws]
iscsi-gw0
[ceph_monitoring]
mon0


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 2
iscsi_gw_vms: 1
mgr_vms: 1
# INSTALL SOURCE OF CEPH


@ -29,8 +29,5 @@ client1
[rbdmirrors]
rbd-mirror0
[iscsigws]
iscsi-gw0
[monitoring]
mon0


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 2
iscsi_gw_vms: 1
mgr_vms: 1
# SUBNETS TO USE FOR THE VMS


@ -29,8 +29,5 @@ client1
[rbdmirrors]
rbd-mirror0
[iscsigws]
iscsi-gw0
[ceph_monitoring]
mon0


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 2
iscsi_gw_vms: 1
mgr_vms: 1
# INSTALL SOURCE OF CEPH


@ -24,8 +24,5 @@ nfs0
[rbdmirrors]
rbd-mirror0
[iscsigws]
iscsi-gw0
[monitoring]
mon0


@ -12,7 +12,6 @@ nfs_vms: 1
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 0
iscsi_gw_vms: 1
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS


@ -8,7 +8,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 2
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS


@ -8,7 +8,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 2
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0


@ -12,7 +12,6 @@ nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 3
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS

Some files were not shown because too many files have changed in this diff.