ceph-rgw: introduce rgw zone to the name schema

This is needed by ceph-exporter, which parses the socket name by the number of dots.
Note that the rgw_zone variable is only used for constructing the client name
and has nothing to do with multisite.

Signed-off-by: Seena Fallah <seenafallah@gmail.com>
pull/7557/head
Seena Fallah 2024-05-07 20:41:53 +02:00 committed by Guillaume Abrioux
parent 878cce5b48
commit 1121e6d98a
26 changed files with 58 additions and 60 deletions
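
For context, the client name and admin-socket prefix follow the schema {{ cluster }}-client.rgw.{{ rgw_zone }}.{{ hostname }}.{{ instance_name }} after this change (see the SOCKET_PREFIX change in the restart script hunk below). A minimal sketch of dot-based splitting under that schema; this is illustrative only, not the actual ceph-exporter parsing code, and the example values are invented:

    # Illustrative only: split an RGW admin-socket prefix on dots, assuming the
    # naming schema introduced by this commit. Not taken from ceph-exporter.
    prefix = "ceph-client.rgw.default.rgw0node.rgw0"  # hypothetical example values

    fields = prefix.split(".")
    # -> ['ceph-client', 'rgw', 'default', 'rgw0node', 'rgw0']
    #     cluster+type   rgw    zone       hostname    instance
    assert len(fields) == 5  # rgw_zone is always present, so the field count is fixed

    _, _, zone, hostname, instance = fields
    print(zone, hostname, instance)  # -> default rgw0node rgw0

Because rgw_zone defaults to "default", the name keeps the same number of fields whether or not a multisite zone is actually configured.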

.gitignore
View File

@@ -21,3 +21,4 @@ ceph-ansible.spec
 !.mergify.yml
 !raw_install_python.yml
 !requirements.yml
+.vscode/

View File

@@ -368,8 +368,7 @@ dummy:
 #radosgw_address_block: subnet
 #radosgw_keystone_ssl: false # activate this when using keystone PKI keys
 #radosgw_num_instances: 1
-# Rados Gateway options
-#email_address: foo@bar.com
+#rgw_zone: default # This is used for rgw instance client names.
 ## Testing mode
@@ -445,7 +444,7 @@ dummy:
 # global:
 # foo: 1234
 # bar: 5678
-# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
+# "client.rgw.{{ rgw_zone }}.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
 # rgw_zone: zone1
 #
 #ceph_conf_overrides: {}

View File

@@ -88,11 +88,11 @@ dummy:
 # If you want to add parameters, you should retain the existing ones and include the new ones.
 #ceph_rgw_container_params:
 # volumes:
-# - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
+# - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
 # args:
 # - -f
-# - -n=client.rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}
-# - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring
+# - -n=client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}
+# - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring
 ###########
 # SYSTEMD #

View File

@@ -961,7 +961,7 @@
 - name: Stop and disable ceph-radosgw systemd service
   ansible.builtin.service:
-    name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+    name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     state: stopped
     enabled: false
   failed_when: false
@@ -975,7 +975,7 @@
   failed_when: false
 - name: Reset failed ceph-radosgw systemd unit
-  ansible.builtin.command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa command-instead-of-module
+  ansible.builtin.command: "systemctl reset-failed ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa command-instead-of-module
   changed_when: false
   failed_when: false
   loop: '{{ rgw_instances }}'
@@ -992,13 +992,13 @@
 - name: Remove legacy ceph radosgw data
   ansible.builtin.file:
-    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     state: absent
   loop: '{{ rgw_instances }}'
 - name: Remove legacy ceph radosgw directory
   ansible.builtin.file:
-    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
+    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}"
     state: absent
 - name: Redeploy rbd-mirror daemons

View File

@@ -259,7 +259,7 @@
 - name: Stop ceph rgws with systemd
   ansible.builtin.service:
-    name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+    name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     state: stopped
     enabled: false
   failed_when: false

View File

@@ -912,7 +912,7 @@
 - name: Stop ceph rgw when upgrading from stable-3.2 # noqa: ignore-errors
   ansible.builtin.systemd:
-    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}
+    name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}
     state: stopped
     enabled: false
     masked: true
@@ -920,7 +920,7 @@
 - name: Stop ceph rgw
   ansible.builtin.systemd:
-    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
+    name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
     state: stopped
     enabled: false
     masked: true

View File

@@ -564,7 +564,7 @@
   tasks:
     - name: Stop non-containerized ceph rgw(s)
       ansible.builtin.service:
-        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+        name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
         state: stopped
         enabled: false
       with_items: "{{ rgw_instances }}"

View File

@@ -7,7 +7,7 @@
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
+  "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     "rgw keystone api version": "2"
     "rgw keystone url": "http://192.168.0.1:35357"
     "rgw keystone admin token": "password"

View File

@@ -7,7 +7,7 @@
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
+  "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     "rgw keystone api version": "3"
     "rgw keystone url": "http://192.168.0.1:35357"
     "rgw keystone admin token": "password"

View File

@@ -6,6 +6,6 @@
 # The double quotes are important, do NOT remove them.
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
+  "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     rgw enable static website = true
     rgw dns s3website name = objects-website-region.domain.com

View File

@@ -6,7 +6,7 @@
 # The double quotes are important, do NOT remove them.
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
+  "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     rgw enable usage log = true
     rgw usage log tick interval = 30
     rgw usage log flush threshold = 1024

View File

@@ -131,8 +131,8 @@
   ansible.builtin.set_fact:
     _ceph_ansible_rgw_conf: >-
       {{ _ceph_ansible_rgw_conf | default({}) | combine({
-        'client.rgw.' + ansible_facts['hostname'] + '.' + item.instance_name: {
-          'log_file': '/var/log/ceph/' + cluster + '-rgw-' + ansible_facts['hostname'] + '.' + item.instance_name + '.log',
+        'client.rgw.' + rgw_zone + '.' + ansible_facts['hostname'] + '.' + item.instance_name: {
+          'log_file': '/var/log/ceph/' + cluster + '-rgw-' + rgw_zone + '-' + ansible_facts['hostname'] + '.' + item.instance_name + '.log',
           'rgw_frontends': 'beast ' + _rgw_beast_endpoint + _rgw_beast_ssl_option,
         }
       }, recursive=true) }}

View File

@@ -1,7 +1,7 @@
 ---
 - name: Create rados gateway instance directories
   ansible.builtin.file:
-    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     state: directory
     owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@@ -10,7 +10,7 @@
 - name: Generate environment file
   ansible.builtin.copy:
-    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
+    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
     owner: "root"
     group: "root"
     mode: "0644"

View File

@@ -360,8 +360,7 @@ radosgw_address: x.x.x.x
 radosgw_address_block: subnet
 radosgw_keystone_ssl: false # activate this when using keystone PKI keys
 radosgw_num_instances: 1
-# Rados Gateway options
-email_address: foo@bar.com
+rgw_zone: default # This is used for rgw instance client names.
 ## Testing mode
@@ -437,7 +436,7 @@ ceph_rbd_mirror_pool: "rbd"
 # global:
 # foo: 1234
 # bar: 5678
-# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
+# "client.rgw.{{ rgw_zone }}.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
 # rgw_zone: zone1
 #
 ceph_conf_overrides: {}

View File

@@ -24,7 +24,7 @@
   when: inventory_hostname in groups.get(mds_group_name, [])
 - name: Check for a rgw container
-  ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'"
+  ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}'"
   register: ceph_rgw_container_stat
   changed_when: false
   failed_when: false

View File

@@ -13,13 +13,14 @@ fi
 INSTANCES_NAME=({% for i in rgw_instances %}{{ i.instance_name }} {% endfor %})
 RGW_IPS=({% for i in rgw_instances %}{{ i.radosgw_address }} {% endfor %})
 RGW_PORTS=({% for i in rgw_instances %}{{ i.radosgw_frontend_port }} {% endfor %})
+RGW_ZONE="{{ rgw_zone }}"
 declare -a DOCKER_EXECS
 declare -a SOCKET_PREFIX
 for ((i=0; i<${RGW_NUMS}; i++)); do
-  SOCKET_PREFIX[i]="/var/run/ceph/{{ cluster }}-client.rgw.${HOST_NAME}.${INSTANCES_NAME[i]}"
+  SOCKET_PREFIX[i]="/var/run/ceph/{{ cluster }}-client.rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]}"
   DOCKER_EXECS[i]=""
 {% if containerized_deployment | bool %}
-  DOCKER_EXECS[i]="{{ container_binary }} exec ceph-rgw-${HOST_NAME}-${INSTANCES_NAME[i]}"
+  DOCKER_EXECS[i]="{{ container_binary }} exec ceph-rgw-${RGW_ZONE}-${HOST_NAME}-${INSTANCES_NAME[i]}"
 {% endif %}
 done
@@ -38,7 +39,7 @@ check_socket() {
   done
   if [ $succ -ne 1 ]; then
     echo "Socket file ${SOCKET} could not be found, which means Rados Gateway is not running. Showing ceph-rgw unit logs now:"
-    journalctl -u ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}
+    journalctl -u ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]}
     exit 1
   fi
 }
@@ -81,10 +82,10 @@ for ((i=0; i<${RGW_NUMS}; i++)); do
   # Check if systemd unit exists
   # This is needed for new instances as the restart might trigger before the deployment
-  if systemctl list-units --full --all | grep -q "ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}"; then
-    systemctl restart ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}
+  if systemctl list-units --full --all | grep -q "ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]}"; then
+    systemctl restart ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]}
   else
-    echo "Systemd unit ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]} does not exist."
+    echo "Systemd unit ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]} does not exist."
     continue
   fi

View File

@@ -80,11 +80,11 @@ rgw_config_keys: "/" # DON'T TOUCH ME
 # If you want to add parameters, you should retain the existing ones and include the new ones.
 ceph_rgw_container_params:
   volumes:
-    - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
+    - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
   args:
     - -f
-    - -n=client.rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}
-    - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring
+    - -n=client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}
+    - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring
 ###########
 # SYSTEMD #

View File

@@ -1,6 +1,6 @@
 ---
 - name: Restart rgw
   ansible.builtin.service:
-    name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+    name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     state: restarted
   with_items: "{{ rgw_instances }}"

View File

@@ -1,13 +1,4 @@
 ---
-- name: Create rados gateway directories
-  ansible.builtin.file:
-    path: "{{ item }}"
-    state: directory
-    owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
-    group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
-    mode: "{{ ceph_directories_mode }}"
-  with_items: "{{ rbd_client_admin_socket_path }}"
 - name: Get keys from monitors
   ceph_key:
     name: "{{ item.name }}"

View File

@@ -1,7 +1,7 @@
 ---
 - name: Create rados gateway directories
   ansible.builtin.file:
-    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     state: directory
     owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@@ -12,11 +12,11 @@
 - name: Create rgw keyrings
   ceph_key:
-    name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+    name: "client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     cluster: "{{ cluster }}"
     user: "client.bootstrap-rgw"
     user_key: /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
-    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/keyring"
+    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/keyring"
     caps:
       osd: 'allow rwx'
       mon: 'allow rw'
@@ -34,7 +34,7 @@
 - name: Get keys from monitors
   ceph_key:
-    name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+    name: "client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     cluster: "{{ cluster }}"
     output_format: plain
     state: info
@@ -51,7 +51,7 @@
 - name: Copy ceph key(s) if needed
   ansible.builtin.copy:
-    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.item.instance_name }}/keyring"
+    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.item.instance_name }}/keyring"
     content: "{{ item.stdout + '\n' }}"
     owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"

View File

@@ -4,7 +4,7 @@
 - name: Systemd start rgw container
   ansible.builtin.systemd:
-    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
+    name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
     state: started
     enabled: true
     masked: false

View File

@@ -16,7 +16,7 @@
 - name: Start rgw instance
   ansible.builtin.systemd:
-    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
+    name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
     state: started
     enabled: true
     masked: false

View File

@@ -14,12 +14,12 @@ Wants=network-online.target local-fs.target time-sync.target
 EnvironmentFile=/var/lib/ceph/radosgw/{{ cluster }}-%i/EnvironmentFile
 {% if container_binary == 'podman' %}
 ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME}
 ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph
 {% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME}
 {% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME}
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
 -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -46,14 +46,14 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if radosgw_frontend_ssl_certificate -%}
 -v {{ radosgw_frontend_ssl_certificate }}:{{ radosgw_frontend_ssl_certificate }} \
 {% endif -%}
---name=ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME} \
+--name=ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME} \
 --entrypoint=/usr/bin/radosgw \
 {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
 {{ (ceph_common_container_params['args'] + ceph_rgw_container_params['args'] | default([])) | join(' ') }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
 {% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME}
 {% endif %}
 KillMode=none
 Restart=always

View File

@@ -58,7 +58,7 @@
     (groups.get(mgr_group_name, []) | length == 0 and inventory_hostname in groups.get(mon_group_name, []))
 - name: Get rgw log
-  ansible.builtin.shell: journalctl -l -u ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} > /var/log/ceph/ceph-radosgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}.log
+  ansible.builtin.shell: journalctl -l -u ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} > /var/log/ceph/ceph-radosgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}.log
   changed_when: false
   with_items: "{{ rgw_instances | default([]) }}"
   when: inventory_hostname in groups.get(rgw_group_name, [])

View File

@@ -13,9 +13,10 @@ class TestRGWs(object):
     def test_rgw_service_enabled_and_running(self, node, host):
         for i in range(int(node["radosgw_num_instances"])):
-            service_name = "ceph-radosgw@rgw.{hostname}.rgw{seq}".format(
+            service_name = "ceph-radosgw@rgw.{rgw_zone}.{hostname}.rgw{seq}".format(
                 hostname=node["vars"]["inventory_hostname"],
-                seq=i
+                seq=i,
+                rgw_zone=node["vars"].get("rgw_zone", "default"),
             )
             s = host.service(service_name)
             assert s.is_enabled

View File

@@ -62,6 +62,7 @@ commands=
   # set up the cluster again
   ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
+      no_log_on_ceph_key_tasks=false \
       yes_i_know=true \
       ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
@@ -83,6 +84,7 @@ commands=
   # set up the cluster again
   ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
+      no_log_on_ceph_key_tasks=false \
       yes_i_know=true \
       ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
@@ -187,6 +189,7 @@ commands=
 commands=
   ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mgrs {toxinidir}/tests/functional/setup.yml
   ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+      no_log_on_ceph_key_tasks=false \
       yes_i_know=true \
       ireallymeanit=yes \
       ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
@@ -201,6 +204,7 @@ commands=
 commands=
   ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml
   ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+      no_log_on_ceph_key_tasks=false \
       yes_i_know=true \
       ireallymeanit=yes \
       ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
@@ -215,6 +219,7 @@ commands=
 commands=
   ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/tests/functional/setup.yml
   ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+      no_log_on_ceph_key_tasks=false \
      yes_i_know=true \
       ireallymeanit=yes \
       ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \
@@ -229,6 +234,7 @@ commands=
 commands=
   ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rgws {toxinidir}/tests/functional/setup.yml
   ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rgws {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+      no_log_on_ceph_key_tasks=false \
       yes_i_know=true \
       ireallymeanit=yes \
       ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \