mirror of https://github.com/ceph/ceph-ansible.git
ceph-rgw: introduce rgw zone to the name schema
This is needed by ceph-exporter, which parses the socket name by its number of dots.
Note that the rgw_zone variable is only used for constructing the client name
and has nothing to do with multisite.
Signed-off-by: Seena Fallah <seenafallah@gmail.com>
(cherry picked from commit 1121e6d98a)
pull/7561/head
parent 89097bec22
commit faae48d75b
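With this change every RGW client name follows the pattern client.rgw.<zone>.<hostname>.<instance_name>, so the admin socket name (e.g. <cluster>-client.rgw.<zone>.<hostname>.<instance_name>.asok, matching the SOCKET_PREFIX set in the restart script further down) always carries the same number of dot-separated fields. Below is a minimal Python sketch of that dot-count parsing idea; the parse_rgw_socket helper and the example path are illustrative assumptions, not ceph-exporter's actual code.

# Minimal sketch (not ceph-exporter's real code): with rgw_zone in the name,
# an RGW admin socket name always splits into the same dot-separated fields.
def parse_rgw_socket(path):
    # e.g. "/var/run/ceph/ceph-client.rgw.default.host1.rgw0.asok"
    name = path.rsplit("/", 1)[-1]
    stem = name[:-len(".asok")]                 # drop the ".asok" suffix
    cluster, _, rest = stem.partition("-")      # "ceph" / "client.rgw.default.host1.rgw0"
    fields = rest.split(".")
    assert len(fields) == 5, fields             # client, rgw, zone, hostname, instance_name
    _, _, zone, hostname, instance = fields
    return {"cluster": cluster, "zone": zone, "hostname": hostname, "instance": instance}

print(parse_rgw_socket("/var/run/ceph/ceph-client.rgw.default.host1.rgw0.asok"))
# -> {'cluster': 'ceph', 'zone': 'default', 'hostname': 'host1', 'instance': 'rgw0'}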
@@ -21,3 +21,4 @@ ceph-ansible.spec
!.mergify.yml
!raw_install_python.yml
!requirements.yml
.vscode/
@@ -368,8 +368,7 @@ dummy:
#radosgw_address_block: subnet
#radosgw_keystone_ssl: false # activate this when using keystone PKI keys
#radosgw_num_instances: 1
# Rados Gateway options
#email_address: foo@bar.com
#rgw_zone: default # This is used for rgw instance client names.


## Testing mode
@@ -445,7 +444,7 @@ dummy:
# global:
# foo: 1234
# bar: 5678
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# "client.rgw.{{ rgw_zone }}.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
#ceph_conf_overrides: {}
@@ -88,11 +88,11 @@ dummy:
# If you want to add parameters, you should retain the existing ones and include the new ones.
#ceph_rgw_container_params:
# volumes:
# - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
# - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
# args:
# - -f
# - -n=client.rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}
# - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring
# - -n=client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}
# - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring

###########
# SYSTEMD #
@@ -961,7 +961,7 @@

- name: Stop and disable ceph-radosgw systemd service
  ansible.builtin.service:
    name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    state: stopped
    enabled: false
  failed_when: false
@@ -975,7 +975,7 @@
  failed_when: false

- name: Reset failed ceph-radosgw systemd unit
  ansible.builtin.command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa command-instead-of-module
  ansible.builtin.command: "systemctl reset-failed ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa command-instead-of-module
  changed_when: false
  failed_when: false
  loop: '{{ rgw_instances }}'
@@ -992,13 +992,13 @@

- name: Remove legacy ceph radosgw data
  ansible.builtin.file:
    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    state: absent
  loop: '{{ rgw_instances }}'

- name: Remove legacy ceph radosgw directory
  ansible.builtin.file:
    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}"
    state: absent

- name: Redeploy rbd-mirror daemons
@@ -259,7 +259,7 @@

- name: Stop ceph rgws with systemd
  ansible.builtin.service:
    name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    state: stopped
    enabled: false
  failed_when: false
@@ -940,7 +940,7 @@

- name: Stop ceph rgw when upgrading from stable-3.2 # noqa: ignore-errors
  ansible.builtin.systemd:
    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}
    name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}
    state: stopped
    enabled: false
    masked: true
@@ -948,7 +948,7 @@

- name: Stop ceph rgw
  ansible.builtin.systemd:
    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
    name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
    state: stopped
    enabled: false
    masked: true
@@ -564,7 +564,7 @@
  tasks:
    - name: Stop non-containerized ceph rgw(s)
      ansible.builtin.service:
        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
        name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
        state: stopped
        enabled: false
      with_items: "{{ rgw_instances }}"
@@ -7,7 +7,7 @@


ceph_conf_overrides:
  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
  "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
    "rgw keystone api version": "2"
    "rgw keystone url": "http://192.168.0.1:35357"
    "rgw keystone admin token": "password"
@@ -7,7 +7,7 @@


ceph_conf_overrides:
  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
  "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
    "rgw keystone api version": "3"
    "rgw keystone url": "http://192.168.0.1:35357"
    "rgw keystone admin token": "password"
@@ -6,6 +6,6 @@
# The double quotes are important, do NOT remove them.

ceph_conf_overrides:
  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
  "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
    rgw enable static website = true
    rgw dns s3website name = objects-website-region.domain.com
@@ -6,7 +6,7 @@
# The double quotes are important, do NOT remove them.

ceph_conf_overrides:
  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
  "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
    rgw enable usage log = true
    rgw usage log tick interval = 30
    rgw usage log flush threshold = 1024
@@ -131,8 +131,8 @@
  ansible.builtin.set_fact:
    _ceph_ansible_rgw_conf: >-
      {{ _ceph_ansible_rgw_conf | default({}) | combine({
        'client.rgw.' + ansible_facts['hostname'] + '.' + item.instance_name: {
          'log_file': '/var/log/ceph/' + cluster + '-rgw-' + ansible_facts['hostname'] + '.' + item.instance_name + '.log',
        'client.rgw.' + rgw_zone + '.' + ansible_facts['hostname'] + '.' + item.instance_name: {
          'log_file': '/var/log/ceph/' + cluster + '-rgw-' + rgw_zone + '-' + ansible_facts['hostname'] + '.' + item.instance_name + '.log',
          'rgw_frontends': 'beast ' + _rgw_beast_endpoint + _rgw_beast_ssl_option,
        }
      }, recursive=true) }}
@@ -1,7 +1,7 @@
---
- name: Create rados gateway instance directories
  ansible.builtin.file:
    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    state: directory
    owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
    group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@@ -10,7 +10,7 @@

- name: Generate environment file
  ansible.builtin.copy:
    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
    owner: "root"
    group: "root"
    mode: "0644"
@@ -360,8 +360,7 @@ radosgw_address: x.x.x.x
radosgw_address_block: subnet
radosgw_keystone_ssl: false # activate this when using keystone PKI keys
radosgw_num_instances: 1
# Rados Gateway options
email_address: foo@bar.com
rgw_zone: default # This is used for rgw instance client names.


## Testing mode
@@ -437,7 +436,7 @@ ceph_rbd_mirror_pool: "rbd"
# global:
# foo: 1234
# bar: 5678
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# "client.rgw.{{ rgw_zone }}.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
ceph_conf_overrides: {}
@@ -24,7 +24,7 @@
  when: inventory_hostname in groups.get(mds_group_name, [])

- name: Check for a rgw container
  ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'"
  ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}'"
  register: ceph_rgw_container_stat
  changed_when: false
  failed_when: false
@@ -13,13 +13,14 @@ fi
INSTANCES_NAME=({% for i in rgw_instances %}{{ i.instance_name }} {% endfor %})
RGW_IPS=({% for i in rgw_instances %}{{ i.radosgw_address }} {% endfor %})
RGW_PORTS=({% for i in rgw_instances %}{{ i.radosgw_frontend_port }} {% endfor %})
RGW_ZONE="{{ rgw_zone }}"
declare -a DOCKER_EXECS
declare -a SOCKET_PREFIX
for ((i=0; i<${RGW_NUMS}; i++)); do
  SOCKET_PREFIX[i]="/var/run/ceph/{{ cluster }}-client.rgw.${HOST_NAME}.${INSTANCES_NAME[i]}"
  SOCKET_PREFIX[i]="/var/run/ceph/{{ cluster }}-client.rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]}"
  DOCKER_EXECS[i]=""
{% if containerized_deployment | bool %}
  DOCKER_EXECS[i]="{{ container_binary }} exec ceph-rgw-${HOST_NAME}-${INSTANCES_NAME[i]}"
  DOCKER_EXECS[i]="{{ container_binary }} exec ceph-rgw-${RGW_ZONE}-${HOST_NAME}-${INSTANCES_NAME[i]}"
{% endif %}
done

@@ -38,7 +39,7 @@ check_socket() {
  done
  if [ $succ -ne 1 ]; then
    echo "Socket file ${SOCKET} could not be found, which means Rados Gateway is not running. Showing ceph-rgw unit logs now:"
    journalctl -u ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}
    journalctl -u ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]}
    exit 1
  fi
}
@@ -81,10 +82,10 @@ for ((i=0; i<${RGW_NUMS}; i++)); do

  # Check if systemd unit exists
  # This is needed for new instances as the restart might trigger before the deployment
  if systemctl list-units --full --all | grep -q "ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}"; then
    systemctl restart ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}
  if systemctl list-units --full --all | grep -q "ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]}"; then
    systemctl restart ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]}
  else
    echo "Systemd unit ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]} does not exist."
    echo "Systemd unit ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]} does not exist."
    continue
  fi

@@ -80,11 +80,11 @@ rgw_config_keys: "/" # DON'T TOUCH ME
# If you want to add parameters, you should retain the existing ones and include the new ones.
ceph_rgw_container_params:
  volumes:
    - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
    - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:z
  args:
    - -f
    - -n=client.rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}
    - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring
    - -n=client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}
    - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring

###########
# SYSTEMD #
@@ -1,6 +1,6 @@
---
- name: Restart rgw
  ansible.builtin.service:
    name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    state: restarted
  with_items: "{{ rgw_instances }}"
@@ -1,13 +1,4 @@
---
- name: Create rados gateway directories
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
    group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
    mode: "{{ ceph_directories_mode }}"
  with_items: "{{ rbd_client_admin_socket_path }}"

- name: Get keys from monitors
  ceph_key:
    name: "{{ item.name }}"
@@ -1,7 +1,7 @@
---
- name: Create rados gateway directories
  ansible.builtin.file:
    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    state: directory
    owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
    group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@@ -12,11 +12,11 @@

- name: Create rgw keyrings
  ceph_key:
    name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    name: "client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    cluster: "{{ cluster }}"
    user: "client.bootstrap-rgw"
    user_key: /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/keyring"
    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/keyring"
    caps:
      osd: 'allow rwx'
      mon: 'allow rw'
@@ -34,7 +34,7 @@

- name: Get keys from monitors
  ceph_key:
    name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    name: "client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
    cluster: "{{ cluster }}"
    output_format: plain
    state: info
@@ -51,7 +51,7 @@

- name: Copy ceph key(s) if needed
  ansible.builtin.copy:
    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.item.instance_name }}/keyring"
    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.item.instance_name }}/keyring"
    content: "{{ item.stdout + '\n' }}"
    owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
    group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@@ -4,7 +4,7 @@

- name: Systemd start rgw container
  ansible.builtin.systemd:
    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
    name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
    state: started
    enabled: true
    masked: false
@@ -16,7 +16,7 @@

- name: Start rgw instance
  ansible.builtin.systemd:
    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
    name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
    state: started
    enabled: true
    masked: false
@@ -14,12 +14,12 @@ Wants=network-online.target local-fs.target time-sync.target
EnvironmentFile=/var/lib/ceph/radosgw/{{ cluster }}-%i/EnvironmentFile
{% if container_binary == 'podman' %}
ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME}
ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph
{% else %}
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME}
{% endif %}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if container_binary == 'podman' %}
  -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -46,14 +46,14 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if radosgw_frontend_ssl_certificate -%}
  -v {{ radosgw_frontend_ssl_certificate }}:{{ radosgw_frontend_ssl_certificate }} \
{% endif -%}
  --name=ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME} \
  --name=ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME} \
  --entrypoint=/usr/bin/radosgw \
  {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
  {{ (ceph_common_container_params['args'] + ceph_rgw_container_params['args'] | default([])) | join(' ') }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% else %}
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME}
{% endif %}
KillMode=none
Restart=always
@@ -58,7 +58,7 @@
    (groups.get(mgr_group_name, []) | length == 0 and inventory_hostname in groups.get(mon_group_name, []))

- name: Get rgw log
  ansible.builtin.shell: journalctl -l -u ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} > /var/log/ceph/ceph-radosgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}.log
  ansible.builtin.shell: journalctl -l -u ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} > /var/log/ceph/ceph-radosgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}.log
  changed_when: false
  with_items: "{{ rgw_instances | default([]) }}"
  when: inventory_hostname in groups.get(rgw_group_name, [])
@@ -13,9 +13,10 @@ class TestRGWs(object):

    def test_rgw_service_enabled_and_running(self, node, host):
        for i in range(int(node["radosgw_num_instances"])):
            service_name = "ceph-radosgw@rgw.{hostname}.rgw{seq}".format(
            service_name = "ceph-radosgw@rgw.{rgw_zone}.{hostname}.rgw{seq}".format(
                hostname=node["vars"]["inventory_hostname"],
                seq=i
                seq=i,
                rgw_zone=node["vars"].get("rgw_zone", "default"),
            )
            s = host.service(service_name)
            assert s.is_enabled
tox.ini
@@ -62,6 +62,7 @@ commands=

# set up the cluster again
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
no_log_on_ceph_key_tasks=false \
yes_i_know=true \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
@@ -81,6 +82,7 @@ commands=

# set up the cluster again
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
no_log_on_ceph_key_tasks=false \
yes_i_know=true \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
@@ -179,6 +181,7 @@ commands=
commands=
ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mgrs {toxinidir}/tests/functional/setup.yml
ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
no_log_on_ceph_key_tasks=false \
yes_i_know=true \
ireallymeanit=yes \
ceph_docker_registry_auth=True \
@@ -191,6 +194,7 @@ commands=
commands=
ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml
ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
no_log_on_ceph_key_tasks=false \
yes_i_know=true \
ireallymeanit=yes \
ceph_docker_registry_auth=True \
@@ -203,6 +207,7 @@ commands=
commands=
ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/tests/functional/setup.yml
ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
no_log_on_ceph_key_tasks=false \
yes_i_know=true \
ireallymeanit=yes \
ceph_docker_registry_auth=True \
@@ -215,6 +220,7 @@ commands=
commands=
ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rgws {toxinidir}/tests/functional/setup.yml
ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rgws {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
no_log_on_ceph_key_tasks=false \
yes_i_know=true \
ireallymeanit=yes \
ceph_docker_registry_auth=True \