ceph-osd: remove ceph-osd-run.sh script

Since we only have one scenario since nautilus, we can move the
container start command from the ceph-osd-run.sh script directly into
the systemd service unit.
As a result, the ceph-osd-run.sh.j2 template and the
ceph_osd_docker_run_script_path variable are removed.

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit 829990e60d)
Dimitri Savineau 2020-06-09 15:19:57 -04:00 committed by Guillaume Abrioux
parent dcce4b1d5e
commit a99c94ea11
35 changed files with 47 additions and 111 deletions
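
The heart of the change is the ceph-osd.service.j2 hunk below: the unit
stops delegating to a generated wrapper script and runs the container
itself. In essence (flags elided here; the full list is in the diff):

# Before: systemd called a templated shell wrapper
ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
# After: the container run command is inlined in the unit
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host ... -e OSD_ID=%i --name=ceph-osd-%i {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}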

@@ -190,7 +190,6 @@ dummy:
 # ACTIVATE DEVICE
 #
 #ceph_osd_docker_extra_env:
-#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
 #ceph_osd_numactl_opts: ""
 ###########

@@ -324,18 +324,6 @@
             - "{{ playbook_dir }}/group_vars/osds.yml"
           skip: true
 
-    - name: find all osd_disk_prepare logs
-      find:
-        paths: "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}"
-        pattern: "ceph-osd-prepare-*.log"
-      register: osd_disk_prepare_logs
-
-    - name: ensure all osd_disk_prepare logs are removed
-      file:
-        path: "{{ item.path }}"
-        state: absent
-      with_items: "{{ osd_disk_prepare_logs.files }}"
-
 - name: purge ceph mon cluster
   hosts: "{{ mon_group_name|default('mons') }}"

@@ -182,7 +182,6 @@ ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
 # ACTIVATE DEVICE
 #
 ceph_osd_docker_extra_env:
-ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
 ceph_osd_numactl_opts: ""
 ###########
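
Deployments that override this default in their inventory, for example:

ceph_osd_docker_run_script_path: /opt

can simply drop the line after this change; the variable is no longer
read anywhere.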

@@ -1,17 +1,5 @@
 ---
-- name: generate ceph osd docker run script
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
-    dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
-    owner: "root"
-    group: "root"
-    mode: "0744"
-    setype: "bin_t"
-  notify: restart ceph osds
-
 - name: generate systemd unit file
-  become: true
   template:
     src: "{{ role_path }}/templates/ceph-osd.service.j2"
     dest: /etc/systemd/system/ceph-osd@.service
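
With the wrapper gone, the unit file is the single artifact systemd
needs. As a manual sanity check after the role has templated it (these
commands are illustrative, not part of the role):

systemctl daemon-reload
systemctl restart ceph-osd@0    # %i expands to "0", container ceph-osd-0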

@@ -1,55 +0,0 @@
-#!/bin/bash
-# {{ ansible_managed }}
-
-########
-# MAIN #
-########
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_osd_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_osd_docker_cpu_limit|int %}
-
-{% if ceph_osd_numactl_opts != "" %}
-numactl \
-{{ ceph_osd_numactl_opts }} \
-{% endif %}
-/usr/bin/{{ container_binary }} run \
-  --rm \
-  --net=host \
-  --privileged=true \
-  --pid=host \
-  --ipc=host \
-  {% if osd_objectstore == 'filestore' -%}
-  --memory={{ ceph_osd_docker_memory_limit }} \
-  {% endif -%}
-  {% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version('13', '>=')) or container_binary == 'podman' -%}
-  --cpus={{ cpu_limit }} \
-  {% else -%}
-  --cpu-quota={{ cpu_limit * 100000 }} \
-  {% endif -%}
-  {% if ceph_osd_docker_cpuset_cpus is defined -%}
-  --cpuset-cpus='{{ ceph_osd_docker_cpuset_cpus }}' \
-  {% endif -%}
-  {% if ceph_osd_docker_cpuset_mems is defined -%}
-  --cpuset-mems='{{ ceph_osd_docker_cpuset_mems }}' \
-  {% endif -%}
-  -v /dev:/dev \
-  -v /etc/localtime:/etc/localtime:ro \
-  -v /var/lib/ceph:/var/lib/ceph:z \
-  -v /etc/ceph:/etc/ceph:z \
-  -v /var/run/ceph:/var/run/ceph:z \
-  -v /var/run/udev/:/var/run/udev/ \
-  -v /var/log/ceph:/var/log/ceph:z \
-  {% if ansible_distribution == 'Ubuntu' -%}
-  --security-opt apparmor:unconfined \
-  {% endif -%}
-  {{ container_env_args }} \
-  -e CLUSTER={{ cluster }} \
-  {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
-  -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
-  {% endif -%}
-  -v /run/lvm/:/run/lvm/ \
-  -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
-  -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-  -e OSD_ID="$1" \
-  --name=ceph-osd-"$1" \
-  {{ ceph_osd_docker_extra_env }} \
-  {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
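
The cpu_limit expression at the top of the removed script survives
unchanged in the unit template below: it caps the configured CPU limit
at the host's vCPU count. A worked example with hypothetical values:

# ansible_processor_vcpus = 8, ceph_osd_docker_cpu_limit = 16  =>  cpu_limit = 8
# docker >= 13 or podman:  --cpus=8
# older docker:            --cpu-quota=800000  (8 * 100000 us, i.e. 8 full CPUs per period)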

@@ -7,12 +7,58 @@ Requires=docker.service
 {% else %}
 After=network.target
 {% endif %}
+{% set cpu_limit = ansible_processor_vcpus|int if ceph_osd_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_osd_docker_cpu_limit|int %}
 [Service]
 EnvironmentFile=-/etc/environment
 ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
 ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i
-ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
+ExecStart={% if ceph_osd_numactl_opts != "" %}
+numactl \
+{{ ceph_osd_numactl_opts }} \
+{% endif %}
+/usr/bin/{{ container_binary }} run \
+  --rm \
+  --net=host \
+  --privileged=true \
+  --pid=host \
+  --ipc=host \
+  {% if osd_objectstore == 'filestore' -%}
+  --memory={{ ceph_osd_docker_memory_limit }} \
+  {% endif -%}
+  {% if (container_binary == 'docker' and ceph_docker_version.split('.')[0] is version('13', '>=')) or container_binary == 'podman' -%}
+  --cpus={{ cpu_limit }} \
+  {% else -%}
+  --cpu-quota={{ cpu_limit * 100000 }} \
+  {% endif -%}
+  {% if ceph_osd_docker_cpuset_cpus is defined -%}
+  --cpuset-cpus='{{ ceph_osd_docker_cpuset_cpus }}' \
+  {% endif -%}
+  {% if ceph_osd_docker_cpuset_mems is defined -%}
+  --cpuset-mems='{{ ceph_osd_docker_cpuset_mems }}' \
+  {% endif -%}
+  -v /dev:/dev \
+  -v /etc/localtime:/etc/localtime:ro \
+  -v /var/lib/ceph:/var/lib/ceph:z \
+  -v /etc/ceph:/etc/ceph:z \
+  -v /var/run/ceph:/var/run/ceph:z \
+  -v /var/run/udev/:/var/run/udev/ \
+  -v /var/log/ceph:/var/log/ceph:z \
+  {% if ansible_distribution == 'Ubuntu' -%}
+  --security-opt apparmor:unconfined \
+  {% endif -%}
+  {{ container_env_args }} \
+  -e CLUSTER={{ cluster }} \
+  {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
+  -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
+  {% endif -%}
+  -v /run/lvm/:/run/lvm/ \
+  -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
+  -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+  -e OSD_ID=%i \
+  --name=ceph-osd-%i \
+  {{ ceph_osd_docker_extra_env }} \
+  {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
 KillMode=none
 Restart=always
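
Rendered with hypothetical values (container_binary=podman, empty
ceph_osd_numactl_opts, image docker.io/ceph/daemon:latest), the unit
boils down to:

ExecStart=/usr/bin/podman run \
  --rm \
  --net=host \
  ...
  -e OSD_ID=%i \
  --name=ceph-osd-%i \
  docker.io/ceph/daemon:latest

so that "systemctl start ceph-osd@2" runs a container named ceph-osd-2
with OSD_ID=2.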

@@ -24,5 +24,4 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -25,5 +25,4 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -25,5 +25,4 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -24,5 +24,4 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -25,5 +25,4 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -25,7 +25,6 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
 dashboard_enabled: False

@@ -33,7 +33,6 @@ rgw_create_pools:
     ec_profile: myecprofile
     ec_k: 2
     ec_m: 1
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -18,7 +18,6 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -27,7 +27,6 @@ rgw_create_pools:
     pg_num: 16
   bar:
     pg_num: 16
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -19,7 +19,6 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -19,7 +19,6 @@ ceph_conf_overrides:
   global:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10

@@ -22,7 +22,6 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10

@@ -24,7 +24,6 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10

@@ -18,7 +18,6 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10

@@ -63,7 +63,6 @@ all:
       - {name: backups, pg_num: 8, rule_name: 'replicated_rule'}
       - {name: vms, pg_num: 8, rule_name: 'replicated_rule'}
       - {name: volumes, pg_num: 8, rule_name: 'replicated_rule'}
-      ceph_osd_docker_run_script_path: /opt
       pools: []
       public_network: 192.168.95.0/24
       radosgw_address_block: 192.168.95.0/24

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -24,5 +24,4 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -24,5 +24,4 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -22,5 +22,4 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 journal_size: 100
 osd_objectstore: "filestore"
 lvm_volumes:

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1