mirror of https://github.com/ceph/ceph-ansible.git
ceph-osd: remove ceph-osd-run.sh script
Since we only have one scenario since Nautilus, we can move the
container start command from ceph-osd-run.sh into the systemd unit
service. As a result, the ceph-osd-run.sh.j2 template and the
ceph_osd_docker_run_script_path variable are removed.
Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit 829990e60d)
branch: pull/5457/head
parent: e1c8a0daf6
commit: 51cfb89501
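For orientation, here is a rough sketch of what the ceph-osd@.service template renders to on a host after this change (the container binary, image name, and environment values below are illustrative assumptions; the exact flags and Jinja conditionals are in the diff that follows):

# Sketch of a rendered /etc/systemd/system/ceph-osd@.service (abridged; example values)
[Unit]
Description=Ceph OSD
After=network.target

[Service]
EnvironmentFile=-/etc/environment
# clean up any stale container for this OSD id before starting
ExecStartPre=-/usr/bin/podman stop ceph-osd-%i
ExecStartPre=-/usr/bin/podman rm -f ceph-osd-%i
# the container start command now lives directly in ExecStart
# instead of being delegated to ceph-osd-run.sh
ExecStart=/usr/bin/podman run --rm --net=host --privileged=true --pid=host --ipc=host \
  -v /dev:/dev -v /var/lib/ceph:/var/lib/ceph:z -v /etc/ceph:/etc/ceph:z \
  -v /var/run/ceph:/var/run/ceph:z -v /var/log/ceph:/var/log/ceph:z \
  -e CLUSTER=ceph -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE -e OSD_ID=%i \
  --name=ceph-osd-%i quay.io/ceph/daemon:latest
ExecStop=-/usr/bin/podman stop ceph-osd-%i
KillMode=none
Restart=always

Each OSD runs as an instance of this template unit (for example, systemctl start ceph-osd@3), with systemd expanding %i to the OSD id in both the container name and the OSD_ID environment variable.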
@@ -190,7 +190,6 @@ dummy:
 # ACTIVATE DEVICE
 #
 #ceph_osd_docker_extra_env:
-#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
 #ceph_osd_numactl_opts: ""

 ###########

@@ -323,18 +323,6 @@
       - "{{ playbook_dir }}/group_vars/osds.yml"
       skip: true

-  - name: find all osd_disk_prepare logs
-    find:
-      paths: "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}"
-      pattern: "ceph-osd-prepare-*.log"
-    register: osd_disk_prepare_logs
-
-  - name: ensure all osd_disk_prepare logs are removed
-    file:
-      path: "{{ item.path }}"
-      state: absent
-    with_items: "{{ osd_disk_prepare_logs.files }}"
-
 - name: purge ceph mon cluster

   hosts: "{{ mon_group_name|default('mons') }}"

@@ -182,7 +182,6 @@ ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
 # ACTIVATE DEVICE
 #
 ceph_osd_docker_extra_env:
-ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
 ceph_osd_numactl_opts: ""

 ###########

@@ -1,17 +1,5 @@
 ---
-- name: generate ceph osd docker run script
-  become: true
-  template:
-    src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
-    dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
-    owner: "root"
-    group: "root"
-    mode: "0744"
-    setype: "bin_t"
-  notify: restart ceph osds
-
 - name: generate systemd unit file
-  become: true
   template:
     src: "{{ role_path }}/templates/ceph-osd.service.j2"
     dest: /etc/systemd/system/ceph-osd@.service

@@ -1,51 +0,0 @@
-#!/bin/bash
-# {{ ansible_managed }}
-
-
-########
-# MAIN #
-########
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_osd_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_osd_docker_cpu_limit|int %}
-
-{% if ceph_osd_numactl_opts != "" %}
-numactl \
-{{ ceph_osd_numactl_opts }} \
-{% endif %}
-/usr/bin/{{ container_binary }} run \
---rm \
---net=host \
---privileged=true \
---pid=host \
---ipc=host \
-{% if osd_objectstore == 'filestore' -%}
---memory={{ ceph_osd_docker_memory_limit }} \
-{% endif -%}
---cpus={{ cpu_limit }} \
-{% if ceph_osd_docker_cpuset_cpus is defined -%}
---cpuset-cpus='{{ ceph_osd_docker_cpuset_cpus }}' \
-{% endif -%}
-{% if ceph_osd_docker_cpuset_mems is defined -%}
---cpuset-mems='{{ ceph_osd_docker_cpuset_mems }}' \
-{% endif -%}
--v /dev:/dev \
--v /etc/localtime:/etc/localtime:ro \
--v /var/lib/ceph:/var/lib/ceph:z \
--v /etc/ceph:/etc/ceph:z \
--v /var/run/ceph:/var/run/ceph:z \
--v /var/run/udev/:/var/run/udev/ \
--v /var/log/ceph:/var/log/ceph:z \
-{% if ansible_distribution == 'Ubuntu' -%}
---security-opt apparmor:unconfined \
-{% endif -%}
-{{ container_env_args }} \
--e CLUSTER={{ cluster }} \
-{% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
--e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
-{% endif -%}
--v /run/lvm/:/run/lvm/ \
--e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
--e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
--e OSD_ID="$1" \
---name=ceph-osd-"$1" \
-{{ ceph_osd_docker_extra_env }} \
-{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}

@@ -7,12 +7,54 @@ Requires=docker.service
 {% else %}
 After=network.target
 {% endif %}
+{% set cpu_limit = ansible_processor_vcpus|int if ceph_osd_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_osd_docker_cpu_limit|int %}

 [Service]
 EnvironmentFile=-/etc/environment
 ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
 ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i
-ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
+ExecStart={% if ceph_osd_numactl_opts != "" %}
+numactl \
+{{ ceph_osd_numactl_opts }} \
+{% endif %}
+/usr/bin/{{ container_binary }} run \
+--rm \
+--net=host \
+--privileged=true \
+--pid=host \
+--ipc=host \
+{% if osd_objectstore == 'filestore' -%}
+--memory={{ ceph_osd_docker_memory_limit }} \
+{% endif -%}
+--cpus={{ cpu_limit }} \
+{% if ceph_osd_docker_cpuset_cpus is defined -%}
+--cpuset-cpus='{{ ceph_osd_docker_cpuset_cpus }}' \
+{% endif -%}
+{% if ceph_osd_docker_cpuset_mems is defined -%}
+--cpuset-mems='{{ ceph_osd_docker_cpuset_mems }}' \
+{% endif -%}
+-v /dev:/dev \
+-v /etc/localtime:/etc/localtime:ro \
+-v /var/lib/ceph:/var/lib/ceph:z \
+-v /etc/ceph:/etc/ceph:z \
+-v /var/run/ceph:/var/run/ceph:z \
+-v /var/run/udev/:/var/run/udev/ \
+-v /var/log/ceph:/var/log/ceph:z \
+{% if ansible_distribution == 'Ubuntu' -%}
+--security-opt apparmor:unconfined \
+{% endif -%}
+{{ container_env_args }} \
+-e CLUSTER={{ cluster }} \
+{% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
+-e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
+{% endif -%}
+-v /run/lvm/:/run/lvm/ \
+-e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
+-e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+-e OSD_ID=%i \
+--name=ceph-osd-%i \
+{{ ceph_osd_docker_extra_env }} \
+{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
 KillMode=none
 Restart=always

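One practical consequence worth noting (the OSD id below is an illustrative example): options such as ceph_osd_docker_cpu_limit, ceph_osd_docker_cpuset_cpus or ceph_osd_docker_extra_env are now rendered straight into ceph-osd@.service rather than into a helper script, so picking up a change means re-rendering the unit (normally by re-running the playbook) followed by a systemd reload and a restart of the affected instances, roughly:

# after the playbook has re-templated /etc/systemd/system/ceph-osd@.service
systemctl daemon-reload
systemctl restart ceph-osd@0      # "0" is an example OSD id
systemctl status ceph-osd@0 --no-pager
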
@@ -26,5 +26,4 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -26,5 +26,4 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -26,5 +26,4 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -26,5 +26,4 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -26,5 +26,4 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -26,7 +26,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
 dashboard_enabled: False

@@ -35,7 +35,6 @@ rgw_create_pools:
     ec_profile: myecprofile
     ec_k: 2
     ec_m: 1
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -20,7 +20,6 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -28,7 +28,6 @@ rgw_create_pools:
     pg_num: 16
   bar:
     pg_num: 16
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -20,7 +20,6 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -20,7 +20,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10

@@ -24,7 +24,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10

@@ -26,7 +26,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10

@@ -20,7 +20,6 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10

@@ -64,7 +64,6 @@ all:
 - {name: backups, pg_num: 8, rule_name: 'replicated_rule'}
 - {name: vms, pg_num: 8, rule_name: 'replicated_rule'}
 - {name: volumes, pg_num: 8, rule_name: 'replicated_rule'}
-ceph_osd_docker_run_script_path: /opt
 pools: []
 public_network: 192.168.95.0/24
 radosgw_address_block: 192.168.95.0/24

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -26,5 +26,4 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -26,5 +26,4 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -24,5 +24,4 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
 dashboard_enabled: False

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 journal_size: 100
 osd_objectstore: "filestore"
 lvm_volumes:

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1

@@ -1,5 +1,4 @@
 ---
-ceph_osd_docker_run_script_path: /var/tmp
 osd_objectstore: "bluestore"
 lvm_volumes:
   - data: data-lv1