diff --git a/Vagrantfile b/Vagrantfile
index cc45504c3..b10d9396f 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -77,7 +77,6 @@ ansible_provision = proc do |ansible|
       containerized_deployment: 'true',
       monitor_interface: ETH,
       ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
-      ceph_osd_docker_devices: settings['disks'],
       devices: settings['disks'],
       ceph_docker_on_openstack: BOX == 'openstack',
       ceph_rgw_civetweb_port: 8080,
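
For context: with ceph_osd_docker_devices gone from the Vagrantfile and the group_vars below, a containerized OSD host is described with the same variables as a bare-metal one. A minimal osds group_vars sketch under the new scheme (illustrative only, not part of the patch; the two disks and the filestore choice are assumptions):

    containerized_deployment: true
    osd_scenario: collocated
    osd_objectstore: filestore
    devices:
      - /dev/sda
      - /dev/sdb
    # extra prepare-time flags are now plain environment strings, e.g.:
    ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
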
diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index 55f66daa8..777a5615b 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -227,40 +227,22 @@ dummy:
 #ceph_config_keys: [] # DON'T TOUCH ME
 
 # PREPARE DEVICE
-# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly.
-# This is why we use [0] in the example.
 #
 # WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
 #
-# Examples:
-# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
-# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
-# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
-# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
-# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-#
-#ceph_osd_docker_devices: "{{ devices }}"
-#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+#ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
 
 # ACTIVATE DEVICE
-# Examples:
-# Journal collocated or Dedicated journal on Filesore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
-# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
 #
-#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
+#ceph_osd_docker_extra_env:
 #ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
 
 
 ###########
 # SYSTEMD #
 ###########
+
 # ceph_osd_systemd_overrides will override the systemd settings
 # for the ceph-osd services.
 # For example,to set "PrivateDevices=false" you can specify:
diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml
index 4558fc30f..7941752bd 100644
--- a/infrastructure-playbooks/purge-docker-cluster.yml
+++ b/infrastructure-playbooks/purge-docker-cluster.yml
@@ -240,7 +240,7 @@
         name: "ceph-osd@{{ item | basename }}"
         state: stopped
         enabled: no
-      with_items: "{{ ceph_osd_docker_devices }}"
+      with_items: "{{ devices }}"
       ignore_errors: true
 
     - name: remove ceph osd prepare container
@@ -248,7 +248,7 @@
         image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
         name: "ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
         state: absent
-      with_items: "{{ ceph_osd_docker_devices }}"
+      with_items: "{{ devices }}"
      ignore_errors: true
 
     - name: remove ceph osd container
@@ -256,7 +256,7 @@
         image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
         name: "ceph-osd-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
         state: absent
-      with_items: "{{ ceph_osd_docker_devices }}"
+      with_items: "{{ devices }}"
      ignore_errors: true
 
     - name: zap ceph osd disks
@@ -269,7 +269,7 @@
           {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
           zap_device
       with_items:
-        - "{{ ceph_osd_docker_devices }}"
+        - "{{ devices }}"
        - "{{ dedicated_devices|default([]) }}"
 
     - name: wait until the zap containers die
@@ -287,7 +287,7 @@
         name: "ceph-osd-zap-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
         state: absent
       with_items:
-        - "{{ ceph_osd_docker_devices }}"
+        - "{{ devices }}"
        - "{{ dedicated_devices|default([]) }}"
 
     - name: remove ceph osd service
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index 535c2201d..e569639b4 100644
--- a/infrastructure-playbooks/rolling_update.yml
+++ b/infrastructure-playbooks/rolling_update.yml
@@ -267,7 +267,7 @@
         name: ceph-osd@{{ item | basename }}
         state: restarted
         enabled: yes
-      with_items: "{{ ceph_osd_docker_devices }}"
+      with_items: "{{ devices }}"
       when:
         - ansible_service_mgr == 'systemd'
         - containerized_deployment
diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
index 746f06595..6203ca29b 100644
--- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
+++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
@@ -178,7 +178,7 @@
     - name: collect osd devices
       shell: |
         blkid | awk '/ceph data/ { sub ("1:", "", $1); print $1 }'
-      register: ceph_osd_docker_devices
+      register: devices
       changed_when: false
 
     - name: stop non-containerized ceph osd(s)
@@ -223,7 +223,7 @@
         docker ps | grep -sq {{ item | regex_replace('/', '') }}
       changed_when: false
       failed_when: false
-      with_items: "{{ ceph_osd_docker_devices.stdout_lines }}"
+      with_items: "{{ devices.stdout_lines }}"
      register: osd_running
 
     - name: unmount all the osd directories
@@ -231,12 +231,12 @@
       changed_when: false
       failed_when: false
      with_together:
-        - "{{ ceph_osd_docker_devices.stdout_lines }}"
+        - "{{ devices.stdout_lines }}"
        - "{{ osd_running.results }}"
       when:
         - item.1.get("rc", 0) != 0
 
-    - set_fact: ceph_osd_docker_devices={{ ceph_osd_docker_devices.stdout_lines }}
+    - set_fact: devices={{ devices.stdout_lines }}
 
   roles:
     - ceph-defaults
diff --git a/roles/ceph-docker-common/tasks/fetch_configs.yml b/roles/ceph-docker-common/tasks/fetch_configs.yml
index d36178274..9b7385730 100644
--- a/roles/ceph-docker-common/tasks/fetch_configs.yml
+++ b/roles/ceph-docker-common/tasks/fetch_configs.yml
@@ -56,5 +56,5 @@
   changed_when: false
   with_together:
     - "{{ ceph_config_keys }}"
-    - "{{ statconfig.results }}"
+    - "{{ statconfig.results | default([]) }}"
   when: item.1.stat.exists == true
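
A side note on the fetch_configs.yml hunk above: filtering the registered variable through default([]) keeps the with_together input defined even when the stat task produced no results, so the loop simply iterates nothing instead of failing on an undefined variable. The same guard, sketched on a hypothetical task (illustrative only):

    - debug:
        msg: "{{ item.0 }} exists on the monitor"
      with_together:
        - "{{ ceph_config_keys }}"
        - "{{ statconfig.results | default([]) }}"
      when: item.1.stat.exists == true
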
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index d6d3511ff..2c6588541 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -219,40 +219,22 @@ lvm_volumes: []
 ceph_config_keys: [] # DON'T TOUCH ME
 
 # PREPARE DEVICE
-# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly.
-# This is why we use [0] in the example.
 #
 # WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
 #
-# Examples:
-# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
-# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
-# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
-# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
-# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-# ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
 
 # ACTIVATE DEVICE
-# Examples:
-# Journal collocated or Dedicated journal on Filesore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
-# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
 #
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
+ceph_osd_docker_extra_env:
 ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
 
 ###########
 # SYSTEMD #
 ###########
+
 # ceph_osd_systemd_overrides will override the systemd settings
 # for the ceph-osd services.
 # For example,to set "PrivateDevices=false" you can specify:
diff --git a/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml b/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml
new file mode 100644
index 000000000..2906095e3
--- /dev/null
+++ b/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml
@@ -0,0 +1,81 @@
+---
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
+  when:
+    - osd_objectstore == 'bluestore'
+    - not dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
+  when:
+    - osd_objectstore == 'filestore'
+    - not dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }}"
+  when:
+    - osd_objectstore == 'filestore'
+    - not dmcrypt
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
+  when:
+    - osd_objectstore == 'bluestore'
+    - dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - dmcrypt
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    docker_env_args: -e KV_TYPE={{ kv_type }} -e KV_IP={{ kv_endpoint }} -e KV_PORT={{ kv_port }}
+  when:
+    - containerized_deployment_with_kv
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'filestore'
+    - not dmcrypt
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'filestore'
+    - dmcrypt
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'bluestore'
+    - not dmcrypt
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'bluestore'
+    - dmcrypt
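
To make the new fact file concrete: for a given host the two facts collapse to a single ceph-disk option string or a single block of container environment flags. Two resolved values, both taken verbatim from the table above (the cluster name is left as the variable):

    # non-containerized, bluestore + dmcrypt, luminous or newer
    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
    # containerized, filestore + dmcrypt
    docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
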
diff --git a/roles/ceph-osd/tasks/check_devices.yml b/roles/ceph-osd/tasks/check_devices.yml
index 8d68cfd38..36bdfa092 100644
--- a/roles/ceph-osd/tasks/check_devices.yml
+++ b/roles/ceph-osd/tasks/check_devices.yml
@@ -40,10 +40,10 @@
   always_run: true
   register: journal_partition_status
   when:
-    - osd_scenario == 'non-collocated'
     - item.0.rc != 0
+    - osd_scenario == 'non-collocated'
 
-- name: fix partitions gpt header or labels of the journal devices
+- name: fix partitions gpt header or labels of the journal device(s)
   shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
   with_together:
     - "{{ journal_partition_status.results }}"
@@ -51,5 +51,18 @@
   changed_when: false
   when:
     - not item.0.get("skipped")
-    - osd_scenario == 'non-collocated'
     - item.0.get("rc", 0) != 0
+    - osd_scenario == 'non-collocated'
+    - not containerized_deployment
+
+- name: create gpt disk label of the journal device(s)
+  command: parted --script {{ item.1 }} mklabel gpt
+  with_together:
+    - "{{ osd_partition_status_results.results }}"
+    - "{{ dedicated_devices|unique }}"
+  changed_when: false
+  when:
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - osd_scenario == 'non-collocated'
+    - containerized_deployment
diff --git a/roles/ceph-osd/tasks/check_devices_auto.yml b/roles/ceph-osd/tasks/check_devices_auto.yml
index e05f96dfd..7240b67f0 100644
--- a/roles/ceph-osd/tasks/check_devices_auto.yml
+++ b/roles/ceph-osd/tasks/check_devices_auto.yml
@@ -54,10 +54,27 @@
   changed_when: false
   when:
     - ansible_devices is defined
-    - item.0.item.value.removable == 0
-    - item.0.item.value.partitions|count == 0
-    - item.0.item.value.holders|count == 0
-    - item.0.rc != 0
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - item.1.value.removable == 0
+    - item.1.value.partitions|count == 0
+    - item.1.value.holders|count == 0
+    - not containerized_deployment
+
+- name: create gpt disk label
+  command: parted --script {{ item.1 }} mklabel gpt
+  with_together:
+    - "{{ osd_partition_status_results.results }}"
+    - "{{ ansible_devices }}"
+  changed_when: false
+  when:
+    - ansible_devices is defined
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - item.1.value.removable == 0
+    - item.1.value.partitions|count == 0
+    - item.1.value.holders|count == 0
+    - containerized_deployment
 
 - name: check if a partition named 'ceph' exists (autodiscover disks)
   shell: "parted --script /dev/{{ item.key }} print | egrep -sq '^ 1.*ceph'"
diff --git a/roles/ceph-osd/tasks/check_devices_static.yml b/roles/ceph-osd/tasks/check_devices_static.yml
index cdcaff04e..9d82ce335 100644
--- a/roles/ceph-osd/tasks/check_devices_static.yml
+++ b/roles/ceph-osd/tasks/check_devices_static.yml
@@ -32,6 +32,18 @@
   when:
     - not item.0.get("skipped")
     - item.0.get("rc", 0) != 0
+    - not containerized_deployment
+
+- name: create gpt disk label
+  command: parted --script {{ item.1 }} mklabel gpt
+  with_together:
+    - "{{ osd_partition_status_results.results }}"
+    - "{{ devices }}"
+  changed_when: false
+  when:
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - containerized_deployment
 
 - name: check if a partition named 'ceph' exists
   shell: "parted --script {{ item.1 }} print | egrep -sq '^ 1.*ceph'"
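
The three check_devices changes above split the disk-label handling by deployment type; for a blank device, say /dev/sdb (device name assumed for illustration), the two paths boil down to:

    # non-containerized: repair the GPT header/label in place
    sgdisk --zap-all --clear --mbrtogpt -- /dev/sdb
    # containerized: only write a fresh GPT label; the prepare container partitions the disk itself
    parted --script /dev/sdb mklabel gpt
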
diff --git a/roles/ceph-osd/tasks/docker/start_docker_osd.yml b/roles/ceph-osd/tasks/docker/start_docker_osd.yml
index 13e248fde..27039092c 100644
--- a/roles/ceph-osd/tasks/docker/start_docker_osd.yml
+++ b/roles/ceph-osd/tasks/docker/start_docker_osd.yml
@@ -9,62 +9,6 @@
     state: unmounted
   when: ceph_docker_on_openstack
 
-- name: verify if the disk was already prepared
-  shell: "lsblk -o PARTLABEL {{ item }} | grep -sq 'ceph'"
-  failed_when: false
-  always_run: true
-  with_items: "{{ ceph_osd_docker_devices }}"
-  register: osd_prepared
-
-# use shell rather than docker module
-# to ensure osd disk prepare finishes before
-# starting the next task
-- name: prepare ceph osd disk
-  shell: |
-    docker run --net=host \
-    --pid=host \
-    --privileged=true \
-    --name="ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.0 | regex_replace('/', '') }}" \
-    -v /etc/ceph:/etc/ceph \
-    -v /var/lib/ceph/:/var/lib/ceph/ \
-    -v /dev:/dev \
-    -v /etc/localtime:/etc/localtime:ro \
-    -e "OSD_DEVICE={{ item.0 }}" \
-    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-    {{ ceph_osd_docker_prepare_env }} \
-    "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-  with_together:
-    - "{{ ceph_osd_docker_devices }}"
-    - "{{ osd_prepared.results }}"
-  when:
-    - item.1.get("rc", 0) != 0
-    - ceph_osd_docker_prepare_env is defined
-    - not containerized_deployment_with_kv
-
-- name: prepare ceph osd disk with kv_store
-  shell: |
-    docker run --net=host \
-    --pid=host \
-    --privileged=true \
-    --name="ceph-osd-prepare-{{ ansible_hostname }}-dev-{{ item.0 | regex_replace('/', '') }}" \
-    -v /dev:/dev \
-    -v /etc/localtime:/etc/localtime:ro \
-    -e "OSD_DEVICE={{ item.0 }}" \
-    -e "{{ ceph_osd_docker_prepare_env }}" \
-    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-    -e KV_TYPE={{kv_type}} \
-    -e KV_IP={{kv_endpoint}} \
-    -e KV_PORT={{kv_port}} \
-    {{ ceph_osd_docker_prepare_env }} \
-    "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" \
-  with_together:
-    - "{{ ceph_osd_docker_devices }}"
-    - "{{ osd_prepared.results }}"
-  when:
-    - item.1.get("rc", 0) != 0
-    - ceph_osd_docker_prepare_env is defined
-    - containerized_deployment_with_kv
-
 - name: generate ceph osd docker run script
   become: true
   template:
@@ -86,7 +30,7 @@
 - name: enable systemd unit file for osd instance
   shell: systemctl enable ceph-osd@{{ item | basename }}.service
   changed_when: false
-  with_items: "{{ ceph_osd_docker_devices }}"
+  with_items: "{{ devices }}"
 
 - name: reload systemd unit files
   shell: systemctl daemon-reload
@@ -98,4 +42,4 @@
     state: started
     enabled: yes
     changed_when: false
-  with_items: "{{ ceph_osd_docker_devices }}"
+  with_items: "{{ devices }}"
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index 1784e27fc..457d23e93 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -6,17 +6,19 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
+- include: ceph_disk_cli_options_facts.yml
+
+- include: check_devices.yml
+
 - include: ./scenarios/collocated.yml
   when:
     - osd_scenario == 'collocated'
-    - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
 - include: ./scenarios/non-collocated.yml
   when:
     - osd_scenario == 'non-collocated'
-    - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
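
start_docker_osd.yml now loops the systemd units over devices directly; each unit ends up invoking the templated ceph-osd-run.sh with the device basename as its argument. Roughly, for /dev/sdb and the default script path (both assumed here; the exact ExecStart lives in the unit template, which this patch does not touch):

    systemctl enable ceph-osd@sdb.service
    # which in turn runs something like:
    /usr/share/ceph-osd-run.sh sdb
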
diff --git a/roles/ceph-osd/tasks/scenarios/collocated.yml b/roles/ceph-osd/tasks/scenarios/collocated.yml
index b4119abeb..ff79d34bc 100644
--- a/roles/ceph-osd/tasks/scenarios/collocated.yml
+++ b/roles/ceph-osd/tasks/scenarios/collocated.yml
@@ -1,63 +1,59 @@
 ---
-## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
-
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
+# use shell rather than docker module
+# to ensure osd disk prepare finishes before
+# starting the next task
+- name: prepare ceph containerized osd disk collocated
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE={{ item.1 }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ devices }}"
   when:
-    - osd_objectstore == 'bluestore'
-    - osd_scenario == 'collocated'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not item.0.get("skipped")
+    - not osd_auto_discovery
+    - containerized_deployment
 
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
+- name: automatic prepare ceph containerized osd disk collocated
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-devdev{{ item.key }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE=/dev/{{ item.key }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_dict: "{{ ansible_devices }}"
   when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }}"
-  when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
-  when:
-    - osd_objectstore == 'bluestore'
-    - osd_scenario == 'collocated'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+    - ansible_devices is defined
+    - item.value.removable == "0"
+    - item.value.partitions|count == 0
+    - item.value.holders|count == 0
+    - osd_auto_discovery
+    - containerized_deployment
 
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: automatic prepare "{{ osd_objectstore }}" osd disk(s) without partitions with collocated osd data and journal
+- name: automatic prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) without partitions with collocated osd data and journal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} /dev/{{ item.key }}"
   register: prepared_osds
   with_dict: "{{ ansible_devices }}"
@@ -67,8 +63,9 @@
     - item.value.partitions|count == 0
     - item.value.holders|count == 0
     - osd_auto_discovery
+    - not containerized_deployment
 
-- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with collocated osd data and journal
+- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with collocated osd data and journal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -80,5 +77,6 @@
    - item.0.get("rc", 0) != 0
    - item.1.get("rc", 0) != 0
    - not osd_auto_discovery
+    - not containerized_deployment
 
 - include: ../activate_osds.yml
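
For a single disk, the containerized collocated prepare task above renders to roughly the following command (hostname osd0, device /dev/sdb, filestore without dmcrypt, journal_size 100 and the default ceph/daemon image are all illustrative assumptions, not values fixed by this patch):

    docker run --net=host --pid=host --privileged=true \
      --name=ceph-osd-prepare-osd0-devsdb \
      -v /etc/ceph:/etc/ceph -v /var/lib/ceph/:/var/lib/ceph/ \
      -v /dev:/dev -v /etc/localtime:/etc/localtime:ro \
      -e CLUSTER=ceph \
      -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
      -e OSD_DEVICE=/dev/sdb \
      -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0 \
      -e OSD_JOURNAL_SIZE=100 \
      docker.io/ceph/daemon:latest
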
diff --git a/roles/ceph-osd/tasks/scenarios/non-collocated.yml b/roles/ceph-osd/tasks/scenarios/non-collocated.yml
index 5a181c7fa..9a744300c 100644
--- a/roles/ceph-osd/tasks/scenarios/non-collocated.yml
+++ b/roles/ceph-osd/tasks/scenarios/non-collocated.yml
@@ -1,53 +1,64 @@
 ---
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
+# use shell rather than docker module
+# to ensure osd disk prepare finishes before
+# starting the next task
+- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE={{ item.1 }} \
+    -e OSD_JOURNAL={{ item.2 }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ devices }}"
+    - "{{ dedicated_devices }}"
   when:
+    - not item.0.get("skipped")
+    - not osd_auto_discovery
+    - containerized_deployment
+    - osd_objectstore == 'filestore'
+
+- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated with a dedicated device for db and wal
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE={{ item.1 }} \
+    -e OSD_BLUESTORE_BLOCK_DB={{ item.2 }} \
+    -e OSD_BLUESTORE_BLOCK_WAL={{ item.3 }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ devices }}"
+    - "{{ dedicated_devices }}"
+    - "{{ bluestore_wal_devices }}"
+  when:
+    - not item.0.get("skipped")
+    - not osd_auto_discovery
+    - containerized_deployment
     - osd_objectstore == 'bluestore'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
 
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
-  when:
-    - osd_objectstore == 'filestore'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }}"
-  when:
-    - osd_objectstore == 'filestore'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
-  when:
-    - osd_objectstore == 'bluestore'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
-- name: prepare filestore osd disk(s) non-collocated
+- name: prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) non-collocated
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }} {{ item.3 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -60,9 +71,9 @@
    - not item.1.get("skipped")
    - item.1.get("rc", 0) != 0
    - osd_objectstore == 'filestore'
-    - not osd_auto_discovery
+    - not containerized_deployment
 
-- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with a dedicated device for db and wal
+- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with a dedicated device for db and wal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -73,6 +84,6 @@
    - not item.0.get("skipped")
    - item.0.get("rc", 0) != 0
    - osd_objectstore == 'bluestore'
-    - not osd_auto_discovery
+    - not containerized_deployment
 
 - include: ../activate_osds.yml
diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2
index fd00f69f3..1ac606f53 100644
--- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2
+++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2
@@ -2,22 +2,47 @@
 
 # {{ ansible_managed }}
 
+#############
+# VARIABLES #
+#############
+
+REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+
+
 #############
 # FUNCTIONS #
 #############
-function create_dev_list {
-  local regex
+
+function expose_devices {
   local disks
-  regex="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
   # we use the prepare container to find the partitions to expose
-  disks=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo /dev/disk/by-partuuid/${regex} | uniq)
+  disks=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
   for disk in $disks; do
-    DEVICES="--device $disk "
+    DEVICES="--device=$disk "
   done
 }
 
-create_dev_list $1
+function expose_partitions {
+  local partition
+  for partition in Block.wal Block.db Journal; do
+    if docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo "$partition is GPT partition"; then
+      if [[ "$partition" == "Block.wal" ]]; then
+        part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+        DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_WAL=$part"
+      elif [[ "$partition" == "Block.db" ]]; then
+        part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+        DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_DB=$part"
+      elif [[ "$partition" == "Journal" ]]; then
+        part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+        DOCKER_ENV="$DOCKER_ENV -e OSD_JOURNAL=$part"
+      fi
+    fi
+  done
+}
+
+#expose_devices $1
+expose_partitions $1
 
 
 ########
@@ -28,29 +53,40 @@ create_dev_list $1
   --rm \
   --net=host \
   --privileged=true \
+  --pid=host \
+  -v /dev:/dev \
+  -v /etc/localtime:/etc/localtime:ro \
+  -v /var/lib/ceph:/var/lib/ceph \
+  -v /etc/ceph:/etc/ceph \
+  $DOCKER_ENV \
  {% if ansible_distribution == 'Ubuntu' -%}
   --security-opt apparmor:unconfined \
  {% endif -%}
-  --pid=host \
  {% if not containerized_deployment_with_kv -%}
-  -v /var/lib/ceph:/var/lib/ceph \
-  -v /etc/ceph:/etc/ceph \
  {% else -%}
-  -e KV_TYPE={{kv_type}} \
-  -e KV_IP={{kv_endpoint}} \
-  -e KV_PORT={{kv_port}} \
+  -e KV_TYPE={{ kv_type }} \
+  -e KV_IP={{ kv_endpoint }} \
+  -e KV_PORT={{ kv_port }} \
  {% endif -%}
-  -v /dev:/dev \
-  -v /etc/localtime:/etc/localtime:ro \
-  --device=/dev/${1} \
-  --device=/dev/${1}1 \
-  {% if dedicated_devices|length > 0 -%}
-  -e OSD_JOURNAL={{ dedicated_devices[0] }} \
-  {% else -%}
-  --device=/dev/${1}2 \
+  {% if osd_objectstore == 'filestore' and not dmcrypt -%}
+  -e OSD_FILESTORE=1 \
+  -e OSD_DMCRYPT=0 \
  {% endif -%}
+  {% if osd_objectstore == 'filestore' and dmcrypt -%}
+  -e OSD_FILESTORE=1 \
+  -e OSD_DMCRYPT=1 \
+  {% endif -%}
+  {% if osd_objectstore == 'bluestore' and not dmcrypt -%}
+  -e OSD_BLUESTORE=1 \
+  -e OSD_DMCRYPT=0 \
+  {% endif -%}
+  {% if osd_objectstore == 'bluestore' and dmcrypt -%}
+  -e OSD_BLUESTORE=1 \
+  -e OSD_DMCRYPT=1 \
+  {% endif -%}
+  -e CLUSTER={{ cluster }} \
   -e OSD_DEVICE=/dev/${1} \
+  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
   {{ ceph_osd_docker_extra_env }} \
   --name=ceph-osd-{{ ansible_hostname }}-dev${1} \
-  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
diff --git a/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all b/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all
index 6b701dc86..ddd17de7b 100644
--- a/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all
@@ -8,7 +8,7 @@ monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: collocated
 dmcrypt: true
-osd_objectstore: "bluestore"
+osd_objectstore: bluestore
 devices:
   - '/dev/sda'
   - '/dev/sdb'
diff --git a/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all b/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all
index 7b96a4f7e..507ab648b 100644
--- a/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all
@@ -10,7 +10,7 @@ monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: collocated
 dmcrypt: true
-osd_objectstore: "bluestore"
+osd_objectstore: bluestore
 devices:
   - '/dev/sda'
   - '/dev/sdb'
@@ -24,5 +24,4 @@ ceph_conf_overrides:
   osd:
     bluestore block db size = 67108864
     bluestore block wal size = 1048576000
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
diff --git a/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all b/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all
index 44232c7c6..2306ba6ee 100644
--- a/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all
+++ b/tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all
@@ -14,11 +14,11 @@ ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
+osd_scenario: non-collocated
+osd_objectstore: bluestore
 devices:
   - /dev/sda
 dedicated_devices:
   - /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
+ceph_osd_docker_prepare_env: -e OSD_FORCE_ZAP=1
 ceph_osd_docker_run_script_path: /var/tmp
diff --git a/tests/functional/centos/7/bs-docker/group_vars/all b/tests/functional/centos/7/bs-docker/group_vars/all
index 6f34d9eae..94a47e260 100644
--- a/tests/functional/centos/7/bs-docker/group_vars/all
+++ b/tests/functional/centos/7/bs-docker/group_vars/all
@@ -15,9 +15,7 @@ public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 osd_scenario: collocated
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
+ceph_osd_docker_prepare_env: -e OSD_FORCE_ZAP=1
 devices:
   - /dev/sda
   - /dev/sdb
diff --git a/tests/functional/centos/7/cluster/group_vars/all b/tests/functional/centos/7/cluster/group_vars/all
index e90587a26..7434438fc 100644
--- a/tests/functional/centos/7/cluster/group_vars/all
+++ b/tests/functional/centos/7/cluster/group_vars/all
@@ -6,7 +6,7 @@ public_network: "192.168.1.0/24"
 cluster_network: "192.168.2.0/24"
 journal_size: 100
 radosgw_interface: eth1
-osd_objectstore: "filestore"
+osd_objectstore: filestore
 devices:
   - '/dev/sda'
 dedicated_devices:
diff --git a/tests/functional/centos/7/crypt-ded-jrn/group_vars/all b/tests/functional/centos/7/crypt-ded-jrn/group_vars/all
index ef1fa393c..2e855dbbd 100644
--- a/tests/functional/centos/7/crypt-ded-jrn/group_vars/all
+++ b/tests/functional/centos/7/crypt-ded-jrn/group_vars/all
@@ -7,7 +7,8 @@ journal_size: 100
 monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: non-collocated
-osd_objectstore: "filestore"
+dmcrypt: true
+osd_objectstore: filestore
 devices:
   - '/dev/sda'
 dedicated_devices:
diff --git a/tests/functional/centos/7/crypt-jrn-col/group_vars/all b/tests/functional/centos/7/crypt-jrn-col/group_vars/all
index e7c2134de..87b9eb1d2 100644
--- a/tests/functional/centos/7/crypt-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/crypt-jrn-col/group_vars/all
@@ -7,7 +7,8 @@ journal_size: 100
 monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: collocated
-osd_objectstore: "filestore"
+osd_objectstore: filestore
+dmcrypt: true
 devices:
   - '/dev/sda'
   - '/dev/sdb'
diff --git a/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all b/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all
index b042cb62d..7bd64b54a 100644
--- a/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all
+++ b/tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all
@@ -14,11 +14,10 @@ ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 osd_scenario: collocated
+osd_objectstore: filestore
 dmcrypt: true
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
 devices:
   - /dev/sda
   - /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
diff --git a/tests/functional/centos/7/docker-ded-jrn/group_vars/all b/tests/functional/centos/7/docker-ded-jrn/group_vars/all
index 74c84409f..9b34c6325 100644
--- a/tests/functional/centos/7/docker-ded-jrn/group_vars/all
+++ b/tests/functional/centos/7/docker-ded-jrn/group_vars/all
@@ -14,11 +14,11 @@ ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
+osd_objectstore: filestore
 osd_scenario: non-collocated
 devices:
   - /dev/sda
 dedicated_devices:
   - /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
 ceph_osd_docker_run_script_path: /var/tmp
diff --git a/tests/functional/centos/7/docker/group_vars/all b/tests/functional/centos/7/docker/group_vars/all
index 864ec8c9f..a261b406a 100644
--- a/tests/functional/centos/7/docker/group_vars/all
+++ b/tests/functional/centos/7/docker/group_vars/all
@@ -15,9 +15,8 @@ public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 osd_scenario: collocated
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE
+osd_objectstore: filestore
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
 devices:
   - /dev/sda
   - /dev/sdb
diff --git a/tests/functional/centos/7/jrn-col/group_vars/all b/tests/functional/centos/7/jrn-col/group_vars/all
index 88d7a335d..0a0575f2b 100644
--- a/tests/functional/centos/7/jrn-col/group_vars/all
+++ b/tests/functional/centos/7/jrn-col/group_vars/all
@@ -7,7 +7,7 @@ cluster_network: "192.168.4.0/24"
 monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
-osd_objectstore: "filestore"
+osd_objectstore: filestore
 devices:
   - '/dev/sda'
   - '/dev/sdb'