mirror of https://github.com/ceph/ceph-ansible.git
parent 3bddb00120
commit 9237a98965
@@ -87,8 +87,8 @@
 - name: wait for all osd to be up
   command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd stat -f json"
   register: wait_for_all_osds_up
-  retries: "{{ nb_retry_wait_osd_up }}"
-  delay: "{{ delay_wait_osd_up }}"
+  retries: "86400"
+  delay: "1"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   until:
@@ -99,6 +99,13 @@
     - inventory_hostname == ansible_play_hosts_all | last
   tags: wait_all_osds_up

+- name: sleep 365d
+  command: sleep 365d
+  when:
+    - not ansible_check_mode
+    - inventory_hostname == ansible_play_hosts_all | last
+    - sleep_after_fs2bs | default(False) | bool
+
 - name: include crush_rules.yml
   include_tasks: crush_rules.yml
   when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool
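With these values the play polls the first monitor with "ceph osd stat -f json" once per second for up to 86400 attempts, roughly 24 hours, instead of the configurable nb_retry_wait_osd_up/delay_wait_osd_up defaults. The hunk cuts off at "until:"; as a hedged sketch only, a condition of this kind typically compares OSD counts from the registered JSON, along these lines (the exact keys are an assumption and their nesting varies across Ceph releases):

until:
  # assumed check, not taken from this diff: every reported OSD is up
  - (wait_for_all_osds_up.stdout | from_json)["num_osds"] | int > 0
  - (wait_for_all_osds_up.stdout | from_json)["num_up_osds"] == (wait_for_all_osds_up.stdout | from_json)["num_osds"]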
@@ -2,9 +2,5 @@
 mon0

 [osds]
-osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
-osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
-osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=1024
-osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
-osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true
-osd5 osd_objectstore=filestore osd_auto_discovery=true journal_size=1024
+osd0 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
+osd1 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
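The new inventory keeps only two filestore OSD hosts, each using whole data disks with journal partitions carved from /dev/sdc; the dmcrypt, dedicated_devices and osd_auto_discovery variants are dropped (and osd_vms shrinks to match in the Vagrant variables below). Purely as an illustration, the same storage layout expressed as group_vars YAML rather than inline host vars would read:

# illustrative group_vars equivalent of the inline lvm_volumes host variable above
osd_objectstore: filestore
lvm_volumes:
  - data: /dev/sda
    journal: /dev/sdc1
  - data: /dev/sdb
    journal: /dev/sdc2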
@@ -5,7 +5,7 @@ docker: false
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 1
-osd_vms: 6
+osd_vms: 2
 mds_vms: 0
 rgw_vms: 0
 nfs_vms: 0
@@ -43,8 +43,7 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml

-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd0:osd1'
-ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd3:osd4' --tags partitions
+ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd0:osd1' --tags partitions

 # deploy the cluster
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
@@ -57,6 +56,7 @@ commands=
 ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/filestore-to-bluestore.yml --limit osds --extra-vars "\
 delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
 ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} \
+sleep_after_fs2bs=True \
 "

 bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:nautilus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
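The sleep_after_fs2bs=True extra-var exists only to trigger the sleep 365d task added in the first hunk: that task is guarded by "sleep_after_fs2bs | default(False) | bool", so normal runs of filestore-to-bluestore.yml are unaffected and only this test scenario opts in. A minimal, self-contained sketch of the same opt-in pattern (playbook and variable names here are illustrative, not from the repository):

# demo.yml -- run with: ansible-playbook demo.yml -e pause_for_debug=True
- hosts: localhost
  gather_facts: false
  tasks:
    - name: optional pause, skipped unless explicitly requested
      command: sleep 60   # shortened here; the playbook above uses sleep 365d
      when: pause_for_debug | default(False) | bool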