From 774697ebd83ce6b1d18d2727e55a2a33be93608e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Sun, 8 Oct 2017 14:45:48 +0200
Subject: [PATCH 01/12] infra: use the pg check in the right place
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Apply the pg check to the "waiting for clean pgs" task, not to the
quorum check. Also, never quote an int when doing a comparison.

Signed-off-by: Sébastien Han
---
 infrastructure-playbooks/rolling_update.yml   | 61 +++++++------------
 ...inerized-to-containerized-ceph-daemons.yml |  2 +-
 2 files changed, 23 insertions(+), 40 deletions(-)

diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index 6690108db..5206f4d57 100644
--- a/infrastructure-playbooks/rolling_update.yml
+++ b/infrastructure-playbooks/rolling_update.yml
@@ -147,20 +147,6 @@
       when:
         - mon_host_count | int == 1

-    - name: get num_pgs - non container
-      command: ceph --cluster "{{ cluster }}" -s --format json
-      register: ceph_pgs
-      delegate_to: "{{ mon_host }}"
-      when:
-        - not containerized_deployment
-
-    - name: get num_pgs - container
-      command: docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
-      register: ceph_pgs
-      delegate_to: "{{ mon_host }}"
-      when:
-        - containerized_deployment
-
     - name: non container | waiting for the monitor to join the quorum...
       command: ceph --cluster "{{ cluster }}" -s --format json
       register: ceph_health_raw
@@ -171,7 +157,6 @@
       delegate_to: "{{ mon_host }}"
       when:
         - not containerized_deployment
-        - (ceph_pgs.stdout | from_json).pgmap.num_pgs != "0"

     - name: container | waiting for the containerized monitor to join the quorum...
       command: docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
@@ -183,7 +168,6 @@
       delegate_to: "{{ mon_host }}"
       when:
         - containerized_deployment
-        - (ceph_pgs.stdout | from_json).pgmap.num_pgs != "0"

     - name: set osd flags
       command: ceph osd set {{ item }} --cluster {{ cluster }}
@@ -293,32 +277,31 @@
         - ansible_service_mgr == 'systemd'
         - containerized_deployment

-    - name: waiting for clean pgs...
-      command: ceph --cluster "{{ cluster }}" -s --format json
-      register: ceph_health_post
-      until: >
-        ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1
-        and
-        (ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      retries: "{{ health_osd_check_retries }}"
-      delay: "{{ health_osd_check_delay }}"
-      when:
-        - not containerized_deployment
-
-    - name: container - waiting for clean pgs...
-      command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s --format json"
-      register: ceph_health_post
-      until: >
-        ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1
-        and
-        (ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      retries: "{{ health_osd_check_retries }}"
-      delay: "{{ health_osd_check_delay }}"
+    - name: set_fact docker_exec_cmd_osd
+      set_fact:
+        docker_exec_cmd_update_osd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
       when:
         - containerized_deployment

+    - name: get num_pgs - non container
+      command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
+      register: ceph_pgs
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+
+    - name: waiting for clean pgs...
+      command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
+      register: ceph_health_post
+      until: >
+        ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1
+        and
+        (ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      retries: "{{ health_osd_check_retries }}"
+      delay: "{{ health_osd_check_delay }}"
+      when:
+        - (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0
+

 - name: unset osd flags

   hosts:
diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
index 54a81e1ed..a11034e46 100644
--- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
+++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
@@ -296,7 +296,7 @@
       retries: "{{ health_osd_check_retries }}"
       delay: "{{ health_osd_check_delay }}"
       when:
-        - (ceph_pgs.stdout | from_json).pgmap.num_pgs != "0"
+        - (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0

 - name: switching from non-containerized to containerized ceph mds
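Why the unquoted comparison matters: `from_json` returns num_pgs as a real
integer, so `num_pgs != "0"` compares an int against a string and is always
true, which made the gated task run even on a cluster with zero pgs. A
minimal standalone sketch (not part of the playbooks) showing both forms:

- hosts: localhost
  gather_facts: false
  vars:
    ceph_status: '{"pgmap": {"num_pgs": 0}}'
  tasks:
    # Always true: the int 0 is never equal to the string "0".
    - name: quoted comparison (buggy)
      debug:
        msg: "would wait for clean pgs even though there are none"
      when: (ceph_status | from_json).pgmap.num_pgs != "0"

    # Correctly false when the cluster has no pgs.
    - name: unquoted comparison (fixed)
      debug:
        msg: "would wait for clean pgs"
      when: (ceph_status | from_json).pgmap.num_pgs != 0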
From 1bd891232c98ac254d40a6c05ee14aa093abc0d7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Sun, 8 Oct 2017 15:16:40 +0200
Subject: [PATCH 02/12] config: do not duplicate sections when doing collocation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Prior to this commit, when collocating an RGW and an NFS gateway on the
same box, the ceph.conf layout was the following:

[client.rgw.rgw0]
host = mds0
host = rgw0
rgw frontends = civetweb port=192.168.15.50:8080 num_threads=100[client.rgw.mds0]
rgw frontends = civetweb port=192.168.15.70:8080 num_threads=100
rgw frontends = civetweb port=192.168.15.50:8080 num_threads=100
keyring = /var/lib/ceph/radosgw/test-rgw.mds0/keyring
keyring = /var/lib/ceph/radosgw/test-rgw.rgw0/keyring
rgw data = /var/lib/ceph/radosgw/test-rgw.rgw0
log file = /var/log/ceph/test-rgw-mds0.log
log file = /var/log/ceph/test-rgw-rgw0.log

[mds.mds0]
host = mds0

[global]
rgw override bucket index max shards = 16
fsid = 70e1d368-57b3-4978-b746-cbffce6e56b5
rgw bucket default quota max objects = 1638400
osd_pool_default_size = 1
public network = 192.168.15.0/24
mon host = 192.168.15.10,192.168.15.11,192.168.15.12
osd_pool_default_pg_num = 8
cluster network = 192.168.16.0/24

[mds.rgw0]
host = rgw0

[client.rgw.mds0]
host = mds0
rgw data = /var/lib/ceph/radosgw/test-rgw.mds0
keyring = /var/lib/ceph/radosgw/test-rgw.mds0/keyring
rgw frontends = civetweb port=192.168.15.70:8080 num_threads=100
log file = /var/log/ceph/test-rgw-mds0.log

Basically, all the sections were appended for every collocated daemon.
This commit solves that. Now the sections appear like this:

-bash-4.2# cat /etc/ceph/test.conf
[client.rgw.rgw0]
log file = /var/log/ceph/test-rgw-rgw0.log
host = rgw0
keyring = /var/lib/ceph/radosgw/test-rgw.rgw0/keyring
rgw frontends = civetweb port=192.168.15.50:8080 num_threads=100

[client.rgw.mds0]
log file = /var/log/ceph/test-rgw-mds0.log
host = mds0
keyring = /var/lib/ceph/radosgw/test-rgw.mds0/keyring
rgw frontends = civetweb port=192.168.15.70:8080 num_threads=100

[global]
cluster network = 192.168.16.0/24
mon host = 192.168.15.10,192.168.15.11,192.168.15.12
osd_pool_default_size = 1
public network = 192.168.15.0/24
rgw bucket default quota max objects = 1638400
osd_pool_default_pg_num = 8
rgw override bucket index max shards = 16
fsid = 77a21980-3033-4174-9264-1abc7185bcb3

[mds.rgw0]
host = rgw0

[mds.mds0]
host = mds0

Signed-off-by: Sébastien Han
---
 roles/ceph-config/templates/ceph.conf.j2 | 41 +++++++++---------------
 1 file changed, 15 insertions(+), 26 deletions(-)

diff --git a/roles/ceph-config/templates/ceph.conf.j2 b/roles/ceph-config/templates/ceph.conf.j2
index 22c21e398..2eb2a56e8 100644
--- a/roles/ceph-config/templates/ceph.conf.j2
+++ b/roles/ceph-config/templates/ceph.conf.j2
@@ -132,6 +132,7 @@ filestore xattr use omap = true

 {% if groups[mds_group_name] is defined %}
 {% if mds_group_name in group_names %}
+{% if inventory_hostname in groups.get(mds_group_name, []) %}
 {% for host in groups[mds_group_name] %}
 {% if hostvars[host]['ansible_fqdn'] is defined and mds_use_fqdn %}
 [mds.{{ hostvars[host]['ansible_fqdn'] }}]
@@ -143,41 +144,48 @@ host = {{ hostvars[host]['ansible_hostname'] }}
 {% endfor %}
 {% endif %}
 {% endif %}
+{% endif %}

 {% if groups[rgw_group_name] is defined %}
 {% if rgw_group_name in group_names %}
+{% if inventory_hostname in groups.get(rgw_group_name, []) %}
 {% for host in groups[rgw_group_name] %}
 {% if hostvars[host]['ansible_hostname'] is defined %}
 [client.rgw.{{ hostvars[host]['ansible_hostname'] }}]
 host = {{ hostvars[host]['ansible_hostname'] }}
 keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
 log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
-{% if radosgw_address_block | length > 0 %}
+{% if hostvars[host]['radosgw_address_block'] is defined and hostvars[host]['radosgw_address_block'] | length > 0 %}
     {% if ip_version == 'ipv4' -%}
     rgw frontends = civetweb port={{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
     {%- elif ip_version == 'ipv6' -%}
     rgw frontends = civetweb port=[{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {%- endif %}
+    {%- endif -%}
+
 {% elif hostvars[host]['radosgw_address'] is defined and hostvars[host]['radosgw_address'] != '0.0.0.0' -%}
     {% if ip_version == 'ipv4' -%}
    rgw frontends = civetweb port={{ hostvars[host]['radosgw_address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
     {%- elif ip_version == 'ipv6' -%}
     rgw frontends = civetweb port=[{{ hostvars[host]['radosgw_address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {% endif %}
+    {%- endif -%}
+
 {%- else -%}
     {% if ip_version == 'ipv4' -%}
     rgw frontends = civetweb port={{ hostvars[host]['ansible_' + hostvars[host]['radosgw_interface']][ip_version]['address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
     {%- elif ip_version == 'ipv6' -%}
     rgw frontends = civetweb port=[{{ hostvars[host]['ansible_' + hostvars[host]['radosgw_interface']][ip_version][0]['address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {% endif %}
+    {%- endif -%}
+
 {%- endif %}
 {% endif %}
 {% endfor %}
 {% endif %}
 {% endif %}
+{% endif %}

 {% if groups[nfs_group_name] is defined %}
 {% if nfs_group_name in group_names %}
+{% if inventory_hostname in groups.get(nfs_group_name, []) %}
 {% for host in groups[nfs_group_name] %}
 {% if nfs_obj_gw %}
 {% if hostvars[host]['ansible_hostname'] is defined %}
@@ -185,35 +193,16 @@ log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname']
 host = {{ hostvars[host]['ansible_hostname'] }}
 keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
 log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
-rgw data = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}
-{% if radosgw_address_block | length > 0 %}
-    {% if ip_version == 'ipv4' -%}
-    rgw frontends = civetweb port={{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {%- elif ip_version == 'ipv6' -%}
-    rgw frontends = civetweb port=[{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {%- endif %}
-{% elif hostvars[host]['radosgw_address'] is defined and hostvars[host]['radosgw_address'] != '0.0.0.0' -%}
-    {% if ip_version == 'ipv4' -%}
-    rgw frontends = civetweb port={{ hostvars[host]['radosgw_address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {%- elif ip_version == 'ipv6' -%}
-    rgw frontends = civetweb port=[{{ hostvars[host]['radosgw_address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {% endif %}
-{%- else -%}
-    {% set interface = 'ansible_' + hostvars[host]['radosgw_interface'] %}
-    {% if ip_version == 'ipv6' -%}
-    rgw frontends = civetweb port=[{{ hostvars[host][interface][ip_version][0]['address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {%- elif ip_version == 'ipv4' -%}
-    rgw frontends = civetweb port={{ hostvars[host][interface][ip_version]['address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {% endif %}
-{%- endif %}
 {% endif %}
 {% endif %}
 {% endfor %}
 {% endif %}
 {% endif %}
+{% endif %}

 {% if groups[restapi_group_name] is defined %}
 {% if restapi_group_name in group_names %}
+{% if inventory_hostname in groups.get(restapi_group_name, []) %}
 [client.restapi]
 {% if restapi_interface != "interface" %}
 {% include 'client_restapi_interface.j2' %}
@@ -224,4 +213,4 @@ keyring = /var/lib/ceph/restapi/ceph-restapi/keyring
 log file = /var/log/ceph/ceph-restapi.log
 {% endif %}
 {% endif %}
-
+{% endif %}
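The whole fix hinges on one extra guard per section block: a host only
renders a daemon section when it is actually a member of that group. A
minimal sketch (a standalone task, not part of the template) expressing the
same membership test as an Ansible condition:

- name: render mds sections only on actual mds group members
  debug:
    msg: "this host belongs to {{ mds_group_name }} and renders [mds.*] sections"
  when:
    # groups.get() avoids an error when the group is absent from the inventory
    - inventory_hostname in groups.get(mds_group_name, [])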
From 450108fab92b03f2aec3368a218801c3a095850f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Sun, 8 Oct 2017 15:54:36 +0200
Subject: [PATCH 03/12] infra: add independent purge-iscsi-gateways.yml
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The current inclusion of purge-iscsi-gateways.yml in purge-cluster.yml
is not working well and is blocking the CI too. So remove it from
purge-cluster.yml and re-add the original standalone
purge-iscsi-gateways.yml.

Signed-off-by: Sébastien Han
---
 infrastructure-playbooks/purge-cluster.yml    | 32 ----------------
 .../purge-iscsi-gateways.yml                  | 37 +++++++++++++++++++
 2 files changed, 37 insertions(+), 32 deletions(-)
 create mode 100644 infrastructure-playbooks/purge-iscsi-gateways.yml

diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index 3216ea0e4..5c2dc27af 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -503,38 +503,6 @@
       - /var/lib/ceph/tmp

-- name: purge iscsi gateway(s)
-
-  vars:
-    igw_purge_type: all
-
-  hosts:
-    - "{{ iscsi_gw_group_name|default('iscsi-gws') }}"
-
-  gather_facts: false # already gathered previously
-
-  become: true
-
-  tasks:
-
-    - name: igw_purge | purging the gateway configuration
-      igw_purge:
-        mode: "gateway"
-
-    - name: igw_purge | deleting configured rbd devices
-      igw_purge:
-        mode: "disks"
-      when:
-        - igw_purge_type == 'all'
-
-    - name: restart rbd-target-gw daemons
-      service:
-        name: rbd-target-gw
-        state: restarted
-      when:
-        - ansible_service_mgr == 'systemd'
-
-
 - name: final cleanup - check any running ceph, purge ceph packages, purge config and remove data

   vars:
diff --git a/infrastructure-playbooks/purge-iscsi-gateways.yml b/infrastructure-playbooks/purge-iscsi-gateways.yml
new file mode 100644
index 000000000..157dce414
--- /dev/null
+++ b/infrastructure-playbooks/purge-iscsi-gateways.yml
@@ -0,0 +1,37 @@
+---
+
+- name: Confirm removal of the iSCSI gateway configuration
+  hosts: localhost
+
+  vars_prompt:
+    - name: purge_config
+      prompt: Which configuration elements should be purged? (all, lio or abort)
+      default: 'abort'
+      private: no
+
+  tasks:
+    - name: Exit playbook if user aborted the purge
+      fail:
+        msg: >
+          "You have aborted the purge of the iSCSI gateway configuration"
+      when: purge_config == 'abort'
+
+    - set_fact:
+        igw_purge_type: "{{ purge_config }}"
+
+- name: Removing the gateway configuration
+  hosts: ceph-iscsi-gw
+  vars:
+    - igw_purge_type: "{{hostvars['localhost']['igw_purge_type']}}"
+
+  tasks:
+    - name: igw_purge | purging the gateway configuration
+      igw_purge: mode="gateway"
+
+    - name: igw_purge | deleting configured rbd devices
+      igw_purge: mode="disks"
+      when: igw_purge_type == 'all'
+
+    - name: restart rbd-target-gw daemons
+      service: name=rbd-target-gw state=restarted
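Since the standalone playbook is driven by a vars_prompt, it can also run
unattended: Ansible skips a prompt whenever the variable is already defined,
e.g. via `-e purge_config=all` on the command line. A hedged sketch of the
equivalent extra-vars file (the file name is illustrative, not part of the
repo):

# purge-vars.yml (hypothetical), used as:
#   ansible-playbook infrastructure-playbooks/purge-iscsi-gateways.yml -e @purge-vars.yml
# 'all' purges both the gateway configuration and the configured rbd disks;
# 'lio' purges the gateway configuration only.
purge_config: all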
From e15302c2847c900013a994213d62a8523b2eeb5c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3%A9bastien=20Han?=
Date: Sun, 8 Oct 2017 15:56:39 +0200
Subject: [PATCH 04/12] tox: decrease sleep to 2min instead of 5
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Sébastien Han
---
 tox.ini | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tox.ini b/tox.ini
index 7a10920bc..187dc853a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -217,16 +217,16 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml

-  # wait 5 minutes for services to be ready
-  sleep 300
+  # wait 2 minutes for services to be ready
+  sleep 120

   # test cluster state using ceph-ansible tests
   testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

   # reboot all vms
   vagrant reload --no-provision

-  # wait 5 minutes for services to be ready
-  sleep 300
+  # wait 2 minutes for services to be ready
+  sleep 120

   # retest to ensure cluster came back up correctly after rebooting
   testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

From 6d7b73fa91e16fd726c3b63b3fbb93c8bffab810 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Sun, 8 Oct 2017 17:29:32 +0200
Subject: [PATCH 05/12] ci: re-add osd_pool_default_size to 1 with the override
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If we don't do this, the client will create pools with a replica count
of 3, since osd_pool_default_size was gone from ceph-override.json.
This was making switch_to_containers fail.

Signed-off-by: Sébastien Han
---
 tests/functional/centos/7/cluster/ceph-override.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/functional/centos/7/cluster/ceph-override.json b/tests/functional/centos/7/cluster/ceph-override.json
index 9a3af9436..965e361f9 100644
--- a/tests/functional/centos/7/cluster/ceph-override.json
+++ b/tests/functional/centos/7/cluster/ceph-override.json
@@ -1,7 +1,8 @@
 {
   "ceph_conf_overrides": {
     "global": {
-      "osd_pool_default_pg_num": 12
+      "osd_pool_default_pg_num": 12,
+      "osd_pool_default_size": 1
     }
   }
 }
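For readers who use group_vars rather than the CI's JSON file, the override
above is equivalent to the following YAML (a sketch of the same settings,
not a file in the repo); every key under `global` lands in the [global]
section of the rendered ceph.conf:

ceph_conf_overrides:
  global:
    osd_pool_default_pg_num: 12
    # replica 1 keeps the tiny single-OSD CI clusters able to create pools
    osd_pool_default_size: 1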
From bf99751ce1a3f9a2cb7bc7b1d78e01dcdb8dbbd4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Mon, 9 Oct 2017 10:41:14 +0200
Subject: [PATCH 06/12] osd: bindmount /run/udev
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Ensures that "udevadm" is able to check the status of udev's event
queue.

Signed-off-by: Sébastien Han
---
 roles/ceph-osd/templates/ceph-osd-run.sh.j2 | 1 +
 1 file changed, 1 insertion(+)

diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2
index bd854ae0a..7cb5f25c8 100644
--- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2
+++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2
@@ -32,6 +32,7 @@ expose_partitions "$1"
   {% endif -%}
   -v /dev:/dev \
   -v /etc/localtime:/etc/localtime:ro \
+  -v /run/udev:/run/udev:ro \
   -v /var/lib/ceph:/var/lib/ceph \
   -v /etc/ceph:/etc/ceph \
   $DOCKER_ENV \
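The OSD container is launched by the templated shell script above, but the
intent of the extra bind mount is easier to see in module form. A hedged
sketch using Ansible's docker_container module (container name and image
are illustrative, not values from the role):

- name: run an osd container that can see the host udev event queue
  docker_container:
    name: ceph-osd-example        # hypothetical name
    image: ceph/daemon:latest     # hypothetical image
    volumes:
      - /dev:/dev
      # read-only view of the queue so "udevadm settle" works inside the container
      - /run/udev:/run/udev:ro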
From 88a37ca99027bd7fc9cce052ce6e39e46c9abd05 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Mon, 9 Oct 2017 11:56:46 +0200
Subject: [PATCH 07/12] ci: disable vnc console
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Sébastien Han
---
 Vagrantfile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Vagrantfile b/Vagrantfile
index 4dbc9c37a..3f5a00f24 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -135,6 +135,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   config.vm.provider :libvirt do |lv|
     lv.cpu_mode = 'host-passthrough'
     lv.volume_cache = 'unsafe'
+    lv.graphics_type = 'none'
   end

   # Faster bootup. Disables mounting the sync folder for libvirt and virtualbox

From a1ea6e7f59599e0f98c4d378c27b787407f809a1 Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Mon, 9 Oct 2017 13:48:03 +0200
Subject: [PATCH 08/12] tests: adapt current testing for collocation scenario

Since we introduced a collocation testing scenario, we need to adapt
the current tests to it.

Signed-off-by: Guillaume Abrioux
---
 tests/functional/tests/mds/test_mds.py |  8 +++++---
 tests/functional/tests/mgr/test_mgr.py | 11 ++++++++---
 2 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/tests/functional/tests/mds/test_mds.py b/tests/functional/tests/mds/test_mds.py
index a8665868c..6e88b53b9 100644
--- a/tests/functional/tests/mds/test_mds.py
+++ b/tests/functional/tests/mds/test_mds.py
@@ -34,6 +34,8 @@ class TestMDSs(object):
             hostname=node["vars"]["inventory_hostname"],
             cluster=node["cluster_name"]
         )
-        output = host.check_output(cmd)
-        daemons = json.loads(output)["fsmap"]["by_rank"][0]["name"]
-        assert hostname in daemons
+        output_raw = host.check_output(cmd)
+        output_json = json.loads(output_raw)
+        active_daemon = output_json["fsmap"]["by_rank"][0]["name"]
+        if active_daemon != hostname:
+            assert output_json['fsmap']['up:standby'] == 1
diff --git a/tests/functional/tests/mgr/test_mgr.py b/tests/functional/tests/mgr/test_mgr.py
index 146ac0062..488c7ccb9 100644
--- a/tests/functional/tests/mgr/test_mgr.py
+++ b/tests/functional/tests/mgr/test_mgr.py
@@ -39,6 +39,11 @@ class TestMGRs(object):
             hostname=node["vars"]["inventory_hostname"],
             cluster=node["cluster_name"]
         )
-        output = host.check_output(cmd)
-        daemons = json.loads(output)["mgrmap"]["active_name"]
-        assert hostname in daemons
+        output_raw = host.check_output(cmd)
+        output_json = json.loads(output_raw)
+        daemons = output_json['mgrmap']['active_name']
+        standbys = [i['name'] for i in output_json['mgrmap']['standbys']]
+        result = hostname in daemons
+        if not result:
+            result = hostname in standbys
+        assert result

From 1581a1c078f3d09370ca3ce41992bc35794643fe Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Mon, 9 Oct 2017 15:26:15 +0200
Subject: [PATCH 09/12] mgr: move package installation to the role itself

Make the `ceph-mgr` role handle the installation of the `ceph-mgr`
package itself, because it is complicated to manage in ceph-common now
that we may install either jewel or luminous.

Signed-off-by: Guillaume Abrioux
---
 .../tasks/installs/install_debian_packages.yml     |  8 --------
 .../installs/install_debian_rhcs_packages.yml      |  7 -------
 .../tasks/installs/install_redhat_packages.yml     |  7 -------
 roles/ceph-mgr/tasks/pre_requisite.yml             | 15 +++++++++++++++
 4 files changed, 15 insertions(+), 22 deletions(-)

diff --git a/roles/ceph-common/tasks/installs/install_debian_packages.yml b/roles/ceph-common/tasks/installs/install_debian_packages.yml
index 7092d29bb..994fbcb1a 100644
--- a/roles/ceph-common/tasks/installs/install_debian_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_debian_packages.yml
@@ -60,11 +60,3 @@
   when:
     - nfs_group_name in group_names
    - nfs_obj_gw
-
-- name: install ceph mgr for debian
-  apt:
-    name: ceph-mgr
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-    default_release: "{{ ceph_stable_release_uca | default(omit) }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
-  when:
-    - mgr_group_name in group_names
diff --git a/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml b/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml
index bcc444f93..468d76ae6 100644
--- a/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml
@@ -70,10 +70,3 @@
   when:
     - nfs_group_name in group_names
     - nfs_obj_gw
-
-- name: install ceph mgr for debian
-  apt:
-    pkg: ceph-mgr
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - mgr_group_name in group_names
diff --git a/roles/ceph-common/tasks/installs/install_redhat_packages.yml b/roles/ceph-common/tasks/installs/install_redhat_packages.yml
index ea12b07f4..31c749c3d 100644
--- a/roles/ceph-common/tasks/installs/install_redhat_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_redhat_packages.yml
@@ -94,13 +94,6 @@
   when:
     - nfs_group_name in group_names
     - nfs_obj_gw

-- name: install redhat ceph-mgr package
-  package:
-    name: ceph-mgr
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - mgr_group_name in group_names
-
 - name: install redhat ceph iscsi package
   package:
     name: "{{ item }}"
diff --git a/roles/ceph-mgr/tasks/pre_requisite.yml b/roles/ceph-mgr/tasks/pre_requisite.yml
index 80e9b3f45..4c3e314e1 100644
--- a/roles/ceph-mgr/tasks/pre_requisite.yml
+++ b/roles/ceph-mgr/tasks/pre_requisite.yml
@@ -1,4 +1,19 @@
 ---
+- name: install redhat ceph-mgr package
+  package:
+    name: ceph-mgr
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  when:
+    - ansible_os_family == 'RedHat'
+
+- name: install ceph mgr for debian
+  apt:
+    name: ceph-mgr
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+    default_release: "{{ ceph_stable_release_uca | default(omit) }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
+  when:
+    - ansible_os_family == 'Debian'
+
 - name: create mgr directory
   file:
     path: /var/lib/ceph/mgr/
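All of the relocated install tasks share one idiom worth spelling out: the
package state flips to latest only during a rolling upgrade. A minimal
standalone sketch of that pattern (the default(false) is an assumption for
the example; the playbooks set upgrade_ceph_packages explicitly):

- name: install a ceph package with an upgrade-aware state
  package:
    name: ceph-mgr
    # 'present' on normal runs, 'latest' when rolling_update.yml enables upgrades
    state: "{{ (upgrade_ceph_packages | default(false) | bool) | ternary('latest', 'present') }}"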
From 4032f102fe0285344e1ff960cab806f46977c76f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Mon, 9 Oct 2017 15:30:54 +0200
Subject: [PATCH 10/12] iscsi: move package install to ceph-iscsi-role
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Sébastien Han
---
 .../tasks/installs/install_redhat_packages.yml | 11 -----------
 roles/ceph-iscsi-gw/tasks/prerequisites.yml    | 11 +++++++++++
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/roles/ceph-common/tasks/installs/install_redhat_packages.yml b/roles/ceph-common/tasks/installs/install_redhat_packages.yml
index 31c749c3d..8154a054f 100644
--- a/roles/ceph-common/tasks/installs/install_redhat_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_redhat_packages.yml
@@ -93,14 +93,3 @@
   when:
     - nfs_group_name in group_names
     - nfs_obj_gw
-
-- name: install redhat ceph iscsi package
-  package:
-    name: "{{ item }}"
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  with_items:
-    - tcmu-runner
-    - ceph-iscsi-config
-    - targetcli
-  when:
-    - iscsi_gw_group_name in group_names
diff --git a/roles/ceph-iscsi-gw/tasks/prerequisites.yml b/roles/ceph-iscsi-gw/tasks/prerequisites.yml
index 6b877100f..3f7ef97a1 100644
--- a/roles/ceph-iscsi-gw/tasks/prerequisites.yml
+++ b/roles/ceph-iscsi-gw/tasks/prerequisites.yml
@@ -1,4 +1,15 @@
 ---
+- name: install redhat ceph iscsi package
+  package:
+    name: "{{ item }}"
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  with_items:
+    - tcmu-runner
+    - ceph-iscsi-config
+    - targetcli
+  when:
+    - ansible_os_family == 'RedHat'
+
 - name: check the status of the target.service override
   stat:
     path: /etc/systemd/system/target.service
From 3c64abe07d0775fd905ab8ffc8d37a040af90677 Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Mon, 9 Oct 2017 15:37:51 +0200
Subject: [PATCH 11/12] mds: move package installation to the role itself

Make the `ceph-mds` role handle the installation of the `ceph-mds`
package itself.

Signed-off-by: Guillaume Abrioux
---
 .../tasks/installs/install_debian_packages.yml     |  8 --------
 .../installs/install_debian_rhcs_packages.yml      |  7 -------
 .../tasks/installs/install_redhat_packages.yml     |  7 -------
 roles/ceph-mds/tasks/non_containerized.yml         | 17 +++++++++++++++++
 4 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/roles/ceph-common/tasks/installs/install_debian_packages.yml b/roles/ceph-common/tasks/installs/install_debian_packages.yml
index 994fbcb1a..443da199e 100644
--- a/roles/ceph-common/tasks/installs/install_debian_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_debian_packages.yml
@@ -28,14 +28,6 @@
   when:
     - rgw_group_name in group_names

-- name: install ceph mds for debian
-  apt:
-    name: ceph-mds
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-    default_release: "{{ ceph_stable_release_uca | default(omit) }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
-  when:
-    - mds_group_name in group_names
-
 - name: install jemalloc for debian
   apt:
     name: libjemalloc1
diff --git a/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml b/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml
index 468d76ae6..753b87e0c 100644
--- a/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml
@@ -27,13 +27,6 @@
   when:
     - rgw_group_name in group_names

-- name: install red hat storage ceph mds for debian
-  apt:
-    pkg: ceph-mds
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - mds_group_name in group_names
-
 - name: install red hat storage ceph-fuse client for debian
   apt:
     pkg: ceph-fuse
diff --git a/roles/ceph-common/tasks/installs/install_redhat_packages.yml b/roles/ceph-common/tasks/installs/install_redhat_packages.yml
index 8154a054f..291d1cca4 100644
--- a/roles/ceph-common/tasks/installs/install_redhat_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_redhat_packages.yml
@@ -34,13 +34,6 @@
   when:
     - osd_group_name in group_names

-- name: install redhat ceph-mds package
-  package:
-    name: "ceph-mds"
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - mds_group_name in group_names
-
 - name: install redhat ceph-fuse package
   package:
     name: "ceph-fuse"
diff --git a/roles/ceph-mds/tasks/non_containerized.yml b/roles/ceph-mds/tasks/non_containerized.yml
index cff635f46..9cac6e6bf 100644
--- a/roles/ceph-mds/tasks/non_containerized.yml
+++ b/roles/ceph-mds/tasks/non_containerized.yml
@@ -1,4 +1,21 @@
 ---
+- name: install ceph mds for debian
+  apt:
+    name: ceph-mds
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+    default_release: "{{ ceph_stable_release_uca | default(omit) }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
+  when:
+    - mds_group_name in group_names
+    - ansible_os_family == 'Debian'
+
+- name: install redhat ceph-mds package
+  package:
+    name: "ceph-mds"
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  when:
+    - mds_group_name in group_names
+    - ansible_os_family == 'RedHat'
+
 - name: create bootstrap-mds directory
   file:
     path: /var/lib/ceph/bootstrap-mds/
From 9e8204d9e8dfb899bafd6b88107d221891e9a066 Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Mon, 9 Oct 2017 17:10:39 +0200
Subject: [PATCH 12/12] nfs: move package installation to its own role

Make the `ceph-nfs` role handle the installation of the NFS packages
itself.

Signed-off-by: Guillaume Abrioux
---
 .../installs/install_debian_packages.yml      | 25 ------
 .../installs/install_debian_rhcs_packages.yml | 23 -----
 .../installs/install_redhat_packages.yml      | 26 +-----
 .../installs/redhat_community_repository.yml  |  3 +-
 .../tasks/pre_requisite_non_container.yml     | 83 +++++++++++++++++++
 5 files changed, 85 insertions(+), 75 deletions(-)

diff --git a/roles/ceph-common/tasks/installs/install_debian_packages.yml b/roles/ceph-common/tasks/installs/install_debian_packages.yml
index 443da199e..46d65b665 100644
--- a/roles/ceph-common/tasks/installs/install_debian_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_debian_packages.yml
@@ -27,28 +27,3 @@
     update_cache: yes
   when:
     - rgw_group_name in group_names
-
-- name: install jemalloc for debian
-  apt:
-    name: libjemalloc1
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-    update_cache: yes
-  when:
-    - nfs_group_name in group_names
-
-- name: install nfs cephfs gateway
-  package:
-    name: nfs-ganesha-ceph
-  when:
-    - nfs_group_name in group_names
-    - nfs_file_gw
-
-- name: install nfs rgw gateway
-  package:
-    name: "{{ item }}"
-  with_items:
-    - nfs-ganesha-rgw
-    - radosgw
-  when:
-    - nfs_group_name in group_names
-    - nfs_obj_gw
diff --git a/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml b/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml
index 753b87e0c..69bcf18ee 100644
--- a/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml
@@ -40,26 +40,3 @@
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   when:
     - client_group_name in group_names
-
-- name: install red hat storage nfs gateway for debian
-  apt:
-    name: nfs-ganesha
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - nfs_group_name in group_names
-
-- name: install red hat storage nfs file gateway
-  apt:
-    name: nfs-ganesha-ceph
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - nfs_group_name in group_names
-    - nfs_file_gw
-
-- name: install red hat storage nfs obj gateway
-  apt:
-    name: nfs-ganesha-rgw
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - nfs_group_name in group_names
-    - nfs_obj_gw
diff --git a/roles/ceph-common/tasks/installs/install_redhat_packages.yml b/roles/ceph-common/tasks/installs/install_redhat_packages.yml
index 291d1cca4..c431a616f 100644
--- a/roles/ceph-common/tasks/installs/install_redhat_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_redhat_packages.yml
@@ -8,7 +8,7 @@
     - ansible_distribution == 'RedHat'

 - name: install centos dependencies
-  package:
+  yum:
     name: "{{ item }}"
     state: present
   with_items: "{{ centos_package_dependencies }}"
@@ -62,27 +62,3 @@
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   when:
     - rgw_group_name in group_names
-
-- name: install redhat nfs-ganesha-ceph package
-  package:
-    name: nfs-ganesha-ceph
-  when:
-    - nfs_group_name in group_names
-    - nfs_file_gw
-
-- name: install nfs cephfs gateway
-  package:
-    name: nfs-ganesha-ceph
-  when:
-    - nfs_group_name in group_names
-    - nfs_file_gw
-
-- name: install redhat nfs-ganesha-rgw and ceph-radosgw packages
-  package:
-    name: "{{ item }}"
-  with_items:
-    - nfs-ganesha-rgw
-    - ceph-radosgw
-  when:
-    - nfs_group_name in group_names
-    - nfs_obj_gw
diff --git a/roles/ceph-common/tasks/installs/redhat_community_repository.yml b/roles/ceph-common/tasks/installs/redhat_community_repository.yml
index 8d261e5dd..5aff95e0b 100644
--- a/roles/ceph-common/tasks/installs/redhat_community_repository.yml
+++ b/roles/ceph-common/tasks/installs/redhat_community_repository.yml
@@ -20,8 +20,7 @@
     gpgcheck: yes
     state: present
     gpgkey: "{{ ceph_stable_key }}"
-    baseurl: "{{ ceph_mirror }}/nfs-ganesha/rpm-{{ nfs_ganesha_stable_branch }}/{{ ceph_stable_release }}/$basearch"
+    baseurl: "{{ ceph_mirror }}/nfs-ganesha/rpm-{{ nfs_ganesha_stable_branch }}/luminous/$basearch"
   when:
     - nfs_group_name in group_names
     - nfs_ganesha_stable
-
diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml
index 88e281be0..822eff8af 100644
--- a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml
+++ b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml
@@ -1,4 +1,87 @@
 ---
+- name: install redhat nfs-ganesha-ceph package
+  package:
+    name: nfs-ganesha-ceph
+  when:
+    - nfs_file_gw
+    - ansible_os_family == 'RedHat'
+
+- name: install nfs cephfs gateway
+  package:
+    name: nfs-ganesha-ceph
+  when:
+    - nfs_file_gw
+    - ansible_os_family == 'RedHat'
+
+- name: install redhat nfs-ganesha-rgw and ceph-radosgw packages
+  package:
+    name: "{{ item }}"
+  with_items:
+    - nfs-ganesha-rgw
+    - ceph-radosgw
+  when:
+    - nfs_obj_gw
+    - ansible_os_family == 'RedHat'
+
+- name: install jemalloc for debian
+  apt:
+    name: libjemalloc1
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+    update_cache: yes
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository != 'rhcs'
+    - ansible_os_family == 'Debian'
+
+# debian installation
+- name: install nfs cephfs gateway
+  package:
+    name: nfs-ganesha-ceph
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository != 'rhcs'
+    - ansible_os_family == 'Debian'
+    - nfs_file_gw
+
+- name: install nfs rgw gateway
+  package:
+    name: "{{ item }}"
+  with_items:
+    - nfs-ganesha-rgw
+    - radosgw
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository != 'rhcs'
+    - ansible_os_family == 'Debian'
+    - nfs_obj_gw
+
+# debian_rhcs installation
+- name: install red hat storage nfs gateway for debian
+  apt:
+    name: nfs-ganesha
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository == 'rhcs'
+
+- name: install red hat storage nfs file gateway
+  apt:
+    name: nfs-ganesha-ceph
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository == 'rhcs'
+    - nfs_file_gw
+
+- name: install red hat storage nfs obj gateway
+  apt:
+    name: nfs-ganesha-rgw
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository == 'rhcs'
+    - nfs_obj_gw
+
 # NOTE (leseb): we use root:ceph for permissions since ganesha
 # does not have the right selinux context to read ceph directories.
 - name: create rados gateway and ganesha directories
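Note that in the new pre_requisite_non_container.yml the first two RedHat
tasks ("install redhat nfs-ganesha-ceph package" and "install nfs cephfs
gateway") install the same package under identical conditions, so one of
them is redundant. A sketch of how the pair could collapse into a single
task:

- name: install nfs cephfs gateway (redhat)
  package:
    name: nfs-ganesha-ceph
  when:
    - nfs_file_gw
    - ansible_os_family == 'RedHat'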