diff --git a/Vagrantfile b/Vagrantfile
index 4dbc9c37a..3f5a00f24 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -135,6 +135,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   config.vm.provider :libvirt do |lv|
     lv.cpu_mode = 'host-passthrough'
     lv.volume_cache = 'unsafe'
+    lv.graphics_type = 'none'
   end

   # Faster bootup. Disables mounting the sync folder for libvirt and virtualbox
diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index d51030756..41589f8d1 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -509,38 +509,6 @@
     - /var/lib/ceph/tmp

-- name: purge iscsi gateway(s)
-
-  vars:
-    igw_purge_type: all
-
-  hosts:
-    - "{{ iscsi_gw_group_name|default('iscsi-gws') }}"
-
-  gather_facts: false # already gathered previously
-
-  become: true
-
-  tasks:
-
-  - name: igw_purge | purging the gateway configuration
-    igw_purge:
-      mode: "gateway"
-
-  - name: igw_purge | deleting configured rbd devices
-    igw_purge:
-      mode: "disks"
-    when:
-      - igw_purge_type == 'all'
-
-  - name: restart rbd-target-gw daemons
-    service:
-      name: rbd-target-gw
-      state: restarted
-    when:
-      - ansible_service_mgr == 'systemd'
-
-
 - name: final cleanup - check any running ceph, purge ceph packages, purge config and remove data

   vars:
diff --git a/infrastructure-playbooks/purge-iscsi-gateways.yml b/infrastructure-playbooks/purge-iscsi-gateways.yml
new file mode 100644
index 000000000..157dce414
--- /dev/null
+++ b/infrastructure-playbooks/purge-iscsi-gateways.yml
@@ -0,0 +1,37 @@
+---
+
+- name: Confirm removal of the iSCSI gateway configuration
+  hosts: localhost
+
+  vars_prompt:
+    - name: purge_config
+      prompt: Which configuration elements should be purged? (all, lio or abort)
+      default: 'abort'
+      private: no
+
+  tasks:
+    - name: Exit playbook if user aborted the purge
+      fail:
+        msg: >
+          "You have aborted the purge of the iSCSI gateway configuration"
+      when: purge_config == 'abort'
+
+    - set_fact:
+        igw_purge_type: "{{ purge_config }}"
+
+- name: Removing the gateway configuration
+  hosts: ceph-iscsi-gw
+  vars:
+    - igw_purge_type: "{{hostvars['localhost']['igw_purge_type']}}"
+
+  tasks:
+    - name: igw_purge | purging the gateway configuration
+      igw_purge: mode="gateway"
+
+    - name: igw_purge | deleting configured rbd devices
+      igw_purge: mode="disks"
+      when: igw_purge_type == 'all'
+
+    - name: restart rbd-target-gw daemons
+      service: name=rbd-target-gw state=restarted
+
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index 6690108db..5206f4d57 100644
--- a/infrastructure-playbooks/rolling_update.yml
+++ b/infrastructure-playbooks/rolling_update.yml
@@ -147,20 +147,6 @@
       when:
         - mon_host_count | int == 1

-    - name: get num_pgs - non container
-      command: ceph --cluster "{{ cluster }}" -s --format json
-      register: ceph_pgs
-      delegate_to: "{{ mon_host }}"
-      when:
-        - not containerized_deployment
-
-    - name: get num_pgs - container
-      command: docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
-      register: ceph_pgs
-      delegate_to: "{{ mon_host }}"
-      when:
-        - containerized_deployment
-
     - name: non container | waiting for the monitor to join the quorum...
       command: ceph --cluster "{{ cluster }}" -s --format json
       register: ceph_health_raw
@@ -171,7 +157,6 @@
       delegate_to: "{{ mon_host }}"
       when:
         - not containerized_deployment
-        - (ceph_pgs.stdout | from_json).pgmap.num_pgs != "0"

     - name: container | waiting for the containerized monitor to join the quorum...
       command: docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
@@ -183,7 +168,6 @@
       delegate_to: "{{ mon_host }}"
       when:
         - containerized_deployment
-        - (ceph_pgs.stdout | from_json).pgmap.num_pgs != "0"

     - name: set osd flags
       command: ceph osd set {{ item }} --cluster {{ cluster }}
@@ -293,32 +277,31 @@
         - ansible_service_mgr == 'systemd'
         - containerized_deployment

-    - name: waiting for clean pgs...
-      command: ceph --cluster "{{ cluster }}" -s --format json
-      register: ceph_health_post
-      until: >
-        ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1
-        and
-        (ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      retries: "{{ health_osd_check_retries }}"
-      delay: "{{ health_osd_check_delay }}"
-      when:
-        - not containerized_deployment
-
-    - name: container - waiting for clean pgs...
-      command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s --format json"
-      register: ceph_health_post
-      until: >
-        ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1
-        and
-        (ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      retries: "{{ health_osd_check_retries }}"
-      delay: "{{ health_osd_check_delay }}"
+    - name: set_fact docker_exec_cmd_osd
+      set_fact:
+        docker_exec_cmd_update_osd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
       when:
         - containerized_deployment

+    - name: get num_pgs - non container
+      command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
+      register: ceph_pgs
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+
+    - name: waiting for clean pgs...
+ command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json" + register: ceph_health_post + until: > + ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1 + and + (ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean" + delegate_to: "{{ groups[mon_group_name][0] }}" + retries: "{{ health_osd_check_retries }}" + delay: "{{ health_osd_check_delay }}" + when: + - (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0 + + - name: unset osd flags hosts: diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index 54a81e1ed..a11034e46 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -296,7 +296,7 @@ retries: "{{ health_osd_check_retries }}" delay: "{{ health_osd_check_delay }}" when: - - (ceph_pgs.stdout | from_json).pgmap.num_pgs != "0" + - (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0 - name: switching from non-containerized to containerized ceph mds diff --git a/roles/ceph-common/tasks/installs/install_debian_packages.yml b/roles/ceph-common/tasks/installs/install_debian_packages.yml index 7092d29bb..46d65b665 100644 --- a/roles/ceph-common/tasks/installs/install_debian_packages.yml +++ b/roles/ceph-common/tasks/installs/install_debian_packages.yml @@ -27,44 +27,3 @@ update_cache: yes when: - rgw_group_name in group_names - -- name: install ceph mds for debian - apt: - name: ceph-mds - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" - default_release: "{{ ceph_stable_release_uca | default(omit) }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}" - when: - - mds_group_name in group_names - -- name: install jemalloc for debian - apt: - name: libjemalloc1 - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" - update_cache: yes - when: - - nfs_group_name in group_names - -- name: install nfs cephfs gateway - package: - name: nfs-ganesha-ceph - when: - - nfs_group_name in group_names - - nfs_file_gw - -- name: install nfs rgw gateway - package: - name: "{{ item }}" - with_items: - - nfs-ganesha-rgw - - radosgw - when: - - nfs_group_name in group_names - - nfs_obj_gw - -- name: install ceph mgr for debian - apt: - name: ceph-mgr - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" - default_release: "{{ ceph_stable_release_uca | default(omit) }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}" - when: - - mgr_group_name in group_names diff --git a/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml b/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml index bcc444f93..69bcf18ee 100644 --- a/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml +++ b/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml @@ -27,13 +27,6 @@ when: - rgw_group_name in group_names -- name: install red hat storage ceph mds for debian - apt: - pkg: ceph-mds - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" - when: - - mds_group_name in group_names - - name: install red hat storage ceph-fuse client for debian apt: pkg: ceph-fuse @@ -47,33 +40,3 @@ state: "{{ 
   when:
     - client_group_name in group_names
-
-- name: install red hat storage nfs gateway for debian
-  apt:
-    name: nfs-ganesha
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - nfs_group_name in group_names
-
-- name: install red hat storage nfs file gateway
-  apt:
-    name: nfs-ganesha-ceph
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - nfs_group_name in group_names
-    - nfs_file_gw
-
-- name: install red hat storage nfs obj gateway
-  apt:
-    name: nfs-ganesha-rgw
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - nfs_group_name in group_names
-    - nfs_obj_gw
-
-- name: install ceph mgr for debian
-  apt:
-    pkg: ceph-mgr
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - mgr_group_name in group_names
diff --git a/roles/ceph-common/tasks/installs/install_redhat_packages.yml b/roles/ceph-common/tasks/installs/install_redhat_packages.yml
index ea12b07f4..c431a616f 100644
--- a/roles/ceph-common/tasks/installs/install_redhat_packages.yml
+++ b/roles/ceph-common/tasks/installs/install_redhat_packages.yml
@@ -8,7 +8,7 @@
     - ansible_distribution == 'RedHat'

 - name: install centos dependencies
-  package:
+  yum:
     name: "{{ item }}"
     state: present
   with_items: "{{ centos_package_dependencies }}"
@@ -34,13 +34,6 @@
   when:
     - osd_group_name in group_names

-- name: install redhat ceph-mds package
-  package:
-    name: "ceph-mds"
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - mds_group_name in group_names
-
 - name: install redhat ceph-fuse package
   package:
     name: "ceph-fuse"
@@ -69,45 +62,3 @@
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   when:
     - rgw_group_name in group_names
-
-- name: install redhat nfs-ganesha-ceph package
-  package:
-    name: nfs-ganesha-ceph
-  when:
-    - nfs_group_name in group_names
-    - nfs_file_gw
-
-- name: install nfs cephfs gateway
-  package:
-    name: nfs-ganesha-ceph
-  when:
-    - nfs_group_name in group_names
-    - nfs_file_gw
-
-- name: install redhat nfs-ganesha-rgw and ceph-radosgw packages
-  package:
-    name: "{{ item }}"
-  with_items:
-    - nfs-ganesha-rgw
-    - ceph-radosgw
-  when:
-    - nfs_group_name in group_names
-    - nfs_obj_gw
-
-- name: install redhat ceph-mgr package
-  package:
-    name: ceph-mgr
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  when:
-    - mgr_group_name in group_names
-
-- name: install redhat ceph iscsi package
-  package:
-    name: "{{ item }}"
-    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-  with_items:
-    - tcmu-runner
-    - ceph-iscsi-config
-    - targetcli
-  when:
-    - iscsi_gw_group_name in group_names
diff --git a/roles/ceph-common/tasks/installs/redhat_community_repository.yml b/roles/ceph-common/tasks/installs/redhat_community_repository.yml
index 8d261e5dd..5aff95e0b 100644
--- a/roles/ceph-common/tasks/installs/redhat_community_repository.yml
+++ b/roles/ceph-common/tasks/installs/redhat_community_repository.yml
@@ -20,8 +20,7 @@
     gpgcheck: yes
     state: present
     gpgkey: "{{ ceph_stable_key }}"
-    baseurl: "{{ ceph_mirror }}/nfs-ganesha/rpm-{{ nfs_ganesha_stable_branch }}/{{ ceph_stable_release }}/$basearch"
+    baseurl: "{{ ceph_mirror }}/nfs-ganesha/rpm-{{ nfs_ganesha_stable_branch }}/luminous/$basearch"
   when:
     - nfs_group_name in group_names
     - nfs_ganesha_stable
-
diff --git a/roles/ceph-config/templates/ceph.conf.j2 b/roles/ceph-config/templates/ceph.conf.j2
index 22c21e398..2eb2a56e8 100644
--- a/roles/ceph-config/templates/ceph.conf.j2
+++ b/roles/ceph-config/templates/ceph.conf.j2
@@ -132,6 +132,7 @@ filestore xattr use omap = true

 {% if groups[mds_group_name] is defined %}
 {% if mds_group_name in group_names %}
+{% if inventory_hostname in groups.get(mds_group_name, []) %}
 {% for host in groups[mds_group_name] %}
 {% if hostvars[host]['ansible_fqdn'] is defined and mds_use_fqdn %}
 [mds.{{ hostvars[host]['ansible_fqdn'] }}]
@@ -143,41 +144,48 @@ host = {{ hostvars[host]['ansible_hostname'] }}
 {% endfor %}
 {% endif %}
 {% endif %}
+{% endif %}

 {% if groups[rgw_group_name] is defined %}
 {% if rgw_group_name in group_names %}
+{% if inventory_hostname in groups.get(rgw_group_name, []) %}
 {% for host in groups[rgw_group_name] %}
 {% if hostvars[host]['ansible_hostname'] is defined %}
 [client.rgw.{{ hostvars[host]['ansible_hostname'] }}]
 host = {{ hostvars[host]['ansible_hostname'] }}
 keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
 log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
-{% if radosgw_address_block | length > 0 %}
+{% if hostvars[host]['radosgw_address_block'] is defined and hostvars[host]['radosgw_address_block'] | length > 0 %}
     {% if ip_version == 'ipv4' -%}
     rgw frontends = civetweb port={{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
     {%- elif ip_version == 'ipv6' -%}
     rgw frontends = civetweb port=[{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {%- endif %}
+    {%- endif -%}
+{% elif hostvars[host]['radosgw_address'] is defined and hostvars[host]['radosgw_address'] != '0.0.0.0' -%}
     {% if ip_version == 'ipv4' -%}
     rgw frontends = civetweb port={{ hostvars[host]['radosgw_address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
     {%- elif ip_version == 'ipv6' -%}
     rgw frontends = civetweb port=[{{ hostvars[host]['radosgw_address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {% endif %}
+    {%- endif -%}
+{%- else -%}
     {% if ip_version == 'ipv4' -%}
     rgw frontends = civetweb port={{ hostvars[host]['ansible_' + hostvars[host]['radosgw_interface']][ip_version]['address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
     {%- elif ip_version == 'ipv6' -%}
     rgw frontends = civetweb port=[{{ hostvars[host]['ansible_' + hostvars[host]['radosgw_interface']][ip_version][0]['address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {% endif %}
+    {%- endif -%}
+{%- endif %}
 {% endif %}
 {% endfor %}
 {% endif %}
 {% endif %}
+{% endif %}

 {% if groups[nfs_group_name] is defined %}
 {% if nfs_group_name in group_names %}
+{% if inventory_hostname in groups.get(nfs_group_name, []) %}
 {% for host in groups[nfs_group_name] %}
 {% if nfs_obj_gw %}
 {% if hostvars[host]['ansible_hostname'] is defined %}
@@ -185,35 +193,16 @@ log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname']
 host = {{ hostvars[host]['ansible_hostname'] }}
 keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
 log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
-rgw data = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}
-{% if radosgw_address_block | length > 0 %}
-    {% if ip_version == 'ipv4' -%}
-    rgw frontends = civetweb port={{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {%- elif ip_version == 'ipv6' -%}
-    rgw frontends = civetweb port=[{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {%- endif %}
-{% elif hostvars[host]['radosgw_address'] is defined and hostvars[host]['radosgw_address'] != '0.0.0.0' -%}
-    {% if ip_version == 'ipv4' -%}
-    rgw frontends = civetweb port={{ hostvars[host]['radosgw_address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {%- elif ip_version == 'ipv6' -%}
-    rgw frontends = civetweb port=[{{ hostvars[host]['radosgw_address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {% endif %}
-{%- else -%}
-    {% set interface = 'ansible_' + hostvars[host]['radosgw_interface'] %}
-    {% if ip_version == 'ipv6' -%}
-    rgw frontends = civetweb port=[{{ hostvars[host][interface][ip_version][0]['address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {%- elif ip_version == 'ipv4' -%}
-    rgw frontends = civetweb port={{ hostvars[host][interface][ip_version]['address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
-    {% endif %}
-{%- endif %}
 {% endif %}
 {% endif %}
 {% endfor %}
 {% endif %}
 {% endif %}
+{% endif %}

 {% if groups[restapi_group_name] is defined %}
 {% if restapi_group_name in group_names %}
+{% if inventory_hostname in groups.get(restapi_group_name, []) %}
 [client.restapi]
 {% if restapi_interface != "interface" %}
 {% include 'client_restapi_interface.j2' %}
@@ -224,4 +213,4 @@ keyring = /var/lib/ceph/restapi/ceph-restapi/keyring
 log file = /var/log/ceph/ceph-restapi.log
 {% endif %}
 {% endif %}
-
+{% endif %}
diff --git a/roles/ceph-iscsi-gw/tasks/prerequisites.yml b/roles/ceph-iscsi-gw/tasks/prerequisites.yml
index 6b877100f..3f7ef97a1 100644
--- a/roles/ceph-iscsi-gw/tasks/prerequisites.yml
+++ b/roles/ceph-iscsi-gw/tasks/prerequisites.yml
@@ -1,4 +1,15 @@
 ---
+- name: install redhat ceph iscsi package
+  package:
+    name: "{{ item }}"
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  with_items:
+    - tcmu-runner
+    - ceph-iscsi-config
+    - targetcli
+  when:
+    - ansible_os_family == 'RedHat'
+
 - name: check the status of the target.service override
   stat:
     path: /etc/systemd/system/target.service
diff --git a/roles/ceph-mds/tasks/non_containerized.yml b/roles/ceph-mds/tasks/non_containerized.yml
index cff635f46..9cac6e6bf 100644
--- a/roles/ceph-mds/tasks/non_containerized.yml
+++ b/roles/ceph-mds/tasks/non_containerized.yml
@@ -1,4 +1,21 @@
 ---
+- name: install ceph mds for debian
+  apt:
+    name: ceph-mds
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+    default_release: "{{ ceph_stable_release_uca | default(omit) }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
+  when:
+    - mds_group_name in group_names
+    - ansible_os_family == 'Debian'
+
+- name: install redhat ceph-mds package
+  package:
+    name: "ceph-mds"
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  when:
+    - mds_group_name in group_names
+    - ansible_os_family == 'RedHat'
+
 - name: create bootstrap-mds directory
   file:
     path: /var/lib/ceph/bootstrap-mds/
diff --git a/roles/ceph-mgr/tasks/pre_requisite.yml b/roles/ceph-mgr/tasks/pre_requisite.yml
index 80e9b3f45..4c3e314e1 100644
--- a/roles/ceph-mgr/tasks/pre_requisite.yml
+++ b/roles/ceph-mgr/tasks/pre_requisite.yml
@@ -1,4 +1,19 @@
 ---
+- name: install redhat ceph-mgr package
+  package:
+    name: ceph-mgr
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  when:
+    - ansible_os_family == 'RedHat'
+
+- name: install ceph mgr for debian
+  apt:
+    name: ceph-mgr
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+    default_release: "{{ ceph_stable_release_uca | default(omit) }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
+  when:
+    - ansible_os_family == 'Debian'
+
 - name: create mgr directory
   file:
     path: /var/lib/ceph/mgr/
diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml
index 88e281be0..822eff8af 100644
--- a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml
+++ b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml
@@ -1,4 +1,87 @@
 ---
+- name: install redhat nfs-ganesha-ceph package
+  package:
+    name: nfs-ganesha-ceph
+  when:
+    - nfs_file_gw
+    - ansible_os_family == 'RedHat'
+
+- name: install nfs cephfs gateway
+  package:
+    name: nfs-ganesha-ceph
+  when:
+    - nfs_file_gw
+    - ansible_os_family == 'RedHat'
+
+- name: install redhat nfs-ganesha-rgw and ceph-radosgw packages
+  package:
+    name: "{{ item }}"
+  with_items:
+    - nfs-ganesha-rgw
+    - ceph-radosgw
+  when:
+    - nfs_obj_gw
+    - ansible_os_family == 'RedHat'
+
+- name: install jemalloc for debian
+  apt:
+    name: libjemalloc1
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+    update_cache: yes
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository != 'rhcs'
+    - ansible_os_family == 'Debian'
+
+# debian installation
+- name: install nfs cephfs gateway
+  package:
+    name: nfs-ganesha-ceph
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository != 'rhcs'
+    - ansible_os_family == 'Debian'
+    - nfs_file_gw
+
+- name: install nfs rgw gateway
+  package:
+    name: "{{ item }}"
+  with_items:
+    - nfs-ganesha-rgw
+    - radosgw
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository != 'rhcs'
+    - ansible_os_family == 'Debian'
+    - nfs_obj_gw
+
+# debian_rhcs installation
+- name: install red hat storage nfs gateway for debian
+  apt:
+    name: nfs-ganesha
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository == 'rhcs'
+
+- name: install red hat storage nfs file gateway
+  apt:
+    name: nfs-ganesha-ceph
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository == 'rhcs'
+    - nfs_file_gw
+
+- name: install red hat storage nfs obj gateway
+  apt:
+    name: nfs-ganesha-rgw
+    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ceph_repository == 'rhcs'
+    - nfs_obj_gw
+
 # NOTE (leseb): we use root:ceph for permissions since ganesha
 # does not have the right selinux context to read ceph directories.
 - name: create rados gateway and ganesha directories
diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2
index bd854ae0a..7cb5f25c8 100644
--- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2
+++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2
@@ -32,6 +32,7 @@ expose_partitions "$1"
   {% endif -%}
   -v /dev:/dev \
   -v /etc/localtime:/etc/localtime:ro \
+  -v /run/udev:/run/udev:ro \
   -v /var/lib/ceph:/var/lib/ceph \
   -v /etc/ceph:/etc/ceph \
   $DOCKER_ENV \
diff --git a/tests/functional/centos/7/cluster/ceph-override.json b/tests/functional/centos/7/cluster/ceph-override.json
index 9a3af9436..965e361f9 100644
--- a/tests/functional/centos/7/cluster/ceph-override.json
+++ b/tests/functional/centos/7/cluster/ceph-override.json
@@ -1,7 +1,8 @@
 {
   "ceph_conf_overrides": {
     "global": {
-      "osd_pool_default_pg_num": 12
+      "osd_pool_default_pg_num": 12,
+      "osd_pool_default_size": 1
     }
   }
 }
diff --git a/tests/functional/tests/mds/test_mds.py b/tests/functional/tests/mds/test_mds.py
index a8665868c..6e88b53b9 100644
--- a/tests/functional/tests/mds/test_mds.py
+++ b/tests/functional/tests/mds/test_mds.py
@@ -34,6 +34,8 @@ class TestMDSs(object):
             hostname=node["vars"]["inventory_hostname"],
             cluster=node["cluster_name"]
         )
-        output = host.check_output(cmd)
-        daemons = json.loads(output)["fsmap"]["by_rank"][0]["name"]
-        assert hostname in daemons
+        output_raw = host.check_output(cmd)
+        output_json = json.loads(output_raw)
+        active_daemon = output_json["fsmap"]["by_rank"][0]["name"]
+        if active_daemon != hostname:
+            assert output_json['fsmap']['up:standby'] == 1
diff --git a/tests/functional/tests/mgr/test_mgr.py b/tests/functional/tests/mgr/test_mgr.py
index 146ac0062..488c7ccb9 100644
--- a/tests/functional/tests/mgr/test_mgr.py
+++ b/tests/functional/tests/mgr/test_mgr.py
@@ -39,6 +39,11 @@ class TestMGRs(object):
             hostname=node["vars"]["inventory_hostname"],
             cluster=node["cluster_name"]
         )
-        output = host.check_output(cmd)
-        daemons = json.loads(output)["mgrmap"]["active_name"]
-        assert hostname in daemons
+        output_raw = host.check_output(cmd)
+        output_json = json.loads(output_raw)
+        daemons = output_json['mgrmap']['active_name']
+        standbys = [i['name'] for i in output_json['mgrmap']['standbys']]
+        result = hostname in daemons
+        if not result:
+            result = hostname in standbys
+        assert result
diff --git a/tox.ini b/tox.ini
index 7a10920bc..187dc853a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -217,16 +217,16 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml

-  # wait 5 minutes for services to be ready
-  sleep 300
+  # wait 2 minutes for services to be ready
+  sleep 120

   # test cluster state using ceph-ansible tests
   testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

   # reboot all vms
   vagrant reload --no-provision

-  # wait 5 minutes for services to be ready
-  sleep 300
+  # wait 2 minutes for services to be ready
+  sleep 120

   # retest to ensure cluster came back up correctly after rebooting
   testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests