Merge pull request #1315 from ceph/rolling-update-tests

adds a testing scenario for rolling updates
Sébastien Han 2017-02-21 15:53:57 -05:00 committed by GitHub
commit 661a9d0cdf
6 changed files with 42 additions and 14 deletions

View File

@@ -410,3 +410,7 @@ dummy:
 # Set this to true to enable Object access via NFS. Requires an RGW role.
 #nfs_obj_gw: false
+# this is only here for usage with the rolling_update.yml playbook
+# do not ever change this here
+#rolling_update: false

View File

@@ -78,7 +78,7 @@
     name: ceph-mon
     state: stopped
     args: id={{ ansible_hostname }}
-  when: ansible_service_mgr == 'systemd'
+  when: ansible_service_mgr == 'upstart'

 - name: stop ceph mons with sysvinit
   service:
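
The only change in this hunk is the condition: the task that stops ceph-mon via upstart was gated on ansible_service_mgr == 'systemd', so it was skipped on upstart hosts and fired where systemd is in charge. After the fix the task presumably reads roughly as follows (a sketch reconstructed from the fragments above; the task name and indentation are inferred, not shown in the diff):

# reconstructed sketch; task name and indentation are inferred from context
- name: stop ceph mons with upstart
  service:
    name: ceph-mon
    state: stopped
    args: id={{ ansible_hostname }}
  when: ansible_service_mgr == 'upstart'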

View File

@@ -285,7 +285,7 @@ mds_max_mds: 3
 #
 #radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
 radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
-radosgw_civetweb_port: 8080 # on Infernalis we get: "set_ports_option: cannot bind to 80: 13 (Permission denied)"
+radosgw_civetweb_port: 8080
 radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
 radosgw_civetweb_num_threads: 50
 # For additional civetweb configuration options available such as SSL, logging,
@@ -401,3 +401,7 @@ mon_containerized_default_ceph_conf_with_kv: false
 nfs_file_gw: true
 # Set this to true to enable Object access via NFS. Requires an RGW role.
 nfs_obj_gw: false
+# this is only here for usage with the rolling_update.yml playbook
+# do not ever change this here
+rolling_update: false
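
The new rolling_update flag defaults to false here and is meant to be flipped only by the rolling_update.yml playbook for the duration of an upgrade. The exact mechanism is not part of this diff; a hypothetical illustration of how the playbook could override the group_vars default at the play level:

# hypothetical illustration only; rolling_update.yml itself is not shown in this diff
- hosts: mons
  serial: 1
  vars:
    rolling_update: true
  roles:
    - ceph-common
    - ceph-mon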

View File

@@ -5,6 +5,9 @@
   always_run: yes
   register: ceph_version

+# this task shouldn't run in a rolling_update situation
+# because it blindly picks a mon, which may be down because
+# of the rolling update
 - name: is ceph running already?
   command: ceph --connect-timeout 3 --cluster {{ cluster }} fsid
   changed_when: false
@@ -12,6 +15,14 @@
   always_run: yes
   register: ceph_current_fsid
   delegate_to: "{{ groups[mon_group_name][0] }}"
+  when: not rolling_update
+
+# set this as a default when performing a rolling_update
+# so the rest of the tasks here will succeed
+- set_fact:
+    ceph_current_fsid:
+      rc: 1
+  when: rolling_update

 - name: create a local fetch directory if it does not exist
   local_action: file path={{ fetch_directory }} state=directory
@@ -22,7 +33,8 @@
 - set_fact:
     fsid: "{{ ceph_current_fsid.stdout }}"
-  when: ceph_current_fsid.rc == 0
+  when:
+    - ceph_current_fsid.rc == 0

 - set_fact:
     monitor_name: "{{ ansible_hostname }}"
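
Taken together, the three hunks above add a guard around fsid discovery: on a normal run the fsid is queried from the first mon, while during a rolling update the query is skipped and a stub result is substituted so the guarded set_fact below simply never fires. A consolidated sketch of the resulting tasks (paraphrased from the hunks; task options not visible in the diff are omitted):

# paraphrased consolidation of the hunks above, not the verbatim file
- name: is ceph running already?
  command: ceph --connect-timeout 3 --cluster {{ cluster }} fsid
  changed_when: false
  always_run: yes
  register: ceph_current_fsid
  delegate_to: "{{ groups[mon_group_name][0] }}"
  when: not rolling_update   # the delegated mon may be down mid-update

# stub result so later checks of ceph_current_fsid still work
- set_fact:
    ceph_current_fsid:
      rc: 1
  when: rolling_update

# only adopt the queried fsid when the command actually ran and succeeded
- set_fact:
    fsid: "{{ ceph_current_fsid.stdout }}"
  when:
    - ceph_current_fsid.rc == 0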

View File

@@ -6,19 +6,15 @@
   when: ceph_stable

 - name: add ceph stable repository
-  package:
-    name: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro|replace('rhel', 'el') }}.noarch.rpm"
+  yum_repository:
+    name: ceph_stable
+    description: Ceph Stable repo
+    gpgcheck: yes
     state: present
-  changed_when: false
+    gpgkey: "{{ ceph_stable_key }}"
+    baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/$basearch"
   when: ceph_stable

-- name: change download url to ceph mirror
-  replace:
-    name: /etc/yum.repos.d/ceph.repo
-    regexp: http://download.ceph.com
-    replace: "{{ ceph_mirror }}"
-  when: ceph_mirror != "http://download.ceph.com"
-
 # we must use curl instead of ansible's uri module because SNI support in
 # Python is only available in 2.7.9 and later, and most supported distributions
 # don't have that version, so a request to https fails.
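
Because the new yum_repository task builds baseurl from ceph_mirror directly, the follow-up task that rewrote /etc/yum.repos.d/ceph.repo is no longer needed: pointing an installation at a different mirror becomes a plain variable override. A hypothetical group_vars snippet (the mirror URL is illustrative only):

# hypothetical override; with the yum_repository task above this changes the
# repo baseurl directly, no post-install rewrite of ceph.repo required
ceph_mirror: http://mirror.example.internal/ceph
ceph_stable_release: kraken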

tox.ini
View File

@@ -1,5 +1,5 @@
 [tox]
-envlist = {ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt}
+envlist = {ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,update_dmcrypt}
 skipsdist = True

 # extra commands for purging clusters
@@ -15,6 +15,16 @@ commands=
   # test that the cluster can be redeployed in a healthy state
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

+# extra commands for performing a rolling update
+# currently this hardcodes the release to kraken
+# as we're still installing jewel by default
+[update]
+commands=
+  cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/rolling_update.yml --extra-vars="ceph_stable_release=kraken ireallymeanit=yes fetch_directory={changedir}/fetch"
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
+
 [testenv]
 whitelist_externals =
   vagrant
@@ -48,6 +58,7 @@ changedir=
   docker_cluster: {toxinidir}/tests/functional/centos/7/docker-cluster
   purge_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
   purge_dmcrypt: {toxinidir}/tests/functional/centos/7/dmcrypt-dedicated-journal
+  update_dmcrypt: {toxinidir}/tests/functional/centos/7/dmcrypt-dedicated-journal
 commands=
   vagrant up --no-provision {posargs:--provider=virtualbox}
   bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
@@ -59,5 +70,6 @@ commands=
   purge_cluster: {[purge]commands}
   purge_dmcrypt: {[purge]commands}
+  update_dmcrypt: {[update]commands}
   vagrant destroy --force
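
The new [update] scenario deploys with the default release and then runs rolling_update.yml against kraken, which is why the comment flags the hardcoded release while jewel is still the install default. The --extra-vars string on the ansible-playbook line is equivalent to supplying a vars file like the following (illustrative only; {changedir} is substituted by tox per scenario):

# illustrative equivalent of the --extra-vars passed by the [update] commands
ceph_stable_release: kraken            # release to upgrade to; the initial deploy uses the default
ireallymeanit: yes                     # presumably pre-answers the playbook's confirmation prompt
fetch_directory: "{changedir}/fetch"   # tox fills in {changedir} for the selected scenario

Like the other entries in envlist, the scenario should be selectable with tox -e ansible2.2-update_dmcrypt.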