Revert "Failed_when and stat module"

pull/356/head
Leseb 2015-07-29 19:14:04 +02:00
parent 664d133f47
commit 5aa3d3729f
19 changed files with 37 additions and 37 deletions

View File

@ -21,7 +21,7 @@
stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/migration_completed
register: migration_completed
failed_when: false
ignore_errors: True
- name: Check for failed run
stat: >
@ -248,7 +248,7 @@
stat: >
path=/var/lib/ceph/migration_completed
register: migration_completed
failed_when: false
ignore_errors: True
- name: Check for failed run
stat: >
@ -286,13 +286,13 @@
- name: Check if sysvinit
shell: stat /var/lib/ceph/osd/ceph-*/sysvinit
register: osdsysvinit
failed_when: false
ignore_errors: True
changed_when: False
- name: Check if upstart
shell: stat /var/lib/ceph/osd/ceph-*/upstart
register: osdupstart
failed_when: false
ignore_errors: True
changed_when: False
- name: Archive ceph configs
@ -443,7 +443,7 @@
stat: >
path=/var/lib/ceph/radosgw/migration_completed
register: migration_completed
failed_when: false
ignore_errors: True
- name: Check for failed run
stat: >

View File

@ -50,7 +50,7 @@
tags: update
command: >
git remote add upstream git@github.com:ceph/ceph-ansible.git
failed_when: false
ignore_errors: yes
- name: pulls the newest changes from upstream
tags: update

View File

@ -15,12 +15,12 @@
- name: disk zap
command: /usr/sbin/sgdisk --zap-all --clear --mbrtogpt -g -- {{ item }}
with_items: devices
failed_when: false
ignore_errors: true
- name: disk zap
command: /usr/sbin/sgdisk --zap-all --clear --mbrtogpt -g -- {{ item }}
with_items: devices
failed_when: false
ignore_errors: true
- name: call partprobe
command: partprobe
@ -31,7 +31,7 @@
- name: remove osd data
shell: rm -rf /var/lib/ceph/osd/*/*
failed_when: false
ignore_errors: true
- name: purge remaining data
command: ceph-deploy purgedata {{ ansible_fqdn }}

View File

@ -26,15 +26,15 @@
when: ansible_os_family == 'Debian'
- name: check for a ceph socket
stat: path=/var/run/ceph/*.asok
shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
changed_when: false
failed_when: false
ignore_errors: true
register: socket
- name: check for a rados gateway socket
stat: path={{ rbd_client_admin_socket_path }}*.asok
shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1"
changed_when: false
failed_when: false
ignore_errors: true
register: socketrgw
- name: create a local fetch directory if it doesn't exist

View File

@ -2,18 +2,18 @@
- name: disable osd directory parsing by updatedb
command: updatedb -e /var/lib/ceph
changed_when: false
failed_when: false
ignore_errors: true
- name: disable transparent hugepage
command: "echo never > /sys/kernel/mm/transparent_hugepage/enabled"
changed_when: false
failed_when: false
ignore_errors: true
when: disable_transparent_hugepage
- name: disable swap
command: swapoff -a
changed_when: false
failed_when: false
ignore_errors: true
when: disable_swap
- name: apply operating system tuning

View File

@ -7,7 +7,7 @@
- name: check if the red hat storage monitor repo is already present
shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-mon-rpms
changed_when: false
failed_when: false
ignore_errors: true
register: rh_storage_mon_repo
when: mon_group_name in group_names
@ -21,7 +21,7 @@
- name: check if the red hat storage osd repo is already present
shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-osd-rpms
changed_when: false
failed_when: false
ignore_errors: true
register: rh_storage_osd_repo
when: osd_group_name in group_names
@ -35,7 +35,7 @@
- name: check if the red hat storage rados gateway repo is already present
shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-tools-rpms
changed_when: false
failed_when: false
ignore_errors: true
register: rh_storage_rgw_repo
when: rgw_group_name in group_names

View File

@ -13,7 +13,7 @@
with_items: ceph_config_keys
changed_when: false
sudo: false
failed_when: false
ignore_errors: true
register: statconfig
- name: try to fetch ceph config and keys

View File

@ -30,7 +30,7 @@
with_items: ceph_config_keys
changed_when: false
sudo: false
failed_when: false
ignore_errors: true
register: statconfig
- name: try to fetch ceph config and keys

View File

@ -15,7 +15,7 @@
with_items: ceph_config_keys
changed_when: false
sudo: false
failed_when: false
ignore_errors: true
register: statconfig
- name: try to fetch ceph config and keys

View File

@ -7,7 +7,7 @@
- "{{ openstack_nova_pool }}"
- "{{ openstack_cinder_backup_pool }}"
changed_when: false
failed_when: false
ignore_errors: true
- name: create openstack keys
command: >

View File

@ -9,7 +9,7 @@
- ispartition.results
- devices
changed_when: false
failed_when: false
ignore_errors: true
when:
item.0.rc == 0 and
item.1.rc != 0
@ -21,7 +21,7 @@
- ispartition.results
- devices
changed_when: false
failed_when: false
ignore_errors: true
when: item.0.rc == 0
- include: osd_fragment.yml

View File

@ -2,18 +2,18 @@
# NOTE (leseb): current behavior of ceph-disk is to fail when the device is mounted "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
# the return code is 1, which makes sense, however ideally if ceph-disk detects a ceph partition
# it should exit with rc=0 and not do anything unless we pass something like --force
# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "failed_when: false"
# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: true"
# I believe it's safer
- name: check if the device is a partition or a disk
shell: "echo '{{ item }}' | egrep '/dev/(([a-z]{3,4}[0-9]$)|(cciss/c[0-9]{1}d[0-9]{1}p[0-9]$))'"
with_items: devices
changed_when: false
failed_when: false
ignore_errors: true
register: ispartition
- name: if partition named 'ceph' exists
shell: "parted --script {{ item }} print | egrep -sq '^ 1.*ceph'"
with_items: devices
changed_when: false
failed_when: false
ignore_errors: true
register: parted

View File

@ -10,7 +10,7 @@
with_items: ceph_config_keys
changed_when: false
sudo: false
failed_when: false
ignore_errors: true
register: statconfig
- name: try to fetch ceph config and keys

View File

@ -3,14 +3,14 @@
shell: "df | grep {{ item }} | awk '{print $6}'"
with_items: devices
changed_when: false
failed_when: false
ignore_errors: true
register: osd_path
- name: get osd id
command: cat {{ item.stdout }}/whoami
with_items: osd_path.results
changed_when: false
failed_when: false
ignore_errors: true
register: osd_id
- name: create a ceph fragment and assemble directory

View File

@ -18,7 +18,7 @@
stat: >
path={{ item }}
with_items: ceph_config_keys
failed_when: false
ignore_errors: true
register: statconfig
- name: try to fetch ceph config and keys

View File

@ -2,7 +2,7 @@
- name: check if ceph rest api is already started
shell: "pgrep ceph-rest-api"
changed_when: false
failed_when: false
ignore_errors: true
register: restapi_status
- name: start ceph rest api

View File

@ -131,7 +131,7 @@
- a2ensite rgw.conf
- a2dissite *default
changed_when: false
failed_when: false
ignore_errors: true
notify:
- restart apache2

View File

@ -2,7 +2,7 @@
- name: check if rgw is started
command: /etc/init.d/radosgw status
register: rgwstatus
failed_when: false
ignore_errors: true
- name: start rgw
command: /etc/init.d/radosgw start

View File

@ -92,12 +92,12 @@
- name: Check if sysvinit
shell: stat /var/lib/ceph/osd/ceph-*/sysvinit
register: osdsysvinit
failed_when: false
ignore_errors: True
- name: Check if upstart
shell: stat /var/lib/ceph/osd/ceph-*/upstart
register: osdupstart
failed_when: false
ignore_errors: True
- name: Gracefully stop the OSDs (Upstart)
service: >