Merge pull request #1121 from D3N14L/master

Reviewed all roles and site.yml.sample and added "always_run: true" where appropriate
Sébastien Han 2016-11-28 10:26:15 +01:00 committed by GitHub
commit 06c75c6939
55 changed files with 80 additions and 2 deletions
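
For context: "always_run: true" tells Ansible to run a task even when the playbook is executed in check mode ("ansible-playbook --check"). The tasks touched by this commit are read-only probes (stat calls, version and port checks, "getenforce", and so on) whose registered results feed later "when:" conditions; if check mode skipped them, those variables would never be populated and the conditions would fail. A minimal sketch of the pattern, modelled on the nmap check below but not copied verbatim from the commit (note that "always_run" was deprecated in Ansible 2.2 in favour of "check_mode: no"):

    - name: check if nmap is installed
      command: which nmap
      changed_when: false
      failed_when: false
      register: nmapexist
      # run even under --check so nmapexist is always defined;
      # on Ansible >= 2.2 the equivalent is "check_mode: no"
      always_run: true

    - name: inform that nmap is not present
      debug:
        msg: "nmap is not installed, port checks will be skipped"  # illustrative message
      when: nmapexist.rc != 0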

View File

@@ -3,6 +3,7 @@
command: subscription-manager identity
register: subscription
changed_when: false
always_run: true
when: ansible_os_family == 'RedHat'
- name: install dependencies

View File

@@ -3,6 +3,7 @@
raw: stat $HOME/.python
register: need_python
ignore_errors: true
always_run: true
- include: install_pypy.yml
when: need_python | failed
@@ -11,6 +12,7 @@
raw: stat $HOME/.pip
register: need_pip
ignore_errors: true
always_run: true
- include: install_pip.yml
when: need_pip | failed and need_python | failed

View File

@@ -5,6 +5,7 @@
failed_when: false
register: nmapexist
run_once: true
always_run: true
- name: inform that nmap is not present
debug:
@@ -18,6 +19,7 @@
changed_when: false
failed_when: false
register: monportstate
always_run: true
when:
- mon_group_name in group_names
- nmapexist.rc == 0
@@ -35,6 +37,7 @@
changed_when: false
failed_when: false
register: osdrangestate
always_run: true
when:
- osd_group_name in group_names
- nmapexist.rc == 0
@@ -52,6 +55,7 @@
changed_when: false
failed_when: false
register: mdsrangestate
always_run: true
when:
- mds_group_name in group_names
- nmapexist.rc == 0
@@ -69,6 +73,7 @@
changed_when: false
failed_when: false
register: rgwportstate
always_run: true
when:
- rgw_group_name in group_names
- nmapexist.rc == 0
@@ -86,6 +91,7 @@
changed_when: false
failed_when: false
register: nfsportstate
always_run: true
when:
- nfs_group_name in group_names
- nmapexist.rc == 0

View File

@@ -270,6 +270,7 @@
stat:
path: /etc/default/ceph
register: etc_default_ceph
always_run: true
when: ansible_os_family == "Debian"
- name: configure cluster name

View File

@@ -3,12 +3,14 @@
command: subscription-manager identity
register: subscription
changed_when: false
always_run: true
- name: check if the red hat storage monitor repo is already present
shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms
changed_when: false
failed_when: false
register: rh_storage_mon_repo
always_run: true
when: mon_group_name in group_names
- name: enable red hat storage monitor repository
@@ -23,6 +25,7 @@
changed_when: false
failed_when: false
register: rh_storage_osd_repo
always_run: true
when: osd_group_name in group_names
- name: enable red hat storage osd repository
@@ -37,6 +40,7 @@
changed_when: false
failed_when: false
register: rh_storage_rgw_repo
always_run: true
when: rgw_group_name in group_names
- name: enable red hat storage rados gateway repository

View File

@@ -3,6 +3,7 @@
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
register: ceph_keys
always_run: true
- name: set keys permissions
file:

View File

@@ -16,6 +16,7 @@
with_items: "{{ ceph_config_keys }}"
changed_when: false
failed_when: false
always_run: true
register: statleftover
- name: fail if we find existing cluster files

View File

@@ -6,6 +6,7 @@
changed_when: false
failed_when: false
run_once: true
always_run: true
register: ceph_version
- set_fact:

View File

@@ -14,6 +14,7 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
- name: try to fetch ceph config and keys

View File

@@ -4,10 +4,12 @@
register: ceph_health
changed_when: false
failed_when: false
always_run: true
- name: check if it is Atomic host
stat: path=/run/ostree-booted
register: stat_ostree
always_run: true
- name: set fact for using Atomic host
set_fact:

View File

@@ -3,6 +3,7 @@
command: getenforce
register: sestatus
changed_when: false
always_run: true
- name: set selinux permissions
shell: chcon -Rt svirt_sandbox_file_t {{ item }}

View File

@@ -17,6 +17,7 @@
changed_when: false
failed_when: false
register: statleftover
always_run: true
- name: fail if we find existing cluster files
fail:

View File

@@ -18,6 +18,7 @@
removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
changed_when: false
register: cluster_uuid
always_run: true
become: false
when: generate_fsid

View File

@@ -7,6 +7,7 @@
failed_when: false
run_once: true
register: ceph_version
always_run: true
- set_fact:
after_hamer=True

View File

@@ -17,6 +17,7 @@
become: false
failed_when: false
register: statconfig
always_run: true
- name: try to fetch ceph config and keys
copy:

View File

@@ -4,10 +4,12 @@
register: ceph_health
changed_when: false
failed_when: false
always_run: true
- name: check if it is Atomic host
stat: path=/run/ostree-booted
register: stat_ostree
always_run: true
- name: set fact for using Atomic host
set_fact:

View File

@@ -3,6 +3,7 @@
command: getenforce
register: sestatus
changed_when: false
always_run: true
- name: set selinux permissions
shell: chcon -Rt svirt_sandbox_file_t {{ item }}

View File

@@ -4,6 +4,7 @@
ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'
changed_when: false
failed_when: false
always_run: true
register: rbd_pool_df
- name: check pg num for rbd pool
@@ -11,6 +12,7 @@
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'
changed_when: false
failed_when: false
always_run: true
register: rbd_pool_pgs
- name: destroy and recreate rbd pool if osd_pool_default_pg_num is not honoured

View File

@@ -2,6 +2,7 @@
- name: collect all the pools
command: rados --cluster {{ cluster }} lspools
register: ceph_pools
always_run: true
when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.hammer
- name: secure the cluster

View File

@@ -14,6 +14,7 @@
with_items: "{{ ceph_config_keys }}"
changed_when: false
failed_when: false
always_run: true
register: statleftover
- name: fail if we find existing cluster files

View File

@@ -12,6 +12,7 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
- name: try to fetch config and keys

View File

@@ -21,6 +21,7 @@
- name: get user create output
command: docker logs ceph-{{ ansible_hostname }}-rgw-user
always_run: true
register: rgwuser
- name: generate ganesha configuration file

View File

@@ -6,6 +6,7 @@
changed_when: false
failed_when: false
run_once: true
always_run: true
register: ceph_version
- set_fact:

View File

@@ -10,6 +10,7 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
- name: try to fetch config and keys

View File

@@ -4,10 +4,12 @@
register: ceph_health
changed_when: false
failed_when: false
always_run: true
- name: check if it is Atomic host
stat: path=/run/ostree-booted
register: stat_ostree
always_run: true
- name: set fact for using Atomic host
set_fact:

View File

@@ -3,6 +3,7 @@
command: getenforce
register: sestatus
changed_when: false
always_run: true
- name: set selinux permissions
shell: chcon -Rt svirt_sandbox_file_t {{ item }}

View File

@@ -104,6 +104,7 @@
shell: 'ls /var/lib/ceph/osd/ | grep -oP "\d+$"'
changed_when: false
failed_when: false
always_run: true
register: osd_id
when:
- use_systemd

View File

@@ -29,6 +29,7 @@
with_items: "{{ raw_journal_devices|unique }}"
changed_when: false
failed_when: false
always_run: true
register: journal_partition_status
when: raw_multi_journal or dmcrypt_dedicated_journal

View File

@@ -4,6 +4,7 @@
with_dict: "{{ ansible_devices }}"
changed_when: false
failed_when: false
always_run: true
register: ispartition_results
when:
- ansible_devices is defined
@@ -14,6 +15,7 @@
with_dict: "{{ ansible_devices }}"
changed_when: false
failed_when: false
always_run: true
register: osd_partition_status_results
when:
- ansible_devices is defined
@@ -37,6 +39,7 @@
with_dict: "{{ ansible_devices }}"
changed_when: false
failed_when: false
always_run: true
register: parted_results
when:
- ansible_devices is defined

View File

@@ -4,6 +4,7 @@
with_items: "{{ devices }}"
changed_when: false
failed_when: false
always_run: true
register: ispartition_results
- name: check the partition status of the osd disks
@@ -11,6 +12,7 @@
with_items: "{{ devices }}"
changed_when: false
failed_when: false
always_run: true
register: osd_partition_status_results
# NOTE: The following calls to sgdisk are retried because sgdisk is known to
@@ -31,4 +33,5 @@
with_items: "{{ devices }}"
changed_when: false
failed_when: false
always_run: true
register: parted_results

View File

@@ -16,6 +16,7 @@
with_items: "{{ ceph_config_keys }}"
changed_when: false
failed_when: false
always_run: true
register: statleftover
- name: fail if we find existing cluster files

View File

@@ -6,6 +6,7 @@
changed_when: false
failed_when: false
run_once: true
always_run: true
register: ceph_version
- set_fact:

View File

@@ -18,6 +18,7 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
- name: try to copy ceph config and keys

View File

@@ -4,6 +4,7 @@
register: ceph_health
changed_when: false
failed_when: false
always_run: true
- include: checks.yml
when:
@@ -14,6 +15,7 @@
- name: check if it is Atomic host
stat: path=/run/ostree-booted
register: stat_ostree
always_run: true
- name: set fact for using Atomic host
set_fact:

View File

@@ -3,6 +3,7 @@
command: getenforce
register: sestatus
changed_when: false
always_run: true
- name: set selinux permissions
shell: chcon -Rt svirt_sandbox_file_t {{ item }}

View File

@@ -12,6 +12,7 @@
- name: verify if the disk was already prepared
shell: "lsblk -o PARTLABEL {{ item }} | grep -sq 'ceph'"
failed_when: false
always_run: true
with_items: "{{ ceph_osd_docker_devices }}"
register: osd_prepared

View File

@@ -4,6 +4,7 @@
with_items: "{{ devices }}"
changed_when: false
failed_when: false
always_run: true
register: osd_path
when: not osd_directory
@@ -12,6 +13,7 @@
with_items: "{{ osd_path.results }}"
changed_when: false
failed_when: false
always_run: true
register: osd_id_non_dir_scenario
when: not osd_directory
@@ -20,6 +22,7 @@
with_items: "{{ osd_directories }}"
changed_when: false
failed_when: false
always_run: true
register: osd_id_dir_scenario
when: osd_directory

View File

@@ -16,6 +16,7 @@
with_items: "{{ ceph_config_keys }}"
changed_when: false
failed_when: false
always_run: true
register: statleftover
- name: fail if we find existing cluster files

View File

@@ -5,6 +5,7 @@
shell: docker inspect "docker.io/{{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}:{{ ceph_rbd_mirror_docker_image_tag }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
always_run: true
run_once: true
register: ceph_version

View File

@@ -13,6 +13,7 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
- name: try to fetch ceph config and keys

View File

@@ -4,9 +4,11 @@
register: ceph_health
changed_when: false
failed_when: false
always_run: true
- name: check if it is Atomic host
stat: path=/run/ostree-booted
always_run: true
register: stat_ostree
- name: set fact for using Atomic host

View File

@@ -3,6 +3,7 @@
command: getenforce
register: sestatus
changed_when: false
always_run: true
- name: set selinux permissions
shell: chcon -Rt svirt_sandbox_file_t {{ item }}

View File

@@ -4,6 +4,7 @@
changed_when: false
failed_when: false
run_once: true
always_run: true
register: ceph_version
- set_fact:

View File

@@ -12,6 +12,7 @@
become: false
ignore_errors: true
register: statconfig
always_run: true
- name: try to fetch ceph config and keys
copy:

View File

@@ -2,6 +2,7 @@
- name: check if it is Atomic host
stat: path=/run/ostree-booted
register: stat_ostree
always_run: true
- name: set fact for using Atomic host
set_fact:

View File

@@ -3,6 +3,7 @@
shell: "pgrep ceph-rest-api"
changed_when: false
failed_when: false
always_run: true
register: restapi_status
- name: start ceph rest api

View File

@@ -16,6 +16,7 @@
with_items: "{{ ceph_config_keys }}"
changed_when: false
failed_when: false
always_run: true
register: statleftover
- name: fail if we find existing cluster files

View File

@@ -17,6 +17,7 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
when:
- nfs_obj_gw

View File

@@ -5,6 +5,7 @@
shell: docker inspect "docker.io/{{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}:{{ ceph_rgw_docker_image_tag }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
always_run: true
run_once: true
register: ceph_version

View File

@@ -11,6 +11,7 @@
changed_when: false
become: false
ignore_errors: true
always_run: true
register: statconfig
- name: try to fetch ceph config and keys

View File

@@ -4,9 +4,11 @@
register: ceph_health
changed_when: false
failed_when: false
always_run: true
- name: check if it is Atomic host
stat: path=/run/ostree-booted
always_run: true
register: stat_ostree
- name: set fact for using Atomic host

View File

@@ -3,6 +3,7 @@
command: getenforce
register: sestatus
changed_when: false
always_run: true
- name: set selinux permissions
shell: chcon -Rt svirt_sandbox_file_t {{ item }}

View File

@@ -4,21 +4,25 @@
register: realmcheck
failed_when: False
changed_when: False
always_run: True
- name: check if the zonegroup already exists
command: radosgw-admin zonegroup get --rgw-zonegroup={{ rgw_zonegroup }}
register: zonegroupcheck
failed_when: False
changed_when: False
always_run: True
- name: check if the zone already exists
command: radosgw-admin zone get --rgw-zone={{ rgw_zone }}
register: zonecheck
failed_when: False
changed_when: False
always_run: True
- name: check if the system user already exists
command: radosgw-admin user info --uid=zone.user
register: usercheck
failed_when: False
changed_when: False
always_run: True