Avoid deprecated always_run

The `always_run` key is deprecated and is being removed in Ansible 2.4.
Using it causes a warning to be displayed:

    [DEPRECATION WARNING]: always_run is deprecated.

This patch changes all instances of `always_run` to use the `always`
tag, which causes the task to run every time the playbook runs.
pull/2037/head
Major Hayden 2017-10-12 08:29:41 -05:00
parent 002b0341d0
commit 620fb37dd4
No known key found for this signature in database
GPG Key ID: 737051E0C1011FB1
44 changed files with 169 additions and 85 deletions

View File

@ -3,7 +3,8 @@
raw: stat $HOME/.python
register: need_python
ignore_errors: true
always_run: true
tags:
- always
- include: install_pypy.yml
when: need_python | failed
@ -12,7 +13,8 @@
raw: stat $HOME/.pip
register: need_pip
ignore_errors: true
always_run: true
tags:
- always
- include: install_pip.yml
when: need_pip | failed and need_python | failed

View File

@ -5,7 +5,8 @@
failed_when: false
register: nmapexist
run_once: true
always_run: true
tags:
- always
- name: inform that nmap is not present
debug:
@ -19,10 +20,11 @@
changed_when: false
failed_when: false
register: monportstate
always_run: true
when:
- mon_group_name in group_names
- nmapexist.rc == 0
tags:
- always
- name: fail if monitor port is filtered
fail:
@ -37,10 +39,11 @@
changed_when: false
failed_when: false
register: osdrangestate
always_run: true
when:
- osd_group_name in group_names
- nmapexist.rc == 0
tags:
- always
- name: fail if osd and mds range is filtered (osd hosts)
fail:
@ -55,10 +58,11 @@
changed_when: false
failed_when: false
register: mdsrangestate
always_run: true
when:
- mds_group_name in group_names
- nmapexist.rc == 0
tags:
- always
- name: fail if osd and mds range is filtered (mds hosts)
fail:
@ -73,10 +77,11 @@
changed_when: false
failed_when: false
register: rgwportstate
always_run: true
when:
- rgw_group_name in group_names
- nmapexist.rc == 0
tags:
- always
- name: fail if rados gateway port is filtered
fail:
@ -91,10 +96,11 @@
changed_when: false
failed_when: false
register: nfsportstate
always_run: true
when:
- nfs_group_name in group_names
- nmapexist.rc == 0
tags:
- always
- name: fail if NFS ports are filtered
fail:

View File

@ -3,5 +3,6 @@
command: rpm -q chrony
register: ntp_pkg_query
ignore_errors: true
always_run: true
changed_when: false
tags:
- always

View File

@ -3,10 +3,11 @@
command: dpkg -s ntp
register: ntp_pkg_query
ignore_errors: true
always_run: true
changed_when: false
when:
- ansible_os_family == 'Debian'
tags:
- always
- name: install ntp on debian
package:

View File

@ -3,10 +3,11 @@
command: rpm -q ntp
register: ntp_pkg_query
ignore_errors: true
always_run: true
changed_when: false
when:
- ansible_os_family == 'RedHat'
tags:
- always
- name: install ntp on redhat
package:

View File

@ -30,11 +30,12 @@
register: subscription
changed_when: false
failed_when: false
always_run: true
when:
- ansible_distribution == 'Red Hat Enterprise Linux'
- ceph_repository == 'rhcs'
- ceph_repository_type == 'cdn'
tags:
- always
- name: fail on unregistered red hat rhcs linux
fail:

View File

@ -22,9 +22,10 @@
stat:
path: /etc/default/ceph
register: etc_default_ceph
always_run: true
when:
- ansible_os_family == "Debian"
tags:
- always
- name: configure cluster name
lineinfile:

View File

@ -4,9 +4,10 @@
changed_when: false
failed_when: false
register: rhcs_mon_repo
always_run: true
when:
- mon_group_name in group_names
tags:
- always
- name: enable red hat storage monitor repository
command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms
@ -20,9 +21,10 @@
changed_when: false
failed_when: false
register: rhcs_osd_repo
always_run: true
when:
- osd_group_name in group_names
tags:
- always
- name: enable red hat storage osd repository
command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-osd-rpms
@ -36,9 +38,10 @@
changed_when: false
failed_when: false
register: rhcs_rgw_mds_nfs_repo
always_run: true
when:
- (rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names)
tags:
- always
- name: enable red hat storage rados gateway / mds repository
command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-tools-rpms

View File

@ -65,8 +65,9 @@
- name: get ceph version
command: ceph --version
changed_when: false
always_run: yes
register: ceph_version
tags:
- always
- name: set_fact ceph_version
set_fact:

View File

@ -33,8 +33,9 @@
command: sysctl -b vm.min_free_kbytes
changed_when: false
failed_when: false
always_run: yes
register: default_vm_min_free_kbytes
tags:
- always
- name: set_fact vm_min_free_kbytes
set_fact:

View File

@ -87,10 +87,11 @@
removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
changed_when: false
register: cluster_uuid
always_run: true
become: false
when:
- generate_fsid
tags:
- always
- name: ensure /etc/ceph exists
file:

View File

@ -4,60 +4,67 @@
register: ceph_mon_container_stat
changed_when: false
failed_when: false
always_run: true
when:
- inventory_hostname in groups.get(mon_group_name, [])
tags:
- always
- name: check for an osd container
command: "docker ps -q --filter='name=ceph-osd-{{ ansible_hostname }}'"
register: ceph_osd_container_stat
changed_when: false
failed_when: false
always_run: true
when:
- inventory_hostname in groups.get(osd_group_name, [])
tags:
- always
- name: check for a mds container
command: "docker ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
register: ceph_mds_container_stat
changed_when: false
failed_when: false
always_run: true
when:
- inventory_hostname in groups.get(mds_group_name, [])
tags:
- always
- name: check for a rgw container
command: "docker ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
register: ceph_rgw_container_stat
changed_when: false
failed_when: false
always_run: true
when:
- inventory_hostname in groups.get(rgw_group_name, [])
tags:
- always
- name: check for a mgr container
command: "docker ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
register: ceph_mgr_container_stat
changed_when: false
failed_when: false
always_run: true
when:
- inventory_hostname in groups.get(mgr_group_name, [])
tags:
- always
- name: check for a rbd mirror container
command: "docker ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
register: ceph_rbd_mirror_container_stat
changed_when: false
failed_when: false
always_run: true
when:
- inventory_hostname in groups.get(rbdmirror_group_name, [])
tags:
- always
- name: check for a nfs container
command: "docker ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
register: ceph_nfs_container_stat
changed_when: false
failed_when: false
always_run: true
when:
- inventory_hostname in groups.get(nfs_group_name, [])
tags:
- always

View File

@ -3,20 +3,22 @@
shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok
changed_when: false
failed_when: false
always_run: true
register: mon_socket_stat
when:
- inventory_hostname in groups.get(mon_group_name, [])
tags:
- always
- name: check if the ceph mon socket is in-use
command: fuser --silent {{ mon_socket_stat.stdout }}
changed_when: false
failed_when: false
always_run: true
register: mon_socket
when:
- inventory_hostname in groups.get(mon_group_name, [])
- mon_socket_stat.rc == 0
tags:
- always
- name: remove ceph mon socket if exists and not used by a process
file:
@ -32,20 +34,22 @@
stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-osd*.asok
changed_when: false
failed_when: false
always_run: true
register: osd_socket_stat
when:
- inventory_hostname in groups.get(osd_group_name, [])
tags:
- always
- name: check if the ceph osd socket is in-use
command: fuser --silent {{ osd_socket_stat.stdout }}
changed_when: false
failed_when: false
always_run: true
register: osd_socket
when:
- inventory_hostname in groups.get(osd_group_name, [])
- osd_socket_stat.rc == 0
tags:
- always
- name: remove ceph osd socket if exists and not used by a process
file:
@ -61,20 +65,22 @@
stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mds*.asok
changed_when: false
failed_when: false
always_run: true
register: mds_socket_stat
when:
- inventory_hostname in groups.get(mds_group_name, [])
tags:
- always
- name: check if the ceph mds socket is in-use
command: fuser --silent {{ mds_socket_stat.stdout }}
changed_when: false
failed_when: false
always_run: true
register: mds_socket
when:
- inventory_hostname in groups.get(mds_group_name, [])
- mds_socket_stat.rc == 0
tags:
- always
- name: remove ceph mds socket if exists and not used by a process
file:
@ -90,20 +96,22 @@
stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rgw*.asok
changed_when: false
failed_when: false
always_run: true
register: rgw_socket_stat
when:
- inventory_hostname in groups.get(rgw_group_name, [])
tags:
- always
- name: check if the ceph rgw socket is in-use
command: fuser --silent {{ rgw_socket_stat.stdout }}
changed_when: false
failed_when: false
always_run: true
register: rgw_socket
when:
- inventory_hostname in groups.get(rgw_group_name, [])
- rgw_socket_stat.rc == 0
tags:
- always
- name: remove ceph rgw socket if exists and not used by a process
file:
@ -119,20 +127,22 @@
stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mgr*.asok
changed_when: false
failed_when: false
always_run: true
register: mgr_socket_stat
when:
- inventory_hostname in groups.get(mgr_group_name, [])
tags:
- always
- name: check if the ceph mgr socket is in-use
command: fuser --silent {{ mgr_socket_stat.stdout }}
changed_when: false
failed_when: false
always_run: true
register: mgr_socket
when:
- inventory_hostname in groups.get(mgr_group_name, [])
- mgr_socket_stat.rc == 0
tags:
- always
- name: remove ceph mgr socket if exists and not used by a process
file:
@ -148,20 +158,22 @@
stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rbd-mirror*.asok
changed_when: false
failed_when: false
always_run: true
register: rbd_mirror_socket_stat
when:
- inventory_hostname in groups.get(rbdmirror_group_name, [])
tags:
- always
- name: check if the ceph rbd mirror socket is in-use
command: fuser --silent {{ rbd_mirror_socket_stat.stdout }}
changed_when: false
failed_when: false
always_run: true
register: rbd_mirror_socket
when:
- inventory_hostname in groups.get(rbdmirror_group_name, [])
- rbd_mirror_socket_stat.rc == 0
tags:
- always
- name: remove ceph rbd mirror socket if exists and not used by a process
file:
@ -176,20 +188,22 @@
command: stat --printf=%n /var/run/ganesha.pid
changed_when: false
failed_when: false
always_run: true
register: nfs_socket_stat
when:
- inventory_hostname in groups.get(nfs_group_name, [])
tags:
- always
- name: check if the ceph nfs ganesha socket is in-use
command: fuser --silent {{ nfs_socket_stat.stdout }}
changed_when: false
failed_when: false
always_run: true
register: nfs_socket
when:
- inventory_hostname in groups.get(nfs_group_name, [])
- nfs_socket_stat.rc == 0
tags:
- always
- name: remove ceph nfs ganesha socket if exists and not used by a process
file:

View File

@ -18,12 +18,13 @@
command: "{{ docker_exec_cmd }} ceph --connect-timeout 3 --cluster {{ cluster }} fsid"
changed_when: false
failed_when: false
always_run: yes
register: ceph_current_fsid
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- not rolling_update
- groups.get(mon_group_name, []) | length > 0
tags:
- always
# We want this check to be run only on the first node
- name: check if {{ fetch_directory }} directory exists
@ -82,9 +83,10 @@
changed_when: false
register: cluster_uuid
become: false
always_run: true
when:
- generate_fsid
tags:
- always
- name: set_fact fsid
set_fact:

View File

@ -3,5 +3,6 @@
command: rpm -q chrony
register: ntp_pkg_query
ignore_errors: true
always_run: true
changed_when: false
tags:
- always

View File

@ -3,10 +3,11 @@
command: dpkg -s ntp
register: ntp_pkg_query
ignore_errors: true
always_run: true
changed_when: false
when:
- ansible_os_family == 'Debian'
tags:
- always
- name: install ntp on debian
package:

View File

@ -3,10 +3,11 @@
command: rpm -q ntp
register: ntp_pkg_query
ignore_errors: true
always_run: true
changed_when: false
when:
- ansible_os_family == 'RedHat'
tags:
- always
- name: install ntp on redhat
package:

View File

@ -6,7 +6,8 @@
stat:
path: /run/ostree-booted
register: stat_ostree
always_run: true
tags:
- always
- name: set_fact is_atomic
set_fact:
@ -33,8 +34,9 @@
- name: get docker version
command: docker --version
changed_when: false
always_run: yes
register: docker_version
tags:
- always
- name: set_fact docker_version docker_version.stdout.split
set_fact:
@ -51,7 +53,8 @@
register: ceph_health
changed_when: false
failed_when: false
always_run: true
tags:
- always
- name: include checks.yml
include: checks.yml
@ -89,8 +92,9 @@
- name: get ceph version
command: docker run --entrypoint /usr/bin/ceph {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --version
changed_when: false
always_run: yes
register: ceph_version
tags:
- always
- name: set_fact ceph_version ceph_version.stdout.split
set_fact:

View File

@ -3,4 +3,5 @@
command: getenforce
register: sestatus
changed_when: false
always_run: true
tags:
- always

View File

@ -34,5 +34,6 @@
become: false
failed_when: false
register: statconfig
always_run: true
tags:
- always

View File

@ -3,7 +3,8 @@
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
register: ceph_keys
always_run: true
tags:
- always
- name: set keys permissions
file:

View File

@ -12,8 +12,9 @@
with_items: "{{ crt_files }}"
changed_when: false
failed_when: false
always_run: true
register: crt_files_exist
tags:
- always
- name: try to fetch crt file(s)
copy:

View File

@ -14,8 +14,9 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
tags:
- always
- name: try to fetch ceph config and keys
copy:

View File

@ -11,8 +11,9 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
tags:
- always
- name: try to fetch ceph config and keys
copy:
@ -30,9 +31,10 @@
- name: "copy mgr key to /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring"
command: cp /etc/ceph/{{ cluster }}.mgr.{{ ansible_hostname }}.keyring /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring
changed_when: false
always_run: true
with_items: "{{ statconfig.results }}"
when: item.stat.exists == true
tags:
- always
- name: set ceph mgr key permission
file:

View File

@ -4,18 +4,20 @@
args:
creates: /etc/ceph/{{ cluster }}.client.admin.keyring
changed_when: false
always_run: true
when:
- cephx
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
tags:
- always
- name: collect admin and bootstrap keys
command: ceph-create-keys --cluster {{ cluster }} -i {{ monitor_name }}
changed_when: false
always_run: true
when:
- cephx
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
tags:
- always
# NOTE (leseb): wait for mon discovery and quorum resolution
# the admin key is not instantaneously created so we have to wait a bit
@ -42,19 +44,21 @@
command: ceph --cluster {{ cluster }} config-key get initial_mon_keyring
changed_when: false
ignore_errors: true
always_run: true
run_once: true
failed_when: false
register: is_initial_mon_keyring_in_kv
tags:
- always
- name: put initial mon keyring in mon kv store
command: ceph --cluster {{ cluster }} config-key put initial_mon_keyring {{ monitor_keyring.stdout }}
changed_when: false
always_run: true
run_once: true
when:
- is_initial_mon_keyring_in_kv.rc != 0
- cephx
tags:
- always
- name: create ceph rest api keyring when mon is not containerized
command: ceph --cluster {{ cluster }} auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/{{ cluster }}.client.restapi.keyring
@ -87,9 +91,10 @@
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
register: ceph_keys
always_run: true
when:
- cephx
tags:
- always
- name: set keys permissions
file:

View File

@ -12,8 +12,9 @@
changed_when: false
register: monitor_keyring
become: false
always_run: true
when: cephx
tags:
- always
- name: create monitor initial keyring
command: ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }} --create-keyring --name=mon. --add-key={{ monitor_secret }} --cap mon 'allow *'

View File

@ -47,7 +47,8 @@
become: false
failed_when: false
register: statconfig
always_run: true
tags:
- always
- name: try to copy ceph config and keys
copy:

View File

@ -110,9 +110,10 @@
become: false
failed_when: false
register: stat_mgr_keys
always_run: true
when:
- "{{ groups.get(mgr_group_name, []) | length > 0 }}"
tags:
- always
- name: fetch ceph mgr key(s)
fetch:

View File

@ -2,7 +2,8 @@
- name: collect all the pools
command: rados --cluster {{ cluster }} lspools
register: ceph_pools
always_run: true
tags:
- always
- name: secure the cluster
command: ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true

View File

@ -4,7 +4,8 @@
register: selinuxstatus
changed_when: false
failed_when: false
always_run: true
tags:
- always
- name: install policycoreutils-python to get semanage
package:

View File

@ -10,8 +10,9 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
tags:
- always
- name: try to fetch config and keys
copy:

View File

@ -42,10 +42,11 @@
- "{{ dedicated_devices|unique }}"
changed_when: false
failed_when: false
always_run: true
register: journal_partition_status
when:
- osd_scenario == 'non-collocated'
tags:
- always
- name: fix partitions gpt header or labels of the journal device(s)
shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"

View File

@ -5,8 +5,9 @@
- "{{ devices }}"
changed_when: false
failed_when: false
always_run: true
register: osd_partition_status_results
tags:
- always
# NOTE: The following calls to sgdisk are retried because sgdisk is known to
# fully wipe a device the first time around. There is no need to halt execution

View File

@ -15,8 +15,9 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
tags:
- always
- name: try to copy ceph config and keys
copy:

View File

@ -40,8 +40,9 @@
- "{{ devices }}"
changed_when: false
failed_when: false
always_run: true
register: parted_results
tags:
- always
- name: include copy_configs.yml
include: copy_configs.yml

View File

@ -4,16 +4,18 @@
with_items: "{{ devices }}"
changed_when: false
failed_when: false
always_run: true
register: osd_path
tags:
- always
- name: get osd id
command: cat {{ item.stdout }}/whoami
with_items: "{{ osd_path.results }}"
changed_when: false
failed_when: false
always_run: true
register: osd_id_non_dir_scenario
tags:
- always
# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297

View File

@ -4,10 +4,11 @@
ls /var/lib/ceph/osd/ | sed 's/.*-//'
changed_when: false
failed_when: false
always_run: true
register: osd_id
until: osd_id.stdout_lines|length == devices|unique|length
retries: 10
tags:
- always
- name: ensure systemd service override directory exists
file:

View File

@ -17,9 +17,10 @@
changed_when: false
become: false
failed_when: false
always_run: true
register: statconfig
when: "item | length > 0"
tags:
- always
- name: try to fetch ceph config and keys
copy:

View File

@ -11,7 +11,8 @@
become: false
ignore_errors: true
register: statconfig
always_run: true
tags:
- always
- name: try to fetch ceph config and keys
copy:

View File

@ -3,8 +3,9 @@
shell: "pgrep -f ceph-rest-api"
changed_when: false
failed_when: false
always_run: true
register: restapi_status
tags:
- always
- name: start ceph rest api
shell: "nohup ceph-rest-api --conf /etc/ceph/{{ cluster }}.conf &"

View File

@ -10,8 +10,9 @@
changed_when: false
become: false
ignore_errors: true
always_run: true
register: statconfig
tags:
- always
- name: try to fetch ceph config and keys
copy:

View File

@ -4,25 +4,29 @@
register: realmcheck
failed_when: False
changed_when: False
always_run: True
tags:
- always
- name: check if the zonegroup already exists
command: radosgw-admin zonegroup get --rgw-zonegroup={{ rgw_zonegroup }}
register: zonegroupcheck
failed_when: False
changed_when: False
always_run: True
tags:
- always
- name: check if the zone already exists
command: radosgw-admin zone get --rgw-zone={{ rgw_zone }}
register: zonecheck
failed_when: False
changed_when: False
always_run: True
tags:
- always
- name: check if the system user already exists
command: radosgw-admin user info --uid=zone.user
register: usercheck
failed_when: False
changed_when: False
always_run: True
tags:
- always

View File

@ -53,7 +53,8 @@
stat:
path: /run/ostree-booted
register: stat_ostree
always_run: true
tags:
- always
- name: set fact for using Atomic host
set_fact:
@ -125,7 +126,7 @@
- name: set MTU on eth2
command: "ifconfig eth2 mtu 1400 up"
- hosts: mdss:rgws:clients
- hosts: mdss:rgws:clients
gather_facts: false
become: yes
tasks:

View File

@ -8,7 +8,8 @@
- name: check if it is Atomic host
stat: path=/run/ostree-booted
register: stat_ostree
always_run: true
tags:
- always
- name: set fact for using Atomic host
set_fact: