mirror of https://github.com/ceph/ceph-ansible.git
lint: ignore 302,303,505 errors
ignore 302, 303 and 505 errors

[302] Using command rather than an argument to e.g. file
[303] Using command rather than module
[505] referenced files must exist

These rules aren't relevant on these tasks.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>

pull/6060/head
parent c948b668eb
commit 195d88fcda
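For readers unfamiliar with the mechanism: ansible-lint can be told to skip a specific rule on a single task by appending an inline # noqa comment with the rule id, which is exactly what the hunks below do. A minimal, hypothetical sketch (the task and unit names are illustrative, not taken from this change):

    # Hypothetical task: rule 303 ("Using command rather than module") is
    # silenced because `systemctl reset-failed` had no equivalent in the
    # systemd module at the time, so calling the command module is deliberate.
    - name: reset failed example systemd unit
      command: 'systemctl reset-failed example@{{ ansible_hostname }}'  # noqa 303
      changed_when: false
      failed_when: false

With the older ansible-lint releases used here the rule is referenced by its numeric id; newer releases reference rules by name instead.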
@@ -339,7 +339,7 @@
   CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

 - name: reset failed ceph-mon systemd unit
-  command: 'systemctl reset-failed ceph-mon@{{ ansible_hostname }}'
+  command: 'systemctl reset-failed ceph-mon@{{ ansible_hostname }}' # noqa 303
   changed_when: false
   failed_when: false
   when: containerized_deployment | bool
@@ -384,7 +384,7 @@
   CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

 - name: reset failed ceph-mgr systemd unit
-  command: 'systemctl reset-failed ceph-mgr@{{ ansible_hostname }}'
+  command: 'systemctl reset-failed ceph-mgr@{{ ansible_hostname }}' # noqa 303
   changed_when: false
   failed_when: false
   when: containerized_deployment | bool
@@ -554,7 +554,7 @@
   when: not containerized_deployment | bool

 - name: reset failed ceph-mds systemd unit
-  command: 'systemctl reset-failed ceph-mds@{{ ansible_hostname }}'
+  command: 'systemctl reset-failed ceph-mds@{{ ansible_hostname }}' # noqa 303
   changed_when: false
   failed_when: false
   when: containerized_deployment | bool
@@ -653,7 +653,7 @@
   when: not containerized_deployment | bool

 - name: reset failed ceph-radosgw systemd unit
-  command: 'systemctl reset-failed ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
+  command: 'systemctl reset-failed ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}' # noqa 303
   changed_when: false
   failed_when: false
   loop: '{{ rgw_instances }}'
@@ -721,7 +721,7 @@
   when: not containerized_deployment | bool

 - name: reset failed rbd-mirror systemd unit
-  command: 'systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}'
+  command: 'systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}' # noqa 303
   changed_when: false
   failed_when: false
   when: containerized_deployment | bool
@@ -774,7 +774,7 @@
   - tcmu-runner

 - name: reset failed iscsigw systemd units
-  command: 'systemctl reset-failed {{ item }}'
+  command: 'systemctl reset-failed {{ item }}' # noqa 303
   changed_when: false
   failed_when: false
   with_items:
@@ -23,7 +23,7 @@

 - name: include vars of lv_vars.yaml
   include_vars:
-    file: lv_vars.yaml
+    file: lv_vars.yaml # noqa 505
   failed_when: false

 # ensure nvme_device is set
@@ -20,7 +20,7 @@

 - name: include vars of lv_vars.yaml
   include_vars:
-    file: lv_vars.yaml
+    file: lv_vars.yaml # noqa 505
   failed_when: false

 # need to check if lvm2 is installed
@@ -375,7 +375,7 @@
   delegate_to: localhost

 - name: remove data
-  shell: rm -rf /var/lib/ceph/*
+  shell: rm -rf /var/lib/ceph/* # noqa 302

   tasks:
@@ -738,7 +738,7 @@
   listen: "remove data"

 - name: remove data
-  shell: rm -rf /var/lib/ceph/*
+  shell: rm -rf /var/lib/ceph/* # noqa 302
   listen: "remove data"

   tasks:
@@ -841,7 +841,7 @@
   state: absent

 - name: clean apt
-  command: apt-get clean
+  command: apt-get clean # noqa 303
   when: ansible_pkg_mgr == 'apt'

 - name: purge ceph repo file in /etc/yum.repos.d
@@ -267,8 +267,7 @@
   name: ceph-facts

 - name: get all the running osds
-  shell: |
-    systemctl list-units --all | grep -oE "ceph-osd@([0-9]+).service"
+  shell: systemctl list-units --all | grep -oE "ceph-osd@([0-9]+).service" # noqa 303
   register: osd_units
   changed_when: false
   ignore_errors: true
@@ -671,13 +670,13 @@
   - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh"

 - name: remove ceph data
-  shell: rm -rf /var/lib/ceph/*
+  shell: rm -rf /var/lib/ceph/* # noqa 302
   changed_when: false

 # (todo): remove this when we are able to manage docker
 # service on atomic host.
 - name: remove docker data
-  shell: rm -rf /var/lib/docker/*
+  shell: rm -rf /var/lib/docker/* # noqa 302
   when: not is_atomic | bool

 - name: purge fetch directory
@@ -111,7 +111,7 @@
   failed_when: false

 - name: ensure that the mds is stopped
-  command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}"
+  command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa 303
   register: mds_to_kill_status
   failed_when: mds_to_kill_status.rc == 0
   delegate_to: "{{ mds_to_kill }}"
@@ -106,7 +106,7 @@
   failed_when: false

 - name: ensure that the mgr is stopped
-  command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}"
+  command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa 303
   register: mgr_to_kill_status
   failed_when: mgr_to_kill_status.rc == 0
   delegate_to: "{{ mgr_to_kill }}"
@@ -106,7 +106,7 @@
   failed_when: false

 - name: ensure that the rgw is stopped
-  command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}"
+  command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}" # noqa 303
   register: rgw_to_kill_status
   failed_when: rgw_to_kill_status.rc == 0
   changed_when: false
@@ -245,7 +245,7 @@

 # systemd module does not support --runtime option
 - name: disable ceph-osd@.service runtime-enabled
-  command: "systemctl disable --runtime {{ item }}"
+  command: "systemctl disable --runtime {{ item }}" # noqa 303
   changed_when: false
   failed_when: false
   with_items: "{{ running_osds.stdout_lines | default([]) }}"
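As an aside, the same rule ids could instead be skipped repository-wide through ansible-lint's configuration file; a rough sketch of that alternative, using the standard .ansible-lint skip_list key (not part of this commit):

    # .ansible-lint -- alternative, repo-wide approach (NOT what this commit does)
    skip_list:
      - '302'   # Using command rather than an argument to e.g. file
      - '303'   # Using command rather than module
      - '505'   # referenced files must exist

The per-task # noqa annotations chosen here keep the rules enforced everywhere else in the repository.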