mirror of https://github.com/ceph/ceph-ansible.git
syntax: change local_action syntax
Use a nicer syntax for `local_action` tasks. We used to have one-liners like this:

```
local_action: wait_for port=22 host={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} state=started delay=10 timeout=500
```

The usual syntax:

```
local_action:
  module: wait_for
  port: 22
  host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
  state: started
  delay: 10
  timeout: 500
```

is nicer and helps keep the syntax consistent across the whole playbook.

This also fixes a potential issue with missing quotation marks:

```
Traceback (most recent call last):
  File "/tmp/ansible_wQtWsi/ansible_module_command.py", line 213, in <module>
    main()
  File "/tmp/ansible_wQtWsi/ansible_module_command.py", line 185, in main
    rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin)
  File "/tmp/ansible_wQtWsi/ansible_modlib.zip/ansible/module_utils/basic.py", line 2710, in run_command
  File "/usr/lib64/python2.7/shlex.py", line 279, in split
    return list(lex)
  File "/usr/lib64/python2.7/shlex.py", line 269, in next
    token = self.get_token()
  File "/usr/lib64/python2.7/shlex.py", line 96, in get_token
    raw = self.read_token()
  File "/usr/lib64/python2.7/shlex.py", line 172, in read_token
    raise ValueError, "No closing quotation"
ValueError: No closing quotation
```

Writing `local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf` can trigger this "No closing quotation" complaint; this change avoids the issue.

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1510555

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
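For instance, the `fsid` one-liner called out above becomes the following in the structured form (a minimal sketch mirroring the change in the diff below; the free-form `shell` arguments continue the `module:` value on a more-indented line, while options such as `creates:` become regular YAML keys):

```yaml
- name: reuse cluster fsid when cluster is already running
  local_action:
    module: shell
      echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
  become: false
```

Because the options are plain YAML values instead of a single string that has to be split, the pipe and the Jinja2 expansion no longer trip the shlex quote handling shown in the traceback.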
parent b1a3c6e4cc
commit deaf273b25
```diff
@@ -213,7 +213,13 @@
 - name: wait for server to boot
   become: false
-  local_action: wait_for port=22 host={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} state=started delay=10 timeout=500
+  local_action:
+    module: wait_for
+    port: 22
+    host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+    state: started
+    delay: 10
+    timeout: 500

 - name: remove data
   file:
```
```diff
@@ -20,17 +20,19 @@
   command: poweroff

 - name: Wait for the server to go down
-  local_action: >
-    wait_for host=<your_host>
-    port=22
-    state=stopped
+  local_action:
+    module: wait_for
+    host: <your_host>
+    port: 22
+    state: stopped

 - name: Wait for the server to come up
-  local_action: >
-    wait_for host=<your_host>
-    port=22
-    delay=10
-    timeout=3600
+  local_action:
+    module: wait_for
+    host: <your_host>
+    port: 22
+    delay: 10
+    timeout: 3600

 - name: Unset the noout flag
   command: ceph osd unset noout
```
```diff
@@ -81,11 +81,11 @@
   when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False

 - name: Wait for the monitor to be up again
-  local_action: >
-    wait_for
-    host={{ ansible_ssh_host | default(inventory_hostname) }}
-    port=6789
-    timeout=10
+  local_action:
+    module: wait_for
+    host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+    port: 6789
+    timeout: 10
   when: migration_completed.stat.exists == False

 - name: Stop the monitor (Upstart)
```
```diff
@@ -103,12 +103,12 @@
   when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False

 - name: Wait for the monitor to be down
-  local_action: >
-    wait_for
-    host={{ ansible_ssh_host | default(inventory_hostname) }}
-    port=6789
-    timeout=10
-    state=stopped
+  local_action:
+    module: wait_for
+    host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+    port: 6789
+    timeout: 10
+    state: stopped
   when: migration_completed.stat.exists == False

 - name: Create a backup directory
```
```diff
@@ -142,11 +142,11 @@
   when: migration_completed.stat.exists == False

 - name: Wait for the server to come up
-  local_action: >
-    wait_for
-    port=22
-    delay=10
-    timeout=3600
+  local_action:
+    module: wait_for
+    port: 22
+    delay: 10
+    timeout: 3600
   when: migration_completed.stat.exists == False

 - name: Wait a bit more to be sure that the server is ready
```
```diff
@@ -209,11 +209,11 @@
   when: migration_completed.stat.exists == False

 - name: Wait for the Monitor to be up again
-  local_action: >
-    wait_for
-    host={{ ansible_ssh_host | default(inventory_hostname) }}
-    port=6789
-    timeout=10
+  local_action:
+    module: wait_for
+    host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+    port: 6789
+    timeout: 10
   when: migration_completed.stat.exists == False

 - name: Waiting for the monitor to join the quorum...
```
```diff
@@ -339,12 +339,12 @@
   when: osdsysvinit.rc == 0 and migration_completed.stat.exists == False

 - name: Wait for the OSDs to be down
-  local_action: >
-    wait_for
-    host={{ ansible_ssh_host | default(inventory_hostname) }}
-    port={{ item }}
-    timeout=10
-    state=stopped
+  local_action:
+    module: wait_for
+    host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+    port: "{{ item }}"
+    timeout: 10
+    state: stopped
   with_items: "{{ osd_ports.stdout_lines }}"
   when: migration_completed.stat.exists == False
```
```diff
@@ -357,11 +357,11 @@
   when: migration_completed.stat.exists == False

 - name: Wait for the server to come up
-  local_action: >
-    wait_for
-    port=22
-    delay=10
-    timeout=3600
+  local_action:
+    module: wait_for
+    port: 22
+    delay: 10
+    timeout: 3600
   when: migration_completed.stat.exists == False

 - name: Wait a bit to be sure that the server is ready for scp
```
```diff
@@ -486,12 +486,12 @@
   when: migration_completed.stat.exists == False

 - name: Wait for radosgw to be down
-  local_action: >
-    wait_for
-    host={{ ansible_ssh_host | default(inventory_hostname) }}
-    path=/tmp/radosgw.sock
-    state=absent
-    timeout=30
+  local_action:
+    module: wait_for
+    host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+    path: /tmp/radosgw.sock
+    state: absent
+    timeout: 30
   when: migration_completed.stat.exists == False

 - name: Reboot the server
```
```diff
@@ -499,11 +499,11 @@
   when: migration_completed.stat.exists == False

 - name: Wait for the server to come up
-  local_action: >
-    wait_for
-    port=22
-    delay=10
-    timeout=3600
+  local_action:
+    module: wait_for
+    port: 22
+    delay: 10
+    timeout: 3600
   when: migration_completed.stat.exists == False

 - name: Wait a bit to be sure that the server is ready for scp
```
```diff
@@ -537,12 +537,12 @@
   when: migration_completed.stat.exists == False

 - name: Wait for radosgw to be up again
-  local_action: >
-    wait_for
-    host={{ ansible_ssh_host | default(inventory_hostname) }}
-    path=/tmp/radosgw.sock
-    state=present
-    timeout=30
+  local_action:
+    module: wait_for
+    host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+    path: /tmp/radosgw.sock
+    state: present
+    timeout: 30
   when: migration_completed.stat.exists == False

 - name: Done moving to the next rados gateway
```
```diff
@@ -6,11 +6,16 @@
   raw: "{{pypy_binary_directory}}/python $HOME/get-pip.py --proxy='{{ lookup('env', 'https_proxy') }}'"

 - name: create local temp directory
-  local_action: raw mkdir -p {{local_temp_directory}}
+  local_action:
+    module: raw
+      mkdir -p {{local_temp_directory}}
   become: no

 - name: prepare install_pip.sh
-  local_action: template src=install_pip.sh.j2 dest={{local_temp_directory}}/install_pip.sh
+  local_action:
+    module: template
+    src: install_pip.sh.j2
+    dest: "{{local_temp_directory}}/install_pip.sh"
   become: no

 - name: run pip.sh
```
```diff
@@ -29,5 +34,8 @@
   raw: touch $HOME/.pip

 - name: remove pip.sh
-  local_action: file path="{{local_temp_directory}}/pip.sh" state=absent
+  local_action:
+    module: file
+    path: "{{local_temp_directory}}/pip.sh"
+    state: absent
   become: no
```
```diff
@@ -9,11 +9,16 @@
   raw: mv $HOME/pypy-{{coreos_pypy_version}}-{{coreos_pypy_arch}} {{pypy_directory}}

 - name: create local temp directory
-  local_action: raw mkdir -p {{local_temp_directory}}
+  local_action:
+    module: raw
+      mkdir -p {{local_temp_directory}}
   become: no

 - name: prepare python executable
-  local_action: template src=install_python.sh.j2 dest={{local_temp_directory}}/install_python.sh
+  local_action:
+    module: template
+    src: install_python.sh.j2
+    dest: "{{local_temp_directory}}/install_python.sh"
   become: no

 - name: fix library
```
```diff
@@ -32,5 +37,8 @@
   raw: touch $HOME/.python

 - name: remove install_python.sh
-  local_action: file path="{{local_temp_directory}}/install_python.sh" state=absent
+  local_action:
+    module: file
+    path: "{{local_temp_directory}}/install_python.sh"
+    state: absent
   become: no
```
```diff
@@ -15,7 +15,9 @@
     - nmapexist.rc != 0

 - name: check if monitor port is not filtered
-  local_action: shell set -o pipefail && nmap -p 6789 {{ hostvars[inventory_hostname]['ansible_' + monitor_interface]['ipv4']['address'] if hostvars[inventory_hostname]['ansible_' + monitor_interface] is defined else hostvars[inventory_hostname]['monitor_address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 6789 {{ hostvars[inventory_hostname]['ansible_' + monitor_interface]['ipv4']['address'] if hostvars[inventory_hostname]['ansible_' + monitor_interface] is defined else hostvars[inventory_hostname]['monitor_address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: monportstate
```
```diff
@@ -33,7 +35,9 @@
     - monportstate.rc == 0

 - name: check if osd and mds range is not filtered (osd hosts)
-  local_action: shell set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: osdrangestate
```
```diff
@@ -51,7 +55,9 @@
     - osdrangestate.rc == 0

 - name: check if osd and mds range is not filtered (mds hosts)
-  local_action: shell set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: mdsrangestate
```
```diff
@@ -69,7 +75,9 @@
     - mdsrangestate.rc == 0

 - name: check if rados gateway port is not filtered
-  local_action: shell set -o pipefail && nmap -p {{ radosgw_civetweb_port }} {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p {{ radosgw_civetweb_port }} {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: rgwportstate
```
```diff
@@ -87,7 +95,9 @@
     - rgwportstate.rc == 0

 - name: check if NFS ports are not filtered
-  local_action: shell set -o pipefail && nmap -p 111,2049 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
+  local_action:
+    module: shell
+      set -o pipefail && nmap -p 111,2049 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up'
   changed_when: false
   failed_when: false
   register: nfsportstate
```
```diff
@@ -31,7 +31,9 @@
     - test_initial_monitor_keyring.rc != 0

 - name: write initial mon keyring in {{ fetch_directory }}/monitor_keyring.conf if it doesn't exist
-  local_action: shell echo {{ monitor_keyring.stdout }} | tee {{ fetch_directory }}/monitor_keyring.conf
+  local_action:
+    module: shell
+      echo {{ monitor_keyring.stdout }} | tee {{ fetch_directory }}/monitor_keyring.conf
   become: false
   when:
     - test_initial_monitor_keyring.rc == 0
```
```diff
@@ -33,7 +33,10 @@
     state: absent

 - name: remove tmp template file for ceph_conf_overrides (localhost)
-  local_action: file path="{{ fetch_directory }}/{{ fsid }}/ceph_conf_overrides_temp_{{ ansible_hostname }}" state=absent
+  local_action:
+    module: file
+    path: "{{ fetch_directory }}/{{ fsid }}/ceph_conf_overrides_temp_{{ ansible_hostname }}"
+    state: absent
   become: false

 - name: "generate ceph configuration file: {{ cluster }}.conf"
```
```diff
@@ -72,7 +75,10 @@
 # we are not population kv_store with default ceph.conf AND there at least 1 nfs in nfs group AND host is the first nfs
 - block:
     - name: create a local fetch directory if it does not exist
-      local_action: file path={{ fetch_directory }} state=directory
+      local_action:
+        module: file
+        path: "{{ fetch_directory }}"
+        state: directory
      changed_when: false
       become: false
       run_once: true
```
```diff
@@ -85,16 +91,20 @@
       and (inventory_hostname == groups.get(nfs_group_name, [])[0])))

 - name: generate cluster uuid
-  local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: shell
+      python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   register: cluster_uuid
   become: false
   when:
     - generate_fsid

 - name: read cluster uuid if it already exists
-  local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
-    removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: command
+      cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+    removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   changed_when: false
   register: cluster_uuid
   check_mode: no
```
```diff
@@ -34,7 +34,9 @@

 # We want this check to be run only on the first node
 - name: check if {{ fetch_directory }} directory exists
-  local_action: stat path="{{ fetch_directory }}/monitor_keyring.conf"
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/monitor_keyring.conf"
   become: false
   register: monitor_keyring_conf
   run_once: true
```
```diff
@@ -49,7 +51,10 @@
     - rolling_update or groups.get(mon_group_name, []) | length == 0

 - name: create a local fetch directory if it does not exist
-  local_action: file path={{ fetch_directory }} state=directory
+  local_action:
+    module: file
+    path: "{{ fetch_directory }}"
+    state: directory
   changed_when: false
   become: false
   run_once: true
```
```diff
@@ -68,8 +73,10 @@
     ceph_release: "{{ ceph_stable_release }}"

 - name: generate cluster fsid
-  local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: shell
+      python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   register: cluster_uuid
   become: false
   when:
```
```diff
@@ -77,15 +84,19 @@
     - ceph_current_fsid.rc != 0

 - name: reuse cluster fsid when cluster is already running
-  local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: shell
+      echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   become: false
   when:
     - ceph_current_fsid.rc == 0

 - name: read cluster fsid if it already exists
-  local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
-    removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  local_action:
+    module: command
+      cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+    removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
   changed_when: false
   register: cluster_uuid
   become: false
```
```diff
@@ -49,7 +49,9 @@
     - repodigest_before_pulling == repodigest_after_pulling

 - name: export local ceph dev image
-  local_action: command docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
+  local_action:
+    module: command
+      docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
   when:
     - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
   run_once: true
```
```diff
@@ -28,7 +28,9 @@
   when: groups.get(mgr_group_name, []) | length > 0

 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
```
```diff
@@ -8,7 +8,9 @@
     - "/etc/ceph/iscsi-gateway-pub.key"

 - name: stat for crt file(s)
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ crt_files }}"
   changed_when: false
   failed_when: false
```
```diff
@@ -9,7 +9,9 @@
     - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring

 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
```
```diff
@@ -6,7 +6,9 @@
     - /etc/ceph/{{ cluster }}.client.admin.keyring

 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
```
```diff
@@ -1,14 +1,18 @@
 ---
 - name: generate monitor initial keyring
-  local_action: shell python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
-    creates={{ fetch_directory }}/monitor_keyring.conf
+  local_action:
+    module: shell
+      python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print base64.b64encode(header + key)" | tee {{ fetch_directory }}/monitor_keyring.conf
+    creates: "{{ fetch_directory }}/monitor_keyring.conf"
   register: monitor_keyring
   become: false
   when: cephx

 - name: read monitor initial keyring if it already exists
-  local_action: command cat {{ fetch_directory }}/monitor_keyring.conf
-    removes={{ fetch_directory }}/monitor_keyring.conf
+  local_action:
+    module: command
+      cat {{ fetch_directory }}/monitor_keyring.conf
+    removes: "{{ fetch_directory }}/monitor_keyring.conf"
   changed_when: false
   register: monitor_keyring
   become: false
```
```diff
@@ -41,7 +41,9 @@
     - groups.get(mgr_group_name, []) | length > 0

 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
```
```diff
@@ -5,7 +5,9 @@
     - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring

 - name: stat for config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
```
```diff
@@ -5,12 +5,16 @@
     - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring

 - name: wait for ceph.conf and keys
-  local_action: wait_for path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: wait_for
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   become: false
   with_items: "{{ ceph_config_keys }}"

 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
```
```diff
@@ -12,7 +12,9 @@
     - "{{ bootstrap_rbd_keyring | default('') }}"

 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
```
```diff
@@ -5,7 +5,9 @@
     - /etc/ceph/{{ cluster }}.client.admin.keyring

 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
```
```diff
@@ -5,7 +5,9 @@
     - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring

 - name: stat for ceph config and keys
-  local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
   with_items: "{{ ceph_config_keys }}"
   changed_when: false
   become: false
```
```diff
@@ -10,7 +10,13 @@
   poll: 0

 - name: waiting 3 minutes for the machines to come back
-  local_action: wait_for host={{ ansible_default_ipv4.address }} port=22 state=started delay=30 timeout=180
+  local_action:
+    module: wait_for
+    host: "{{ ansible_default_ipv4.address }}"
+    port: 22
+    state: started
+    delay: 30
+    timeout: 180

 - name: uptime
   command: uptime
```