mirror of https://github.com/ceph/ceph-ansible.git

commit fc270bb772: fixed handling of new ceph_version
@@ -27,7 +27,7 @@
   - name: Wait for the server to come up
     local_action: >
-      wait_for host=<your_host
+      wait_for host=<your_host>
       port=22
       delay=10
       timeout=3600
@@ -184,11 +184,41 @@ dummy:
 #rbd_cache: "true"
 #rbd_cache_writethrough_until_flush: "true"
 #rbd_concurrent_management_ops: 20

 #rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
-#rbd_client_directory_user: qemu
-#rbd_client_directory_group: libvirtd
-#rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
+# Permissions for the rbd_client_log_path and
+# rbd_client_admin_socket_path. Depending on your use case for Ceph
+# you may want to change these values. The default, which is used if
+# any of the variables are unset or set to a false value (like `null`
+# or `false`) is to automatically determine what is appropriate for
+# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
+# for infernalis releases, and root:root and 1777 for pre-infernalis
+# releases.
+#
+# For other use cases, including running Ceph with OpenStack, you'll
+# want to set these differently:
+#
+# For OpenStack on RHEL, you'll want:
+# rbd_client_directory_owner: "qemu"
+# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
+# rbd_client_directory_mode: "0755"
+#
+# For OpenStack on Ubuntu or Debian, set:
+# rbd_client_directory_owner: "libvirt-qemu"
+# rbd_client_directory_group: "kvm"
+# rbd_client_directory_mode: "0755"
+#
+# If you set rbd_client_directory_mode, you must use a string (e.g.,
+# 'rbd_client_directory_mode: "0755"', *not*
+# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
+# must be in octal or symbolic form
+#rbd_client_directory_owner: null
+#rbd_client_directory_group: null
+#rbd_client_directory_mode: null

 #rbd_client_log_path: /var/log/rbd-clients/
+#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
 #rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor
 #rbd_default_features: 3
 #rbd_default_map_options: rw
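
As a concrete illustration of the commented defaults above (not part of the commit): per the comments, an OpenStack-on-Ubuntu/Debian deployment would uncomment and set the three variables, keeping the mode as a quoted string, for example in group_vars:

# Example override following the comments above (OpenStack on Ubuntu/Debian).
rbd_client_directory_owner: "libvirt-qemu"
rbd_client_directory_group: "kvm"
rbd_client_directory_mode: "0755"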
@@ -50,8 +50,8 @@
 # This can cause problem with qemu-kvm
 purge_all_packages: true

-# When set to true and raw _multi_journal is used then journal disk are also zapped
-zap_journal_disks: true
+# When set to true and raw _multi_journal is used then block devices are also zapped
+zap_block_devs: true

 ceph_packages:
   - ceph
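
A minimal sketch (illustrative, not from this commit) of the renamed toggle in use: setting zap_block_devs to false keeps OSD data and journal devices intact during a purge, whereas the old zap_journal_disks only gated journal zapping.

# purge-cluster overrides -- remove packages but keep block devices untouched.
purge_all_packages: true
zap_block_devs: false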
@@ -96,6 +96,7 @@
     fail:
       msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/osds using the devices variable."
     when:
+      osd_group_name in group_names and
       devices is not defined and
       osd_auto_discovery

@@ -108,7 +109,8 @@
     shell: "if [ -d /usr/lib/systemd ] ; then find /usr/lib/systemd/system -name 'ceph*' | wc -l ; else echo 0 ; fi"
     register: systemd_unit_files

-# Infernalis
+# after Hammer release

 - name: stop ceph.target with systemd
   service:
     name: ceph.target
@@ -166,34 +168,33 @@
     systemd_unit_files.stdout != "0" and
     rbdmirror_group_name in group_names

-# before infernalis
+# before infernalis release, using sysvinit scripts
+# we use this test so we do not have to know which RPM contains the boot script
+# or where it is placed.

 - name: stop ceph osds
-  command: service ceph stop osd
+  shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
   when:
     ansible_os_family == 'RedHat' and
-    osd_group_name in group_names and
-    systemd_unit_files.stdout == "0"
+    osd_group_name in group_names

 - name: stop ceph mons
-  command: service ceph stop mon
+  shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
   when:
     ansible_os_family == 'RedHat' and
-    mon_group_name in group_names and
-    systemd_unit_files.stdout == "0"
+    mon_group_name in group_names

 - name: stop ceph mdss
-  command: service ceph stop mds
+  shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi"
   when:
     ansible_os_family == 'RedHat' and
-    mds_group_name in group_names and
-    systemd_unit_files.stdout == "0"
+    mds_group_name in group_names

 - name: stop ceph rgws
-  command: service ceph-radosgw stop
+  shell: "service ceph-radosgw status ; if [ $? == 0 ] ; then service ceph-radosgw stop ; else echo ; fi"
   when:
     ansible_os_family == 'RedHat' and
-    rgw_group_name in group_names and
-    systemd_unit_files.stdout == "0"
+    rgw_group_name in group_names

 # Ubuntu 14.04
 - name: stop ceph osds on ubuntu
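
A word on the status-then-stop guard introduced above: "service ceph status <daemon>" exits non-zero when the daemon is not configured or not running, and the trailing "echo" in the else branch keeps the task's return code at zero, so the purge does not abort on hosts that lack that daemon. An illustrative paraphrase of the osd case, with the reasoning spelled out in comments:

# Only issue "service ceph stop osd" when the sysvinit status check succeeds;
# otherwise run "echo" so the shell task still exits 0 and Ansible carries on.
- name: stop ceph osds only when sysvinit reports them running
  shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
  when:
    ansible_os_family == 'RedHat' and
    osd_group_name in group_names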
@@ -240,6 +241,16 @@
     register: check_for_running_ceph
     failed_when: check_for_running_ceph.rc == 0

+- name: see if ceph-disk-created data partitions are present
+  shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\x20data'"
+  failed_when: false
+  register: ceph_data_partlabels
+
+- name: see if ceph-disk-created journal partitions are present
+  shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\x20journal'"
+  failed_when: false
+  register: ceph_journal_partlabels
+
 - name: get osd data mount points
   shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
   register: mounted_osd
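
Background for the two detection tasks above: ceph-disk labels the GPT partitions it creates "ceph data" and "ceph journal", and udev escapes the space as \x20 in the /dev/disk/by-partlabel symlink names, which is why the grep patterns look the way they do. A small sketch (task name is assumed) of consuming the registered result:

# Illustrative only: the rc of the grep above tells us whether any ceph-disk
# data partitions exist on this host.
- name: report whether ceph-disk data partitions were found
  debug:
    msg: "ceph data partitions present: {{ ceph_data_partlabels.rc == 0 }}"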
@@ -280,19 +291,29 @@
     osd_group_name in group_names and
     remove_osd_mountpoints.rc != 0

+- name: see if ceph-disk is installed
+  shell: "which ceph-disk"
+  failed_when: false
+  register: ceph_disk_present
+
 - name: zap osd disks
   shell: ceph-disk zap "{{ item }}"
   with_items: devices
   when:
-    osd_group_name in group_names
+    osd_group_name in group_names and
+    ceph_disk_present.rc == 0 and
+    ceph_data_partlabels.rc == 0 and
+    zap_block_devs

 - name: zap journal devices
   shell: ceph-disk zap "{{ item }}"
   with_items: "{{ raw_journal_devices|default([])|unique }}"
   when:
     osd_group_name in group_names and
-    raw_multi_journal and
-    zap_journal_disks
+    ceph_disk_present.rc == 0 and
+    ceph_journal_partlabels.rc == 0 and
+    zap_block_devs and
+    raw_multi_journal

 - name: purge ceph packages with yum
   yum:
@@ -176,11 +176,41 @@ debug_mds_level: 20
 rbd_cache: "true"
 rbd_cache_writethrough_until_flush: "true"
 rbd_concurrent_management_ops: 20

 rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
-rbd_client_directory_user: qemu
-rbd_client_directory_group: libvirtd
-rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
+# Permissions for the rbd_client_log_path and
+# rbd_client_admin_socket_path. Depending on your use case for Ceph
+# you may want to change these values. The default, which is used if
+# any of the variables are unset or set to a false value (like `null`
+# or `false`) is to automatically determine what is appropriate for
+# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
+# for infernalis releases, and root:root and 1777 for pre-infernalis
+# releases.
+#
+# For other use cases, including running Ceph with OpenStack, you'll
+# want to set these differently:
+#
+# For OpenStack on RHEL, you'll want:
+# rbd_client_directory_owner: "qemu"
+# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
+# rbd_client_directory_mode: "0755"
+#
+# For OpenStack on Ubuntu or Debian, set:
+# rbd_client_directory_owner: "libvirt-qemu"
+# rbd_client_directory_group: "kvm"
+# rbd_client_directory_mode: "0755"
+#
+# If you set rbd_client_directory_mode, you must use a string (e.g.,
+# 'rbd_client_directory_mode: "0755"', *not*
+# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
+# must be in octal or symbolic form
+rbd_client_directory_owner: null
+rbd_client_directory_group: null
+rbd_client_directory_mode: null

 rbd_client_log_path: /var/log/rbd-clients/
+rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
 rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor
 rbd_default_features: 3
 rbd_default_map_options: rw
|
||||||
default_release: "{{ ansible_distribution_release }}{{ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
|
default_release: "{{ ansible_distribution_release }}{{ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
|
||||||
when:
|
when:
|
||||||
mds_group_name in group_names
|
mds_group_name in group_names
|
||||||
|
|
||||||
- name: configure rbd clients directories
|
|
||||||
file:
|
|
||||||
path: "{{ item }}"
|
|
||||||
state: directory
|
|
||||||
owner: libvirt-qemu
|
|
||||||
group: kvm
|
|
||||||
mode: 0755
|
|
||||||
with_items:
|
|
||||||
- rbd_client_log_path
|
|
||||||
- rbd_client_admin_socket_path
|
|
||||||
when: rbd_client_directories
|
|
||||||
|
|
|
@@ -139,15 +139,3 @@
   when:
     rgw_group_name in group_names and
     ansible_pkg_mgr == "dnf"
-
-- name: configure rbd clients directories
-  file:
-    path: "{{ item }}"
-    state: directory
-    owner: "{{ rbd_client_directory_user }}"
-    group: "{{ rbd_client_directory_group }}"
-    mode: 0755
-  with_items:
-    - rbd_client_log_path
-    - rbd_client_admin_socket_path
-  when: rbd_client_directories
@@ -115,16 +115,46 @@
   when: not is_ceph_infernalis

 - set_fact:
-    rbd_client_dir_owner: root
-    rbd_client_dir_group: root
-    rbd_client_dir_mode: "1777"
-  when: not is_ceph_infernalis
+    rbd_client_directory_owner: root
+  when:
+    not is_ceph_infernalis and
+    (rbd_client_directory_owner is not defined or
+     not rbd_client_directory_owner)

 - set_fact:
-    rbd_client_dir_owner: ceph
-    rbd_client_dir_group: ceph
-    rbd_client_dir_mode: "0770"
-  when: is_ceph_infernalis
+    rbd_client_directory_owner: ceph
+  when:
+    is_ceph_infernalis and
+    (rbd_client_directory_owner is not defined or
+     not rbd_client_directory_owner)
+
+- set_fact:
+    rbd_client_directory_group: root
+  when:
+    not is_ceph_infernalis and
+    (rbd_client_directory_group is not defined or
+     not rbd_client_directory_group)
+
+- set_fact:
+    rbd_client_directory_group: ceph
+  when:
+    is_ceph_infernalis and
+    (rbd_client_directory_group is not defined or
+     not rbd_client_directory_group)
+
+- set_fact:
+    rbd_client_directory_mode: "1777"
+  when:
+    not is_ceph_infernalis and
+    (rbd_client_directory_mode is not defined or
+     not rbd_client_directory_mode)
+
+- set_fact:
+    rbd_client_directory_mode: "0770"
+  when:
+    is_ceph_infernalis and
+    (rbd_client_directory_mode is not defined or
+     not rbd_client_directory_mode)

 - name: check for a ceph socket
   shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
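
The guards on the six set_fact tasks above mean a value the operator sets in group_vars always wins; the release-based defaults apply only when the variable is undefined or falsey (null/false). The same precedence could be written more compactly with Jinja's default filter, shown here only to clarify the semantics (this is not how the role expresses it):

# Equivalent precedence in one expression: keep a defined, truthy user value,
# otherwise fall back to the release-based default.
- set_fact:
    rbd_client_directory_mode: "{{ rbd_client_directory_mode | default('0770' if is_ceph_infernalis else '1777', true) }}"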
@@ -197,11 +227,15 @@

 - name: create rbd client directory
   file:
-    path: "{{ rbd_client_admin_socket_path }}"
+    path: "{{ item }}"
     state: directory
-    owner: "{{ rbd_client_dir_owner }}"
-    group: "{{ rbd_client_dir_group }}"
-    mode: "{{ rbd_client_dir_mode }}"
+    owner: "{{ rbd_client_directory_owner }}"
+    group: "{{ rbd_client_directory_group }}"
+    mode: "{{ rbd_client_directory_mode }}"
+  with_items:
+    - rbd_client_admin_socket_path
+    - rbd_client_log_path
+  when: rbd_client_directories

 - name: configure cluster name
   lineinfile:
@@ -7,7 +7,7 @@
 # NOTE (leseb): we can not use docker inspect with 'format filed' because of
 # https://github.com/ansible/ansible/issues/10156
 - name: inspect ceph version
-  shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
+  shell: docker inspect docker.io/"{{ ceph_mds_docker_username }}"/"{{ ceph_mds_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
   changed_when: false
   failed_when: false
   run_once: true
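
The awk pipeline above (and in the mon, osd, restapi and rgw variants further down) looks for the CEPH_VERSION=... entry in the image's Env array as printed by docker inspect, strips the trailing quote-and-comma from the value, and deduplicates. A sketch (register name is assumed, not from the commit) of surfacing the extracted string:

# Illustrative only: capture the pipeline's output so the version can be shown later.
- name: inspect ceph version of the mds container image
  shell: docker inspect docker.io/"{{ ceph_mds_docker_username }}"/"{{ ceph_mds_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
  changed_when: false
  failed_when: false
  register: ceph_docker_version

- name: show the version shipped in the image
  debug:
    msg: "container image ships ceph {{ ceph_docker_version.stdout }}"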
@@ -9,9 +9,9 @@
     - cephfs_data
     - cephfs_metadata
   changed_when: false
-  when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
+  when: not {{ ceph_version | version_compare('0.80.0', '<') }}

 - name: create ceph filesystem
   command: ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
   changed_when: false
-  when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
+  when: not {{ ceph_version | version_compare('0.80.0', '<') }}
@@ -7,7 +7,7 @@
 # NOTE (leseb): we can not use docker inspect with 'format filed' because of
 # https://github.com/ansible/ansible/issues/10156
 - name: inspect ceph version
-  shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
+  shell: docker inspect docker.io/"{{ ceph_mon_docker_username }}"/"{{ ceph_mon_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
   changed_when: false
   failed_when: false
   run_once: true
@@ -2,11 +2,11 @@
 - name: collect all the pools
   command: rados --cluster {{ cluster }} lspools
   register: ceph_pools
-  when: "{{ ceph_version.stdout | version_compare('0.94', '>=') }}"
+  when: "{{ ceph_version | version_compare('0.94.0', '>=') }}"

 - name: secure the cluster
   command: ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
   with_nested:
     - ceph_pools.stdout_lines
     - secure_cluster_flags
-  when: "{{ ceph_version.stdout | version_compare('0.94', '>=') }}"
+  when: "{{ ceph_version | version_compare('0.94.0', '>=') }}"
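
Both conditions above now compare ceph_version as a plain version string (no .stdout) against a full x.y.z version. A self-contained sketch of the filter's behaviour with an illustrative value:

# Illustrative only: version_compare on a plain string fact.
- set_fact:
    ceph_version: "0.94.5"

- name: runs only on hammer (0.94.x) or newer
  debug:
    msg: "{{ ceph_version }} is at least 0.94.0"
  when: "{{ ceph_version | version_compare('0.94.0', '>=') }}"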
@@ -53,9 +53,3 @@
     changed_when: false
     failed_when: false
     when: cephx
-
-- name: get ceph monitor version
-  shell: ceph --cluster {{ cluster }} daemon mon."{{ monitor_name }}" version | cut -d '"' -f 4 | cut -f 1,2 -d '.'
-  changed_when: false
-  failed_when: "'No such file or directory' in ceph_version.stderr"
-  register: ceph_version
@@ -20,11 +20,19 @@
     - devices
   changed_when: false
   failed_when: false
+  register: activate_osd_disk
   when:
     not item.0.get("skipped") and
     item.0.get("rc", 0) != 0 and
     not osd_auto_discovery

+- name: fail if ceph-disk cannot create an OSD
+  fail:
+    msg: "ceph-disk failed to create an OSD"
+  when:
+    " 'ceph-disk: Error: ceph osd create failed' in item.get('stderr', '') "
+  with_items: "{{activate_osd_disk.results}}"
+
 # NOTE (leseb): this task is for partitions because we don't explicitly use a partition.
 - name: activate osd(s) when device is a partition
   command: "ceph-disk activate {{ item.1 }}"
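
The new fail task walks the registered loop results: each element of activate_osd_disk.results carries the per-device command output, and item.get('stderr', '') returns an empty string for entries without stderr (for example skipped items) instead of raising an error. A small sketch (illustrative only) of reading the same structure:

# Illustrative only: show the per-device return code from the looped register above.
- name: report ceph-disk activation result per device
  debug:
    msg: "device {{ item.item }} finished with rc {{ item.rc | default('skipped') }}"
  with_items: "{{ activate_osd_disk.results }}"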
@@ -7,7 +7,7 @@
 # NOTE (leseb): we can not use docker inspect with 'format filed' because of
 # https://github.com/ansible/ansible/issues/10156
 - name: inspect ceph version
-  shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
+  shell: docker inspect docker.io/"{{ ceph_osd_docker_username }}"/"{{ ceph_osd_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
   changed_when: false
   failed_when: false
   run_once: true
@@ -1,6 +1,6 @@
 ---
 - name: inspect ceph version
-  shell: "docker inspect --format '{{ index (index .Config.Env) 3 }}' docker.io/{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }} | cut -d '=' -f '2'"
+  shell: docker inspect docker.io/"{{ ceph_restapi_docker_username }}"/"{{ ceph_restapi_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
   changed_when: false
   failed_when: false
   run_once: true
@@ -7,7 +7,7 @@
 # NOTE (leseb): we can not use docker inspect with 'format filed' because of
 # https://github.com/ansible/ansible/issues/10156
 - name: inspect ceph version
-  shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
+  shell: docker inspect docker.io/"{{ ceph_rgw_docker_username }}"/"{{ ceph_rgw_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
   changed_when: false
   failed_when: false
   run_once: true