fixed handling of new ceph_version

pull/664/head
James Saint-Rossy 2016-04-12 21:26:59 -04:00
commit fc270bb772
17 changed files with 170 additions and 80 deletions

View File

@@ -27,7 +27,7 @@
- name: Wait for the server to come up
local_action: >
wait_for host=<your_host
wait_for host=<your_host>
port=22
delay=10
timeout=3600
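
With the bracket closed, the corrected task assembled from this hunk reads in full (the <your_host> placeholder stays for the operator to fill in):

- name: Wait for the server to come up
  local_action: >
    wait_for host=<your_host>
    port=22
    delay=10
    timeout=3600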

View File

@@ -184,11 +184,41 @@ dummy:
#rbd_cache: "true"
#rbd_cache_writethrough_until_flush: "true"
#rbd_concurrent_management_ops: 20
#rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
#rbd_client_directory_user: qemu
#rbd_client_directory_group: libvirtd
#rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
# Permissions for the rbd_client_log_path and
# rbd_client_admin_socket_path. Depending on your use case for Ceph
# you may want to change these values. The default, which is used if
# any of the variables are unset or set to a false value (like `null`
# or `false`), is to automatically determine what is appropriate for
# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
# for infernalis releases, and root:root and 1777 for pre-infernalis
# releases.
#
# For other use cases, including running Ceph with OpenStack, you'll
# want to set these differently:
#
# For OpenStack on RHEL, you'll want:
# rbd_client_directory_owner: "qemu"
# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
# rbd_client_directory_mode: "0755"
#
# For OpenStack on Ubuntu or Debian, set:
# rbd_client_directory_owner: "libvirt-qemu"
# rbd_client_directory_group: "kvm"
# rbd_client_directory_mode: "0755"
#
# If you set rbd_client_directory_mode, you must use a string (e.g.,
# 'rbd_client_directory_mode: "0755"', *not*
# 'rbd_client_directory_mode: 0755'), or Ansible will complain that the
# mode must be in octal or symbolic form.
#rbd_client_directory_owner: null
#rbd_client_directory_group: null
#rbd_client_directory_mode: null
#rbd_client_log_path: /var/log/rbd-clients/
#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_default_features: 3
#rbd_default_map_options: rw
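
The quoting note above has a concrete reason: YAML 1.1 parses an unquoted 0755 as an octal integer (decimal 493), so Ansible no longer sees an octal-looking mode. A sketch of the two spellings:

rbd_client_directory_mode: "0755"   # a string, passed through unchanged -- works
rbd_client_directory_mode: 0755     # YAML turns this into the integer 493 -- Ansible complains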

View File

@@ -50,8 +50,8 @@
# This can cause problems with qemu-kvm
purge_all_packages: true
# When set to true and raw_multi_journal is used, journal disks are also zapped
zap_journal_disks: true
# When set to true, block devices are also zapped
zap_block_devs: true
ceph_packages:
- ceph
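
Both zap_* switches above can also be overridden at run time. A sketch, assuming the playbook file is named purge-cluster.yml; the JSON form of --extra-vars is used so the values arrive as real booleans rather than the truthy string "false":

ansible-playbook purge-cluster.yml --extra-vars '{"zap_journal_disks": false, "zap_block_devs": false}'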
@@ -96,6 +96,7 @@
fail:
msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/osds using the devices variable."
when:
osd_group_name in group_names and
devices is not defined and
osd_auto_discovery
@@ -108,7 +109,8 @@
shell: "if [ -d /usr/lib/systemd ] ; then find /usr/lib/systemd/system -name 'ceph*' | wc -l ; else echo 0 ; fi"
register: systemd_unit_files
# Infernalis
# after the Hammer release
- name: stop ceph.target with systemd
service:
name: ceph.target
@@ -166,34 +168,33 @@
systemd_unit_files.stdout != "0" and
rbdmirror_group_name in group_names
# before infernalis
# before the infernalis release, using sysvinit scripts
# we use this test so we do not have to know which RPM contains the boot script
# or where it is placed.
- name: stop ceph osds
command: service ceph stop osd
shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
when:
ansible_os_family == 'RedHat' and
osd_group_name in group_names and
systemd_unit_files.stdout == "0"
osd_group_name in group_names
- name: stop ceph mons
command: service ceph stop mon
shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
when:
ansible_os_family == 'RedHat' and
mon_group_name in group_names and
systemd_unit_files.stdout == "0"
mon_group_name in group_names
- name: stop ceph mdss
command: service ceph stop mds
shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi"
when:
ansible_os_family == 'RedHat' and
mds_group_name in group_names and
systemd_unit_files.stdout == "0"
mds_group_name in group_names
- name: stop ceph rgws
command: service ceph-radosgw stop
shell: "service ceph-radosgw status ; if [ $? == 0 ] ; then service ceph-radosgw stop ; else echo ; fi"
when:
ansible_os_family == 'RedHat' and
rgw_group_name in group_names and
systemd_unit_files.stdout == "0"
rgw_group_name in group_names
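
Each of the four stop tasks above trades a bare stop for a status-guarded one. Expanded to plain shell, the idiom for the osd case is:

service ceph status osd          # exits non-zero when no osd is running under sysvinit
if [ $? == 0 ] ; then
    service ceph stop osd        # something is running: stop it
else
    echo                         # nothing to stop; the no-op keeps the task's rc at 0
fi

This is presumably also why the systemd_unit_files.stdout == "0" condition could be dropped: on a systemd host the status call simply fails and the shell falls through to the no-op branch.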
# Ubuntu 14.04
- name: stop ceph osds on ubuntu
@@ -240,6 +241,16 @@
register: check_for_running_ceph
failed_when: check_for_running_ceph.rc == 0
- name: see if ceph-disk-created data partitions are present
shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20data'"
failed_when: false
register: ceph_data_partlabels
- name: see if ceph-disk-created journal partitions are present
shell: "ls /dev/disk/by-partlabel | grep -q 'ceph\\\\x20journal'"
failed_when: false
register: ceph_journal_partlabels
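
ceph-disk labels its partitions "ceph data" and "ceph journal", and udev encodes the space as the literal four characters \x20 in /dev/disk/by-partlabel. The pattern is therefore escaped twice -- YAML collapses \\\\ to \\, and grep reads \\ as one literal backslash. Checked by hand (a sketch):

ls /dev/disk/by-partlabel
# ceph\x20data   ceph\x20journal
ls /dev/disk/by-partlabel | grep -q 'ceph\\x20data' && echo "data partitions present"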
- name: get osd data mount points
shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
register: mounted_osd
@@ -280,19 +291,29 @@
osd_group_name in group_names and
remove_osd_mountpoints.rc != 0
- name: see if ceph-disk is installed
shell: "which ceph-disk"
failed_when: false
register: ceph_disk_present
- name: zap osd disks
shell: ceph-disk zap "{{ item }}"
with_items: devices
when:
osd_group_name in group_names
osd_group_name in group_names and
ceph_disk_present.rc == 0 and
ceph_data_partlabels.rc == 0 and
zap_block_devs
- name: zap journal devices
shell: ceph-disk zap "{{ item }}"
with_items: "{{ raw_journal_devices|default([])|unique }}"
when:
osd_group_name in group_names and
raw_multi_journal and
zap_journal_disks
ceph_disk_present.rc == 0 and
ceph_journal_partlabels.rc == 0 and
zap_block_devs and
raw_multi_journal
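
ceph-disk zap is destructive -- it wipes the partition table and metadata on the whole device -- which is why both tasks are now gated on the tool being installed, matching partlabels existing, and the operator having left the zap_* switches on. The manual equivalent for a single disk (a sketch; /dev/sdb is an assumed device name):

ceph-disk zap /dev/sdb    # destroys GPT/MBR structures, and with them all partitions, on /dev/sdb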
- name: purge ceph packages with yum
yum:

View File

@@ -176,11 +176,41 @@ debug_mds_level: 20
rbd_cache: "true"
rbd_cache_writethrough_until_flush: "true"
rbd_concurrent_management_ops: 20
rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
rbd_client_directory_user: qemu
rbd_client_directory_group: libvirtd
rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
# Permissions for the rbd_client_log_path and
# rbd_client_admin_socket_path. Depending on your use case for Ceph
# you may want to change these values. The default, which is used if
# any of the variables are unset or set to a false value (like `null`
# or `false`), is to automatically determine what is appropriate for
# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
# for infernalis releases, and root:root and 1777 for pre-infernalis
# releases.
#
# For other use cases, including running Ceph with OpenStack, you'll
# want to set these differently:
#
# For OpenStack on RHEL, you'll want:
# rbd_client_directory_owner: "qemu"
# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
# rbd_client_directory_mode: "0755"
#
# For OpenStack on Ubuntu or Debian, set:
# rbd_client_directory_owner: "libvirt-qemu"
# rbd_client_directory_group: "kvm"
# rbd_client_directory_mode: "0755"
#
# If you set rbd_client_directory_mode, you must use a string (e.g.,
# 'rbd_client_directory_mode: "0755"', *not*
# 'rbd_client_directory_mode: 0755'), or Ansible will complain that the
# mode must be in octal or symbolic form.
rbd_client_directory_owner: null
rbd_client_directory_group: null
rbd_client_directory_mode: null
rbd_client_log_path: /var/log/rbd-clients/
rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_client_admin_socket_path: /var/run/ceph/rbd-clients # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_default_features: 3
rbd_default_map_options: rw

View File

@@ -45,15 +45,3 @@
default_release: "{{ ansible_distribution_release }}{{ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
when:
mds_group_name in group_names
- name: configure rbd clients directories
file:
path: "{{ item }}"
state: directory
owner: libvirt-qemu
group: kvm
mode: 0755
with_items:
- rbd_client_log_path
- rbd_client_admin_socket_path
when: rbd_client_directories

View File

@@ -139,15 +139,3 @@
when:
rgw_group_name in group_names and
ansible_pkg_mgr == "dnf"
- name: configure rbd clients directories
file:
path: "{{ item }}"
state: directory
owner: "{{ rbd_client_directory_user }}"
group: "{{ rbd_client_directory_group }}"
mode: 0755
with_items:
- rbd_client_log_path
- rbd_client_admin_socket_path
when: rbd_client_directories

View File

@@ -115,16 +115,46 @@
when: not is_ceph_infernalis
- set_fact:
rbd_client_dir_owner: root
rbd_client_dir_group: root
rbd_client_dir_mode: "1777"
when: not is_ceph_infernalis
rbd_client_directory_owner: root
when:
not is_ceph_infernalis and
(rbd_client_directory_owner is not defined or
not rbd_client_directory_owner)
- set_fact:
rbd_client_dir_owner: ceph
rbd_client_dir_group: ceph
rbd_client_dir_mode: "0770"
when: is_ceph_infernalis
rbd_client_directory_owner: ceph
when:
is_ceph_infernalis and
(rbd_client_directory_owner is not defined or
not rbd_client_directory_owner)
- set_fact:
rbd_client_directory_group: root
when:
not is_ceph_infernalis and
(rbd_client_directory_group is not defined or
not rbd_client_directory_group)
- set_fact:
rbd_client_directory_group: ceph
when:
is_ceph_infernalis and
(rbd_client_directory_group is not defined or
not rbd_client_directory_group)
- set_fact:
rbd_client_directory_mode: "1777"
when:
not is_ceph_infernalis and
(rbd_client_directory_mode is not defined or
not rbd_client_directory_mode)
- set_fact:
rbd_client_directory_mode: "0770"
when:
is_ceph_infernalis and
(rbd_client_directory_mode is not defined or
not rbd_client_directory_mode)
- name: check for a ceph socket
shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
@@ -197,11 +227,15 @@
- name: create rbd client directory
file:
path: "{{ rbd_client_admin_socket_path }}"
path: "{{ item }}"
state: directory
owner: "{{ rbd_client_dir_owner }}"
group: "{{ rbd_client_dir_group }}"
mode: "{{ rbd_client_dir_mode }}"
owner: "{{ rbd_client_directory_owner }}"
group: "{{ rbd_client_directory_group }}"
mode: "{{ rbd_client_directory_mode }}"
with_items:
- rbd_client_admin_socket_path
- rbd_client_log_path
when: rbd_client_directories
- name: configure cluster name
lineinfile:

View File

@@ -7,7 +7,7 @@
# NOTE (leseb): we cannot use docker inspect with 'format field' because of
# https://github.com/ansible/ansible/issues/10156
- name: inspect ceph version
shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
shell: docker inspect docker.io/"{{ ceph_mds_docker_username }}"/"{{ ceph_mds_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
run_once: true
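
docker inspect dumps the image's JSON, whose Config.Env array holds entries like "CEPH_VERSION=9.2.0", so splitting on '=' and stripping the trailing quote-and-comma leaves the bare version. Roughly, with 9.2.0 as an assumed version (a sketch):

docker inspect docker.io/ceph/daemon | grep CEPH_VERSION
#         "CEPH_VERSION=9.2.0",
docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
# 9.2.0

The substantive change in this and the sibling hunks below is that each role now inspects the image it actually deploys ({{ ceph_mds_docker_username }}/{{ ceph_mds_docker_imagename }} here) instead of the hardcoded docker.io/ceph/daemon.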

View File

@@ -9,9 +9,9 @@
- cephfs_data
- cephfs_metadata
changed_when: false
when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
when: not {{ ceph_version | version_compare('0.80.0', '<') }}
- name: create ceph filesystem
command: ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
changed_when: false
when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
when: not {{ ceph_version | version_compare('0.80.0', '<') }}
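
Two fixes land here: ceph_version is now a plain fact rather than a registered command result (so no .stdout attribute), and the threshold is a full three-part version compared with Ansible's version_compare filter, which compares component-wise instead of lexically. For instance (a sketch):

# "0.94.5" | version_compare('0.80.0', '<')  -> False; with the leading not, the task runs
# "0.67.9" | version_compare('0.80.0', '<')  -> True;  the task is skipped on such old releases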

View File

@@ -7,7 +7,7 @@
# NOTE (leseb): we cannot use docker inspect with 'format field' because of
# https://github.com/ansible/ansible/issues/10156
- name: inspect ceph version
shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
shell: docker inspect docker.io/"{{ ceph_mon_docker_username }}"/"{{ ceph_mon_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
run_once: true

View File

@@ -2,11 +2,11 @@
- name: collect all the pools
command: rados --cluster {{ cluster }} lspools
register: ceph_pools
when: "{{ ceph_version.stdout | version_compare('0.94', '>=') }}"
when: "{{ ceph_version | version_compare('0.94.0', '>=') }}"
- name: secure the cluster
command: ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
with_nested:
- ceph_pools.stdout_lines
- secure_cluster_flags
when: "{{ ceph_version.stdout | version_compare('0.94', '>=') }}"
when: "{{ ceph_version | version_compare('0.94.0', '>=') }}"

View File

@@ -53,9 +53,3 @@
changed_when: false
failed_when: false
when: cephx
- name: get ceph monitor version
shell: ceph --cluster {{ cluster }} daemon mon."{{ monitor_name }}" version | cut -d '"' -f 4 | cut -f 1,2 -d '.'
changed_when: false
failed_when: "'No such file or directory' in ceph_version.stderr"
register: ceph_version

View File

@@ -20,11 +20,19 @@
- devices
changed_when: false
failed_when: false
register: activate_osd_disk
when:
not item.0.get("skipped") and
item.0.get("rc", 0) != 0 and
not osd_auto_discovery
- name: fail if ceph-disk cannot create an OSD
fail:
msg: "ceph-disk failed to create an OSD"
when:
" 'ceph-disk: Error: ceph osd create failed' in item.get('stderr', '') "
with_items: "{{activate_osd_disk.results}}"
# NOTE (leseb): this task is for partitions because we don't explicitly use a partition.
- name: activate osd(s) when device is a partition
command: "ceph-disk activate {{ item.1 }}"

View File

@@ -7,7 +7,7 @@
# NOTE (leseb): we cannot use docker inspect with 'format field' because of
# https://github.com/ansible/ansible/issues/10156
- name: inspect ceph version
shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
shell: docker inspect docker.io/"{{ ceph_osd_docker_username }}"/"{{ ceph_osd_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
run_once: true

View File

@@ -1,6 +1,6 @@
---
- name: inspect ceph version
shell: "docker inspect --format '{{ index (index .Config.Env) 3 }}' docker.io/{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }} | cut -d '=' -f '2'"
shell: docker inspect docker.io/"{{ ceph_restapi_docker_username }}"/"{{ ceph_restapi_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
run_once: true

View File

@@ -7,7 +7,7 @@
# NOTE (leseb): we cannot use docker inspect with 'format field' because of
# https://github.com/ansible/ansible/issues/10156
- name: inspect ceph version
shell: docker inspect docker.io/ceph/daemon | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
shell: docker inspect docker.io/"{{ ceph_rgw_docker_username }}"/"{{ ceph_rgw_docker_imagename }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
run_once: true

View File

@@ -8,6 +8,3 @@
- ceph-mds
- ceph-rgw
- ceph-fetch-keys
- ceph-agent
- ceph-common-coreos
- ceph-rbd-mirror