#!/bin/bash
# Restart Ceph OSD daemons one unit at a time, waiting for the admin socket
# to reappear and for all PGs to return to active+clean before moving on.

DELAY="{{ handler_health_osd_check_delay }}"
CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}"

# Succeed once every PG is reported active+clean, retrying up to $RETRIES
# times; dump diagnostic output and exit 1 if the cluster never settles.
check_pgs() {
  num_pgs=$($container_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')
  if [[ "$num_pgs" == "0" ]]; then
    return 0
  fi
  while [ $RETRIES -ne 0 ]; do
    total_pgs=$($container_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')
    clean_pgs=$($container_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(sum(i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if "active+clean" in i["state_name"]))')
    test "$total_pgs" -eq "$clean_pgs" && return 0
    sleep $DELAY
    let RETRIES=RETRIES-1
  done
  # PGs are not clean, exiting with return code 1
  echo "Error while running 'ceph $CEPH_CLI -s', PGs were not reported as active+clean"
  echo "It is possible that the cluster has fewer OSDs than the replica configuration requires"
  echo "Will refuse to continue"
  $container_exec ceph $CEPH_CLI -s
  $container_exec ceph $CEPH_CLI osd dump
  $container_exec ceph $CEPH_CLI osd tree
  $container_exec ceph $CEPH_CLI osd crush rule dump
  exit 1
}

# Wait up to 10 seconds for the OSD admin socket to appear inside container "$1".
wait_for_socket_in_container() {
  osd_mount_point=$({{ container_binary }} exec "$1" df --output=target | grep '/var/lib/ceph/osd/')
  whoami=$({{ container_binary }} exec "$1" cat "$osd_mount_point/whoami")
  if ! {{ container_binary }} exec "$1" timeout 10 bash -c "while [ ! -e /var/run/ceph/{{ cluster }}-osd.${whoami}.asok ]; do sleep 1; done"; then
    echo "Timed out while trying to look for a Ceph OSD socket."
    echo "Abort mission!"
    exit 1
  fi
}

# Extract the device name from a ceph-osd@<dev>.service unit name.
get_dev_name() {
  echo "$1" | sed -r 's/ceph-osd@([a-z]{1,4})\.service/\1/'
}

# Find the ID of the container whose name ends with "$1", retrying while
# the container is still starting up.
get_container_id_from_dev_name() {
  local id
  local count
  count=10
  while [ $count -ne 0 ]; do
    id=$({{ container_binary }} ps | grep -E "${1}$" | cut -d' ' -f1)
    test "$id" != "" && break
    sleep $DELAY
    let count=count-1
  done
  echo "$id"
}

# For containerized deployments, the unit file looks like: ceph-osd@sda.service
# For non-containerized deployments, the unit file looks like: ceph-osd@NNN.service, where NNN is the OSD ID
for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([0-9]+|[a-z]+)\.service"); do
  # First, restart the daemon
  systemctl restart "${unit}"
  # Wait and ensure the socket exists after restarting the daemon; it may
  # take some time for the socket to actually exist.
  COUNT=10
{% if containerized_deployment %}
  osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
  container_id=$(get_container_id_from_dev_name "ceph-osd-${osd_id}")
  container_exec="{{ container_binary }} exec $container_id"
{% else %}
  osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
{% endif %}
  SOCKET=/var/run/ceph/{{ cluster }}-osd.${osd_id}.asok
  while [ $COUNT -ne 0 ]; do
    RETRIES="{{ handler_health_osd_check_retries }}"
    # Move on to the next unit as soon as the socket is up and the PGs are clean.
    $container_exec test -S "$SOCKET" && check_pgs && continue 2
    sleep $DELAY
    let COUNT=COUNT-1
  done
  # If we reach this point, it means the socket is not present.
  echo "Socket file ${SOCKET} could not be found, which means the osd daemon is not running. Showing ceph-osd unit logs now:"
  journalctl -u "${unit}"
  exit 1
done