From aa97ecf0480c1075187b38038463f2f52144c754 Mon Sep 17 00:00:00 2001 From: Matthew Vernon Date: Fri, 21 Sep 2018 17:55:01 +0100 Subject: [PATCH 001/105] restart_osd_daemon.sh.j2 - Reset RETRIES between calls of check_pgs Previously RETRIES was set (by default to 40) once at the start of the script; this meant that it would only ever wait for up to 40 lots of 30s across *all* the OSDs on a host before bombing out. In fact, we want to be prepared to wait for the same amount of time after each OSD restart for the clusters' pgs to be happy again before continuing. Closes: #3154 Signed-off-by: Matthew Vernon --- roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 index 0781c3420..1d9db15b7 100644 --- a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 +++ b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 @@ -1,6 +1,5 @@ #!/bin/bash -RETRIES="{{ handler_health_osd_check_retries }}" DELAY="{{ handler_health_osd_check_delay }}" CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}" @@ -78,6 +77,7 @@ for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph- {% endif %} SOCKET=/var/run/ceph/{{ cluster }}-osd.${osd_id}.asok while [ $COUNT -ne 0 ]; do + RETRIES="{{ handler_health_osd_check_retries }}" $docker_exec test -S "$SOCKET" && check_pgs && continue 2 sleep $DELAY let COUNT=COUNT-1 From 04f4991648568e079f19f8e531a11a5fddd45c87 Mon Sep 17 00:00:00 2001 From: Matthew Vernon Date: Wed, 19 Sep 2018 13:26:26 +0100 Subject: [PATCH 002/105] restart_osd_daemon.sh.j2 - consider active+clean+* pgs as OK After restarting each OSD, restart_osd_daemon.sh checks that the cluster is in a good state before moving on to the next one. One of the checks it does is that the number of pgs in the state "active+clean" is equal to the total number of pgs in the cluster. On large clusters (e.g. we have 173,696 pgs), it is likely that at least one pg will be scrubbing and/or deep-scrubbing at any one time. These pgs are in state "active+clean+scrubbing" or "active+clean+scrubbing+deep", so the script was erroneously not including them in the "good" count. Similar concerns apply to "active+clean+snaptrim" and "active+clean+snaptrim_wait". Fix this by considering as good any pg whose state contains active+clean. Do this as an integer comparison to num_pgs in pgmap. (could this be backported to at least stable-3.0 please?) 
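For illustration, a rough standalone sketch of the new counting logic in plain Python (same pgmap fields as the `ceph -s -f json` output parsed above; the counts themselves are made up):

```python
import json

# Made-up `ceph -s -f json` pgmap fragment: 100 pgs, a few of them scrubbing
status = json.loads('''
{"pgmap": {"num_pgs": 100,
           "pgs_by_state": [
               {"state_name": "active+clean", "count": 90},
               {"state_name": "active+clean+scrubbing", "count": 6},
               {"state_name": "active+clean+scrubbing+deep", "count": 4}]}}
''')

pgmap = status["pgmap"]
# Any state containing "active+clean" counts as good; compare the total to num_pgs
good = sum(i["count"] for i in pgmap["pgs_by_state"] if "active+clean" in i["state_name"])
print(good == pgmap["num_pgs"])  # True: scrubbing pgs no longer block the restart loop
```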
Closes: #2008 Signed-off-by: Matthew Vernon --- roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 index 1d9db15b7..5aa3b714d 100644 --- a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 +++ b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 @@ -9,7 +9,7 @@ check_pgs() { return 0 fi while [ $RETRIES -ne 0 ]; do - test "[""$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]')" + test "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')" -eq "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print sum ( [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if "active+clean" in i["state_name"]])')" RET=$? test $RET -eq 0 && return 0 sleep $DELAY From 806461ac6edd6aada39173df9d9163239fd82555 Mon Sep 17 00:00:00 2001 From: Matthew Vernon Date: Wed, 19 Sep 2018 14:25:15 +0100 Subject: [PATCH 003/105] restart_osd_daemon.sh.j2 - use `+` rather than `{1,}` in regex `+` is more idiomatic for "one or more" in a regex than `{1,}`; the latter was introduced in a previous fix for an incorrect `{1,2}` restriction. Signed-off-by: Matthew Vernon --- roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 index 5aa3b714d..15b255900 100644 --- a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 +++ b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 @@ -60,7 +60,7 @@ get_docker_osd_id() { # For containerized deployments, the unit file looks like: ceph-osd@sda.service # For non-containerized deployments, the unit file looks like: ceph-osd@NNN.service where NNN is OSD ID -for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([0-9]{1,}|[a-z]+).service"); do +for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([0-9]+|[a-z]+).service"); do # First, restart daemon(s) systemctl restart "${unit}" # We need to wait because it may take some time for the socket to actually exists @@ -73,7 +73,7 @@ for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph- osd_id=$whoami docker_exec="docker exec $container_id" {% else %} - osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]{1,}') + osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+') {% endif %} SOCKET=/var/run/ceph/{{ cluster }}-osd.${osd_id}.asok while [ $COUNT -ne 0 ]; do From 6126210e0e426a4dc96ef78f90c8c6473f4c5b7c Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Mon, 24 Sep 2018 10:17:02 +0200 Subject: [PATCH 004/105] Fix version check in ceph.conf template We need to look for ceph_release when comparing with release names, not ceph_version. 
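A minimal sketch of the difference, assuming `ceph_version` holds a numeric version string (as set from the `ceph --version` output elsewhere in the playbooks) while `ceph_release` holds a release codename:

```python
ceph_version = "12.2.8"    # numeric version string, e.g. parsed from `ceph --version`
ceph_release = "luminous"  # release codename

# Old condition: a version string never matches a codename, so this was
# always True, even on luminous and older releases
print(ceph_version not in ['jewel', 'kraken', 'luminous'])  # True

# Fixed condition: only True for releases newer than luminous
print(ceph_release not in ['jewel', 'kraken', 'luminous'])  # False
```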
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1631789 Signed-off-by: Giulio Fidente --- roles/ceph-config/templates/ceph.conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ceph-config/templates/ceph.conf.j2 b/roles/ceph-config/templates/ceph.conf.j2 index 943acad22..090d5fb51 100644 --- a/roles/ceph-config/templates/ceph.conf.j2 +++ b/roles/ceph-config/templates/ceph.conf.j2 @@ -17,7 +17,7 @@ fsid = {{ fsid }} {% if common_single_host_mode is defined and common_single_host_mode %} osd crush chooseleaf type = 0 {% endif %} -{% if ceph_version not in ['jewel', 'kraken', 'luminous'] and containerized_deployment %} +{% if ceph_release not in ['jewel', 'kraken', 'luminous'] and containerized_deployment %} # let's force the admin socket the way it was so we can properly check for existing instances # also the line $cluster-$name.$pid.$cctid.asok is only needed when running multiple instances # of the same daemon, thing ceph-ansible cannot do at the time of writing From 3285b47703be29f65825e91870e3c9fdea821314 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Fri, 21 Sep 2018 17:16:00 +0200 Subject: [PATCH 005/105] tests: add an RGW node on osd0 for ooo-collocation get more coverage by adding an RGW daemon collocated on osd0. We've missed a bug in the past which could have been caught earlier in the CI. Let's add this additional daemon in order to have a better coverage. Signed-off-by: Guillaume Abrioux --- tests/functional/centos/7/ooo-collocation/hosts | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/functional/centos/7/ooo-collocation/hosts b/tests/functional/centos/7/ooo-collocation/hosts index c75f1599e..ae671a1be 100644 --- a/tests/functional/centos/7/ooo-collocation/hosts +++ b/tests/functional/centos/7/ooo-collocation/hosts @@ -85,3 +85,4 @@ rbdmirrors: rgws: hosts: mon0: {} + osd0: {} From 4cd675e7ec3e63e5c4f478ac579b9d8275239c22 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Fri, 21 Sep 2018 09:54:43 -0500 Subject: [PATCH 006/105] docs: supported validation by the ceph-validate role List the osd_scenarios and install options that are validated by the ceph-validate role in the documentation. Signed-off-by: Andrew Schoen --- docs/source/index.rst | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/docs/source/index.rst b/docs/source/index.rst index d6e7e3ae9..48f246641 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -152,6 +152,26 @@ An example of a validation failure might look like: "changed": false } +Supported Validation +^^^^^^^^^^^^^^^^^^^^ + +The ``ceph-validate`` role currently supports validation of the proper config for the following +osd scenarios: + +- ``collocated`` +- ``non-collocated`` +- ``lvm`` + +The following install options are also validated by the ``ceph-validate`` role: + +- ``ceph_origin`` set to ``distro`` +- ``ceph_origin`` set to ``repository`` +- ``ceph_origin`` set to ``local`` +- ``ceph_repository`` set to ``rhcs`` +- ``ceph_repository`` set to ``dev`` +- ``ceph_repository`` set to ``community`` + + Installation methods -------------------- From c13a3c34929f34af11fbd746e9c0502a70f84b97 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Mon, 24 Sep 2018 14:21:24 +0200 Subject: [PATCH 007/105] upgrade: consider all 'active+clean' states as valid pgs In cluster with a large number of PGs, it can be expected some of them scrubbing, it's a normal operation. 
Preventing scrubbing operations forces setting the noscrub flag before a rolling update, which is a problem because it pauses an important data integrity operation until the end of the rolling upgrade. This commit allows an upgrade even while PGs are scrubbing. Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1616066 Signed-off-by: Guillaume Abrioux --- infrastructure-playbooks/rolling_update.yml | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index abc0e14d4..5bb1b4983 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -185,8 +185,6 @@ with_items: - noout - norebalance - - noscrub - - nodeep-scrub delegate_to: "{{ mon_host }}" when: not containerized_deployment @@ -196,8 +194,6 @@ with_items: - noout - norebalance - - noscrub - - nodeep-scrub delegate_to: "{{ mon_host }}" when: containerized_deployment @@ -427,9 +423,7 @@ command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json" register: ceph_health_post until: > - ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1 - and - (ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean" + ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | selectattr('state_name', 'search', '^active\\+clean') | map(attribute='count') | list | sum) == (ceph_pgs.stdout | from_json).pgmap.num_pgs delegate_to: "{{ groups[mon_group_name][0] }}" retries: "{{ health_osd_check_retries }}" delay: "{{ health_osd_check_delay }}" @@ -461,8 +455,6 @@ with_items: - noout - norebalance - - noscrub - - nodeep-scrub delegate_to: "{{ groups[mon_group_name][0] }}" - name: get osd versions From 179c4d00d702ff9f7a10a3eaa513c289dd75d038 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 25 Sep 2018 14:21:44 +0200 Subject: [PATCH 008/105] rolling_update: ensure pgs_by_state has at least 1 entry The previous commit c13a3c3 removed a condition. This commit brings back this condition which is essential to ensure we won't hit a false positive result in the `when` condition for the check PGs task.
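One way the unguarded comparison could report a false positive, sketched with made-up values in plain Python (the real check is the Jinja2 `until` condition shown above):

```python
# Made-up values: pg states not reported yet, e.g. no pools/pgs exist so far
pgs_by_state = []
num_pgs = 0

active_clean = sum(i["count"] for i in pgs_by_state if "active+clean" in i["state_name"])

# Without the length guard, 0 == 0 passes even though nothing is active+clean
print(active_clean == num_pgs)                              # True  (false positive)
# With the guard brought back by this commit, the check keeps waiting
print(len(pgs_by_state) > 0 and active_clean == num_pgs)    # False
```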
Signed-off-by: Guillaume Abrioux --- infrastructure-playbooks/rolling_update.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index 5bb1b4983..3013fffa3 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -423,7 +423,9 @@ command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json" register: ceph_health_post until: > - ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | selectattr('state_name', 'search', '^active\\+clean') | map(attribute='count') | list | sum) == (ceph_pgs.stdout | from_json).pgmap.num_pgs + (((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) > 0) + and + (((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | selectattr('state_name', 'search', '^active\\+clean') | map(attribute='count') | list | sum) == (ceph_pgs.stdout | from_json).pgmap.num_pgs) delegate_to: "{{ groups[mon_group_name][0] }}" retries: "{{ health_osd_check_retries }}" delay: "{{ health_osd_check_delay }}" From 144c92b21ff151cd490fc9f47f7d90a19021e4c6 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Thu, 27 Sep 2018 11:33:51 +0200 Subject: [PATCH 009/105] purge: actually remove of /var/lib/ceph/* 38dc20e74b89c1833d45f677f405fe758fd10c04 introduced a bug in the purge playbooks because using `*` in `command` module doesn't work. `/var/lib/ceph/*` files are not purged it means there is a leftover. When trying to redeploy a cluster, it failed because monitor daemon was detecting existing keyring, therefore, it assumed a cluster already existed. Typical error (from container output): ``` Sep 26 13:18:16 mon0 docker[31316]: 2018-09-26 13:18:16 /entrypoint.sh: Existing mon, trying to rejoin cluster... 
Sep 26 13:18:16 mon0 docker[31316]: 2018-09-26 13:18:16.9323937f15b0d74700 -1 auth: unable to find a keyring on /etc/ceph/test.client.admin.keyring,/etc/ceph/test.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,:(2) No such file or directory Sep 26 13:18:23 mon0 docker[31316]: 2018-09-26 13:18:23 /entrypoint.sh: SUCCESS ``` Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1633563 Signed-off-by: Guillaume Abrioux --- infrastructure-playbooks/purge-cluster.yml | 4 ++-- infrastructure-playbooks/purge-docker-cluster.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml index 9f3692abc..64d8fc005 100644 --- a/infrastructure-playbooks/purge-cluster.yml +++ b/infrastructure-playbooks/purge-cluster.yml @@ -222,7 +222,7 @@ timeout: 500 - name: remove data - command: rm -rf /var/lib/ceph/* + shell: rm -rf /var/lib/ceph/* tasks: @@ -534,7 +534,7 @@ listen: "remove data" - name: remove data - command: rm -rf /var/lib/ceph/* + shell: rm -rf /var/lib/ceph/* listen: "remove data" tasks: diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml index b52c491df..570085334 100644 --- a/infrastructure-playbooks/purge-docker-cluster.yml +++ b/infrastructure-playbooks/purge-docker-cluster.yml @@ -654,7 +654,7 @@ - /var/log/ceph - name: remove data - command: rm -rf /var/lib/ceph/* + shell: rm -rf /var/lib/ceph/* - name: purge fetch directory From 380168dadc7332a2dbbcc464e3c3648756b4946c Mon Sep 17 00:00:00 2001 From: Rishabh Dave Date: Tue, 21 Aug 2018 19:53:35 +0530 Subject: [PATCH 010/105] don't use "include" to include tasks Use "import_tasks" or "include_tasks" instead. Signed-off-by: Rishabh Dave --- .../untested-by-ci/purge-multisite.yml | 2 +- roles/ceph-agent/tasks/main.yml | 4 ++-- roles/ceph-client/tasks/main.yml | 4 ++-- roles/ceph-common-coreos/tasks/main.yml | 4 ++-- ...nfigure_debian_repository_installation.yml | 10 +++++----- ...nfigure_redhat_repository_installation.yml | 8 ++++---- ...configure_suse_repository_installation.yml | 2 +- .../tasks/installs/debian_rhcs_repository.yml | 4 ++-- .../tasks/installs/install_on_debian.yml | 6 +++--- .../tasks/installs/install_on_redhat.yml | 6 +++--- .../tasks/installs/install_on_suse.yml | 4 ++-- .../tasks/installs/redhat_rhcs_repository.yml | 4 ++-- roles/ceph-common/tasks/main.yml | 10 +++++----- .../tasks/check_running_cluster.yml | 4 ++-- roles/ceph-defaults/tasks/main.yml | 6 +++--- roles/ceph-docker-common/tasks/checks.yml | 2 +- roles/ceph-docker-common/tasks/main.yml | 20 +++++++++---------- .../tasks/misc/ntp_atomic.yml | 2 +- .../tasks/misc/ntp_debian.yml | 2 +- .../ceph-docker-common/tasks/misc/ntp_rpm.yml | 2 +- .../tasks/pre_requisites/prerequisites.yml | 4 ++-- roles/ceph-iscsi-gw/tasks/main.yml | 10 +++++----- roles/ceph-mds/tasks/main.yml | 8 ++++---- roles/ceph-mgr/tasks/docker/main.yml | 2 +- roles/ceph-mgr/tasks/main.yml | 6 +++--- roles/ceph-mon/tasks/docker/main.yml | 8 ++++---- roles/ceph-mon/tasks/main.yml | 14 ++++++------- roles/ceph-nfs/tasks/main.yml | 12 +++++------ roles/ceph-osd/tasks/docker/main.yml | 2 +- roles/ceph-osd/tasks/main.yml | 8 ++++---- roles/ceph-rbd-mirror/tasks/docker/main.yml | 2 +- roles/ceph-rbd-mirror/tasks/main.yml | 10 +++++----- roles/ceph-restapi/tasks/docker/main.yml | 4 ++-- roles/ceph-restapi/tasks/main.yml | 6 +++--- roles/ceph-rgw/tasks/docker/main.yml | 2 +- roles/ceph-rgw/tasks/main.yml | 2 +- 
roles/ceph-rgw/tasks/multisite/main.yml | 2 +- roles/ceph-validate/tasks/main.yml | 8 ++++---- 38 files changed, 108 insertions(+), 108 deletions(-) diff --git a/infrastructure-playbooks/untested-by-ci/purge-multisite.yml b/infrastructure-playbooks/untested-by-ci/purge-multisite.yml index 8b78553ac..c17500e52 100644 --- a/infrastructure-playbooks/untested-by-ci/purge-multisite.yml +++ b/infrastructure-playbooks/untested-by-ci/purge-multisite.yml @@ -3,7 +3,7 @@ - hosts: rgws become: True tasks: - - include: roles/ceph-rgw/tasks/multisite/destroy.yml + - include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml handlers: - include: roles/ceph-rgw/handlers/main.yml diff --git a/roles/ceph-agent/tasks/main.yml b/roles/ceph-agent/tasks/main.yml index 9f7159766..2c252c96a 100644 --- a/roles/ceph-agent/tasks/main.yml +++ b/roles/ceph-agent/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: include pre_requisite.yml - include: pre_requisite.yml + include_tasks: pre_requisite.yml - name: include start_agent.yml - include: start_agent.yml + include_tasks: start_agent.yml diff --git a/roles/ceph-client/tasks/main.yml b/roles/ceph-client/tasks/main.yml index 3a03bfe5d..87ea5fd70 100644 --- a/roles/ceph-client/tasks/main.yml +++ b/roles/ceph-client/tasks/main.yml @@ -1,8 +1,8 @@ --- - name: include pre_requisite.yml - include: pre_requisite.yml + include_tasks: pre_requisite.yml - name: include create_users_keys.yml - include: create_users_keys.yml + include_tasks: create_users_keys.yml when: - user_config diff --git a/roles/ceph-common-coreos/tasks/main.yml b/roles/ceph-common-coreos/tasks/main.yml index b0992e802..38ae82ce6 100644 --- a/roles/ceph-common-coreos/tasks/main.yml +++ b/roles/ceph-common-coreos/tasks/main.yml @@ -5,7 +5,7 @@ ignore_errors: true check_mode: no -- include: install_pypy.yml +- include_tasks: install_pypy.yml when: need_python | failed - name: check if there is pip @@ -14,5 +14,5 @@ ignore_errors: true check_mode: no -- include: install_pip.yml +- include_tasks: install_pip.yml when: need_pip | failed and need_python | failed diff --git a/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml index 466f26283..59e6aa2f3 100644 --- a/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml @@ -1,26 +1,26 @@ --- - name: include debian_community_repository.yml - include: debian_community_repository.yml + include_tasks: debian_community_repository.yml when: - ceph_repository == 'community' - name: include debian_rhcs_repository.yml - include: debian_rhcs_repository.yml + include_tasks: debian_rhcs_repository.yml when: - ceph_repository == 'rhcs' - name: include debian_dev_repository.yml - include: debian_dev_repository.yml + include_tasks: debian_dev_repository.yml when: - ceph_repository == 'dev' - name: include debian_custom_repository.yml - include: debian_custom_repository.yml + include_tasks: debian_custom_repository.yml when: - ceph_repository == 'custom' - name: include debian_uca_repository.yml - include: debian_uca_repository.yml + include_tasks: debian_uca_repository.yml when: - ceph_repository == 'uca' diff --git a/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml index 74f18bc66..b0190df7f 100644 --- 
a/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml @@ -1,21 +1,21 @@ --- - name: include redhat_community_repository.yml - include: redhat_community_repository.yml + include_tasks: redhat_community_repository.yml when: - ceph_repository == 'community' - name: include redhat_rhcs_repository.yml - include: redhat_rhcs_repository.yml + include_tasks: redhat_rhcs_repository.yml when: - ceph_repository == 'rhcs' - name: include redhat_dev_repository.yml - include: redhat_dev_repository.yml + include_tasks: redhat_dev_repository.yml when: - ceph_repository == 'dev' - name: include redhat_custom_repository.yml - include: redhat_custom_repository.yml + include_tasks: redhat_custom_repository.yml when: - ceph_repository == 'custom' diff --git a/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml index 15665aa90..1e23e3ed3 100644 --- a/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml @@ -1,5 +1,5 @@ --- - name: include suse_obs_repository.yml - include: suse_obs_repository.yml + include_tasks: suse_obs_repository.yml when: - ceph_repository == 'obs' diff --git a/roles/ceph-common/tasks/installs/debian_rhcs_repository.yml b/roles/ceph-common/tasks/installs/debian_rhcs_repository.yml index f381be077..4638a991e 100644 --- a/roles/ceph-common/tasks/installs/debian_rhcs_repository.yml +++ b/roles/ceph-common/tasks/installs/debian_rhcs_repository.yml @@ -8,11 +8,11 @@ mode: 0644 - name: include prerequisite_rhcs_iso_install_debian.yml - include: prerequisite_rhcs_iso_install_debian.yml + include_tasks: prerequisite_rhcs_iso_install_debian.yml when: - ceph_repository_type == 'iso' - name: include prerequisite_rhcs_cdn_install_debian.yml - include: prerequisite_rhcs_cdn_install_debian.yml + include_tasks: prerequisite_rhcs_cdn_install_debian.yml when: - ceph_repository_type == 'cdn' diff --git a/roles/ceph-common/tasks/installs/install_on_debian.yml b/roles/ceph-common/tasks/installs/install_on_debian.yml index e60797587..9b8471ba1 100644 --- a/roles/ceph-common/tasks/installs/install_on_debian.yml +++ b/roles/ceph-common/tasks/installs/install_on_debian.yml @@ -1,6 +1,6 @@ --- - name: include configure_debian_repository_installation.yml - include: configure_debian_repository_installation.yml + include_tasks: configure_debian_repository_installation.yml when: - ceph_origin == 'repository' @@ -28,13 +28,13 @@ cache_valid_time: 3600 - name: include install_debian_packages.yml - include: install_debian_packages.yml + include_tasks: install_debian_packages.yml when: - (ceph_origin == 'repository' or ceph_origin == 'distro') - ceph_repository != 'rhcs' - name: include install_debian_rhcs_packages.yml - include: install_debian_rhcs_packages.yml + include_tasks: install_debian_rhcs_packages.yml when: - (ceph_origin == 'repository' or ceph_origin == 'distro') - ceph_repository == 'rhcs' diff --git a/roles/ceph-common/tasks/installs/install_on_redhat.yml b/roles/ceph-common/tasks/installs/install_on_redhat.yml index 3ddb88ef0..52e4eb0e7 100644 --- a/roles/ceph-common/tasks/installs/install_on_redhat.yml +++ b/roles/ceph-common/tasks/installs/install_on_redhat.yml @@ -1,15 +1,15 @@ --- - name: include configure_redhat_repository_installation.yml - include: 
configure_redhat_repository_installation.yml + include_tasks: configure_redhat_repository_installation.yml when: - ceph_origin == 'repository' - name: include configure_redhat_local_installation.yml - include: configure_redhat_local_installation.yml + include_tasks: configure_redhat_local_installation.yml when: - ceph_origin == 'local' - name: include install_redhat_packages.yml - include: install_redhat_packages.yml + include_tasks: install_redhat_packages.yml when: - (ceph_origin == 'repository' or ceph_origin == 'distro') diff --git a/roles/ceph-common/tasks/installs/install_on_suse.yml b/roles/ceph-common/tasks/installs/install_on_suse.yml index 57b4a2ef2..c8921a68e 100644 --- a/roles/ceph-common/tasks/installs/install_on_suse.yml +++ b/roles/ceph-common/tasks/installs/install_on_suse.yml @@ -9,7 +9,7 @@ - ceph_origin != 'distro' or (ceph_origin == 'repository' and ceph_repository != 'obs') - name: include configure_suse_repository_installation.yml - include: configure_suse_repository_installation.yml + include_tasks: configure_suse_repository_installation.yml when: - ceph_origin == 'repository' @@ -21,4 +21,4 @@ with_items: "{{ suse_package_dependencies }}" - name: include install_suse_packages.yml - include: install_suse_packages.yml + include_tasks: install_suse_packages.yml diff --git a/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml b/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml index 3fdac2b05..cfb5e481a 100644 --- a/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml +++ b/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml @@ -1,10 +1,10 @@ --- - name: include prerequisite_rhcs_iso_install.yml - include: prerequisite_rhcs_iso_install.yml + include_tasks: prerequisite_rhcs_iso_install.yml when: - ceph_repository_type == 'iso' - name: include prerequisite_rhcs_cdn_install.yml - include: prerequisite_rhcs_cdn_install.yml + include_tasks: prerequisite_rhcs_cdn_install.yml when: - ceph_repository_type == 'cdn' diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index c485dd6d3..9a024246f 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -63,7 +63,7 @@ # override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory - name: include release-rhcs.yml - include: release-rhcs.yml + include_tasks: release-rhcs.yml when: - ceph_repository in ['rhcs', 'dev'] tags: @@ -85,7 +85,7 @@ static: False - name: include facts_mon_fsid.yml - include: facts_mon_fsid.yml + include_tasks: facts_mon_fsid.yml run_once: true when: - cephx @@ -94,13 +94,13 @@ - ceph_current_status.fsid is defined - name: include create_rbd_client_dir.yml - include: create_rbd_client_dir.yml + include_tasks: create_rbd_client_dir.yml - name: include configure_cluster_name.yml - include: configure_cluster_name.yml + include_tasks: configure_cluster_name.yml - name: include configure_memory_allocator.yml - include: configure_memory_allocator.yml + include_tasks: configure_memory_allocator.yml when: - (ceph_tcmalloc_max_total_thread_cache | int) > 0 - osd_objectstore == 'filestore' diff --git a/roles/ceph-defaults/tasks/check_running_cluster.yml b/roles/ceph-defaults/tasks/check_running_cluster.yml index b41e42f1f..0418d2ffe 100644 --- a/roles/ceph-defaults/tasks/check_running_cluster.yml +++ b/roles/ceph-defaults/tasks/check_running_cluster.yml @@ -1,10 +1,10 @@ --- - name: include check_running_containers.yml - include: check_running_containers.yml + include_tasks: 
check_running_containers.yml when: - containerized_deployment - name: include check_socket_non_container.yml - include: check_socket_non_container.yml + include_tasks: check_socket_non_container.yml when: - not containerized_deployment diff --git a/roles/ceph-defaults/tasks/main.yml b/roles/ceph-defaults/tasks/main.yml index 112c37c77..3559ee8bd 100644 --- a/roles/ceph-defaults/tasks/main.yml +++ b/roles/ceph-defaults/tasks/main.yml @@ -1,9 +1,9 @@ --- - name: include check_running_cluster.yml - include: check_running_cluster.yml + include_tasks: check_running_cluster.yml - name: include facts.yml - include: facts.yml + include_tasks: facts.yml - name: include create_ceph_initial_dirs.yml - include: create_ceph_initial_dirs.yml \ No newline at end of file + include_tasks: create_ceph_initial_dirs.yml diff --git a/roles/ceph-docker-common/tasks/checks.yml b/roles/ceph-docker-common/tasks/checks.yml index f96d90635..b892cc642 100644 --- a/roles/ceph-docker-common/tasks/checks.yml +++ b/roles/ceph-docker-common/tasks/checks.yml @@ -1,6 +1,6 @@ --- - name: include stat_ceph_files.yml - include: stat_ceph_files.yml + include_tasks: stat_ceph_files.yml - name: fail if we find existing cluster files fail: diff --git a/roles/ceph-docker-common/tasks/main.yml b/roles/ceph-docker-common/tasks/main.yml index 13ff5270d..d0110c8a7 100644 --- a/roles/ceph-docker-common/tasks/main.yml +++ b/roles/ceph-docker-common/tasks/main.yml @@ -1,12 +1,12 @@ --- - name: include system_checks.yml - include: system_checks.yml + include_tasks: system_checks.yml - name: include check_mandatory_vars.yml - include: check_mandatory_vars.yml + include_tasks: check_mandatory_vars.yml - name: include pre_requisites/prerequisites.yml - include: pre_requisites/prerequisites.yml + include_tasks: pre_requisites/prerequisites.yml when: - not is_atomic @@ -47,7 +47,7 @@ check_mode: no - name: include checks.yml - include: checks.yml + include_tasks: checks.yml when: - (not containerized_deployment_with_kv and ((inventory_hostname in groups.get(mon_group_name, [])) or @@ -57,27 +57,27 @@ - not rolling_update | default(false) - name: include misc/ntp_atomic.yml - include: misc/ntp_atomic.yml + include_tasks: misc/ntp_atomic.yml when: - is_atomic - ansible_os_family == 'RedHat' - ntp_service_enabled - name: include misc/ntp_rpm.yml - include: misc/ntp_rpm.yml + include_tasks: misc/ntp_rpm.yml when: - not is_atomic - ansible_os_family in ['RedHat', 'Suse'] - ntp_service_enabled - name: include misc/ntp_debian.yml - include: misc/ntp_debian.yml + include_tasks: misc/ntp_debian.yml when: - ansible_os_family == 'Debian' - ntp_service_enabled - name: include fetch_image.yml - include: fetch_image.yml + include_tasks: fetch_image.yml tags: - fetch_container_image @@ -92,9 +92,9 @@ ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}" - name: include release.yml - include: release.yml + include_tasks: release.yml # NOTE (jimcurtis): dirs_permissions.yml must precede fetch_configs.yml # # because it creates the directories needed by the latter. 
- name: include dirs_permissions.yml - include: dirs_permissions.yml + include_tasks: dirs_permissions.yml diff --git a/roles/ceph-docker-common/tasks/misc/ntp_atomic.yml b/roles/ceph-docker-common/tasks/misc/ntp_atomic.yml index b8a753450..fe6e268cf 100644 --- a/roles/ceph-docker-common/tasks/misc/ntp_atomic.yml +++ b/roles/ceph-docker-common/tasks/misc/ntp_atomic.yml @@ -1,6 +1,6 @@ --- - name: include ../checks/check_ntp_atomic.yml - include: ../checks/check_ntp_atomic.yml + include_tasks: ../checks/check_ntp_atomic.yml when: is_atomic - name: start the ntp service diff --git a/roles/ceph-docker-common/tasks/misc/ntp_debian.yml b/roles/ceph-docker-common/tasks/misc/ntp_debian.yml index 7eab2e410..6441daa40 100644 --- a/roles/ceph-docker-common/tasks/misc/ntp_debian.yml +++ b/roles/ceph-docker-common/tasks/misc/ntp_debian.yml @@ -1,6 +1,6 @@ --- - name: include ../checks/check_ntp_debian.yml - include: ../checks/check_ntp_debian.yml + include_tasks: ../checks/check_ntp_debian.yml when: - ansible_os_family == 'Debian' diff --git a/roles/ceph-docker-common/tasks/misc/ntp_rpm.yml b/roles/ceph-docker-common/tasks/misc/ntp_rpm.yml index 5384604eb..6f2a58a57 100644 --- a/roles/ceph-docker-common/tasks/misc/ntp_rpm.yml +++ b/roles/ceph-docker-common/tasks/misc/ntp_rpm.yml @@ -1,6 +1,6 @@ --- - name: include ../checks/check_ntp_rpm.yml - include: ../checks/check_ntp_rpm.yml + include_tasks: ../checks/check_ntp_rpm.yml when: - ansible_os_family in ['RedHat', 'Suse'] diff --git a/roles/ceph-docker-common/tasks/pre_requisites/prerequisites.yml b/roles/ceph-docker-common/tasks/pre_requisites/prerequisites.yml index 02dd814d6..1124264e1 100644 --- a/roles/ceph-docker-common/tasks/pre_requisites/prerequisites.yml +++ b/roles/ceph-docker-common/tasks/pre_requisites/prerequisites.yml @@ -1,9 +1,9 @@ --- - name: include remove_ceph_udev_rules.yml - include: remove_ceph_udev_rules.yml + include_tasks: remove_ceph_udev_rules.yml - name: include debian_prerequisites.yml - include: debian_prerequisites.yml + include_tasks: debian_prerequisites.yml when: - ansible_distribution == 'Debian' tags: diff --git a/roles/ceph-iscsi-gw/tasks/main.yml b/roles/ceph-iscsi-gw/tasks/main.yml index f498122c7..77cfe618e 100644 --- a/roles/ceph-iscsi-gw/tasks/main.yml +++ b/roles/ceph-iscsi-gw/tasks/main.yml @@ -1,9 +1,9 @@ --- - name: include common.yml - include: common.yml + include_tasks: common.yml - name: include non-container/prerequisites.yml - include: non-container/prerequisites.yml + include_tasks: non-container/prerequisites.yml when: - not containerized_deployment @@ -11,16 +11,16 @@ # and transfers them to /etc/ceph directory on each controller. SSL certs are used by # the API for https support. 
- name: include deploy_ssl_keys.yml - include: deploy_ssl_keys.yml + include_tasks: deploy_ssl_keys.yml when: - generate_crt|bool - name: include non-container/configure_iscsi.yml - include: non-container/configure_iscsi.yml + include_tasks: non-container/configure_iscsi.yml when: - not containerized_deployment - name: include containerized.yml - include: container/containerized.yml + include_tasks: container/containerized.yml when: - containerized_deployment diff --git a/roles/ceph-mds/tasks/main.yml b/roles/ceph-mds/tasks/main.yml index d012751ea..a63297199 100644 --- a/roles/ceph-mds/tasks/main.yml +++ b/roles/ceph-mds/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: include create_mds_filesystems.yml - include: create_mds_filesystems.yml + include_tasks: create_mds_filesystems.yml when: - inventory_hostname == groups[mds_group_name] | first @@ -11,12 +11,12 @@ - containerized_deployment - name: include common.yml - include: common.yml + include_tasks: common.yml - name: non_containerized.yml - include: non_containerized.yml + include_tasks: non_containerized.yml when: not containerized_deployment - name: containerized.yml - include: containerized.yml + include_tasks: containerized.yml when: containerized_deployment diff --git a/roles/ceph-mgr/tasks/docker/main.yml b/roles/ceph-mgr/tasks/docker/main.yml index 257c62c1d..1737f1b6f 100644 --- a/roles/ceph-mgr/tasks/docker/main.yml +++ b/roles/ceph-mgr/tasks/docker/main.yml @@ -1,3 +1,3 @@ --- - name: include start_docker_mgr.yml - include: start_docker_mgr.yml + include_tasks: start_docker_mgr.yml diff --git a/roles/ceph-mgr/tasks/main.yml b/roles/ceph-mgr/tasks/main.yml index a2eda822d..410fa1d9a 100644 --- a/roles/ceph-mgr/tasks/main.yml +++ b/roles/ceph-mgr/tasks/main.yml @@ -6,14 +6,14 @@ - containerized_deployment - name: include common.yml - include: common.yml + include_tasks: common.yml - name: include pre_requisite.yml - include: pre_requisite.yml + include_tasks: pre_requisite.yml when: not containerized_deployment - name: include docker/main.yml - include: docker/main.yml + include_tasks: docker/main.yml when: containerized_deployment - name: get enabled modules from ceph-mgr diff --git a/roles/ceph-mon/tasks/docker/main.yml b/roles/ceph-mon/tasks/docker/main.yml index 88273f42a..5703761c7 100644 --- a/roles/ceph-mon/tasks/docker/main.yml +++ b/roles/ceph-mon/tasks/docker/main.yml @@ -1,13 +1,13 @@ --- - name: include copy_configs.yml - include: copy_configs.yml + include_tasks: copy_configs.yml when: not containerized_deployment_with_kv - name: include start_docker_monitor.yml - include: start_docker_monitor.yml + include_tasks: start_docker_monitor.yml - name: include configure_ceph_command_aliases.yml - include: configure_ceph_command_aliases.yml + include_tasks: configure_ceph_command_aliases.yml - name: wait for monitor socket to exist command: "{{ docker_exec_cmd }} sh -c 'stat /var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok || stat /var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok'" @@ -77,7 +77,7 @@ - hostvars[groups[mon_group_name][0]]['monitor_address_block'] != 'subnet' - name: include fetch_configs.yml - include: fetch_configs.yml + include_tasks: fetch_configs.yml run_once: true when: not containerized_deployment_with_kv diff --git a/roles/ceph-mon/tasks/main.yml b/roles/ceph-mon/tasks/main.yml index 1b212f77d..3cf192d09 100644 --- a/roles/ceph-mon/tasks/main.yml +++ b/roles/ceph-mon/tasks/main.yml @@ -6,11 +6,11 @@ - containerized_deployment - name: include deploy_monitors.yml - include: 
deploy_monitors.yml + include_tasks: deploy_monitors.yml when: not containerized_deployment - name: include start_monitor.yml - include: start_monitor.yml + import_tasks: start_monitor.yml when: not containerized_deployment - name: include ceph_keys.yml @@ -20,7 +20,7 @@ static: no - name: include secure_cluster.yml - include: secure_cluster.yml + include_tasks: secure_cluster.yml delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true when: @@ -28,17 +28,17 @@ - not containerized_deployment - name: include docker/main.yml - include: docker/main.yml + include_tasks: docker/main.yml when: containerized_deployment - name: crush_rules.yml - include: crush_rules.yml + include_tasks: crush_rules.yml when: - crush_rule_config - name: include set_osd_pool_default_pg_num.yml - include: set_osd_pool_default_pg_num.yml + include_tasks: set_osd_pool_default_pg_num.yml - name: include calamari.yml - include: calamari.yml + include_tasks: calamari.yml when: calamari diff --git a/roles/ceph-nfs/tasks/main.yml b/roles/ceph-nfs/tasks/main.yml index efa59f627..83482d35e 100644 --- a/roles/ceph-nfs/tasks/main.yml +++ b/roles/ceph-nfs/tasks/main.yml @@ -6,28 +6,28 @@ - containerized_deployment - name: include common.yml - include: common.yml + include_tasks: common.yml - name: include pre_requisite_non_container.yml - include: pre_requisite_non_container.yml + include_tasks: pre_requisite_non_container.yml when: - not containerized_deployment - name: include pre_requisite_container.yml - include: pre_requisite_container.yml + include_tasks: pre_requisite_container.yml when: - containerized_deployment - name: include create_rgw_nfs_user.yml - include: create_rgw_nfs_user.yml + import_tasks: create_rgw_nfs_user.yml # NOTE (leseb): workaround for issues with ganesha and librgw - name: include ganesha_selinux_fix.yml - include: ganesha_selinux_fix.yml + import_tasks: ganesha_selinux_fix.yml when: - not containerized_deployment - ansible_os_family == 'RedHat' - ansible_distribution_version >= '7.4' - name: include start_nfs.yml - include: start_nfs.yml + import_tasks: start_nfs.yml diff --git a/roles/ceph-osd/tasks/docker/main.yml b/roles/ceph-osd/tasks/docker/main.yml index fde59c2d8..91b56ceb2 100644 --- a/roles/ceph-osd/tasks/docker/main.yml +++ b/roles/ceph-osd/tasks/docker/main.yml @@ -1,3 +1,3 @@ --- - name: include start_docker_osd.yml - include: start_docker_osd.yml + include_tasks: start_docker_osd.yml diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index 01e23e801..15399c72a 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -18,10 +18,10 @@ static: False - name: include ceph_disk_cli_options_facts.yml - include: ceph_disk_cli_options_facts.yml + include_tasks: ceph_disk_cli_options_facts.yml - name: include build_devices.yml - include: build_devices.yml + include_tasks: build_devices.yml - name: read information about the devices parted: @@ -31,7 +31,7 @@ with_items: "{{ devices }}" - name: include check_gpt.yml - include: check_gpt.yml + include_tasks: check_gpt.yml when: - osd_scenario != 'lvm' @@ -107,7 +107,7 @@ # Create the pools listed in openstack_pools - name: include openstack_config.yml - include: openstack_config.yml + include_tasks: openstack_config.yml when: - openstack_config - inventory_hostname == groups[osd_group_name] | last diff --git a/roles/ceph-rbd-mirror/tasks/docker/main.yml b/roles/ceph-rbd-mirror/tasks/docker/main.yml index 8368f9ff2..8c2e8a866 100644 --- a/roles/ceph-rbd-mirror/tasks/docker/main.yml +++ 
b/roles/ceph-rbd-mirror/tasks/docker/main.yml @@ -1,3 +1,3 @@ --- - name: include start_docker_rbd_mirror.yml - include: start_docker_rbd_mirror.yml + include_tasks: start_docker_rbd_mirror.yml diff --git a/roles/ceph-rbd-mirror/tasks/main.yml b/roles/ceph-rbd-mirror/tasks/main.yml index 526ae59ac..82a33975d 100644 --- a/roles/ceph-rbd-mirror/tasks/main.yml +++ b/roles/ceph-rbd-mirror/tasks/main.yml @@ -6,25 +6,25 @@ - containerized_deployment - name: include common.yml - include: common.yml + include_tasks: common.yml - name: include pre_requisite.yml - include: pre_requisite.yml + include_tasks: pre_requisite.yml when: - not containerized_deployment - name: include start_rbd_mirror.yml - include: start_rbd_mirror.yml + include_tasks: start_rbd_mirror.yml when: - not containerized_deployment - name: include configure_mirroring.yml - include: configure_mirroring.yml + include_tasks: configure_mirroring.yml when: - ceph_rbd_mirror_configure - not containerized_deployment - name: include docker/main.yml - include: docker/main.yml + include_tasks: docker/main.yml when: - containerized_deployment diff --git a/roles/ceph-restapi/tasks/docker/main.yml b/roles/ceph-restapi/tasks/docker/main.yml index 0e992d15d..71ce252c2 100644 --- a/roles/ceph-restapi/tasks/docker/main.yml +++ b/roles/ceph-restapi/tasks/docker/main.yml @@ -1,6 +1,6 @@ --- - name: include copy_configs.yml - include: copy_configs.yml + include_tasks: copy_configs.yml - name: include start_docker_restapi.yml - include: start_docker_restapi.yml + include_tasks: start_docker_restapi.yml diff --git a/roles/ceph-restapi/tasks/main.yml b/roles/ceph-restapi/tasks/main.yml index 851b78ea0..585bae63d 100644 --- a/roles/ceph-restapi/tasks/main.yml +++ b/roles/ceph-restapi/tasks/main.yml @@ -6,16 +6,16 @@ - containerized_deployment - name: include pre_requisite.yml - include: pre_requisite.yml + include_tasks: pre_requisite.yml when: - not containerized_deployment - name: include start_restapi.yml - include: start_restapi.yml + include_tasks: start_restapi.yml when: - not containerized_deployment - name: include docker/main.yml - include: docker/main.yml + include_tasks: docker/main.yml when: - containerized_deployment diff --git a/roles/ceph-rgw/tasks/docker/main.yml b/roles/ceph-rgw/tasks/docker/main.yml index 0928e5739..539c42303 100644 --- a/roles/ceph-rgw/tasks/docker/main.yml +++ b/roles/ceph-rgw/tasks/docker/main.yml @@ -1,3 +1,3 @@ --- - name: include start_docker_rgw.yml - include: start_docker_rgw.yml + include_tasks: start_docker_rgw.yml diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml index 5641407ba..9abb84424 100644 --- a/roles/ceph-rgw/tasks/main.yml +++ b/roles/ceph-rgw/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: include common.yml - include: common.yml + include_tasks: common.yml - name: include pre_requisite.yml include: pre_requisite.yml diff --git a/roles/ceph-rgw/tasks/multisite/main.yml b/roles/ceph-rgw/tasks/multisite/main.yml index 6485e7338..1f1b1a6fb 100644 --- a/roles/ceph-rgw/tasks/multisite/main.yml +++ b/roles/ceph-rgw/tasks/multisite/main.yml @@ -1,6 +1,6 @@ --- - name: include multisite checks - include: checks.yml + include_tasks: checks.yml # Include the tasks depending on the zone type - name: include master multisite tasks diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml index 8aed76ad3..71b490431 100644 --- a/roles/ceph-validate/tasks/main.yml +++ b/roles/ceph-validate/tasks/main.yml @@ -58,17 +58,17 @@ - ceph_release_num[ceph_release] < 
ceph_release_num.luminous - name: include check_system.yml - include: check_system.yml + include_tasks: check_system.yml - name: include check_devices.yml - include: check_devices.yml + include_tasks: check_devices.yml when: - osd_group_name in group_names - not osd_auto_discovery | default(False) - osd_scenario != "lvm" and devices is not defined - name: include check_eth_mon.yml - include: check_eth_mon.yml + include_tasks: check_eth_mon.yml when: - mon_group_name in group_names - monitor_interface != "dummy" @@ -76,7 +76,7 @@ - monitor_address_block == "subnet" - name: include check_eth_rgw.yml - include: check_eth_rgw.yml + include_tasks: check_eth_rgw.yml when: - rgw_group_name in group_names - radosgw_interface != "dummy" From dc3319c3c4e2fb58cb1b5e6c60f165ed28260dc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 27 Sep 2018 09:57:26 +0200 Subject: [PATCH 011/105] default: use bluestore as default object store MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All tooling in Ceph is defaulting to use the bluestore objectstore for provisioning OSDs, there is no good reason for ceph-ansible to continue to default to filestore. Closes: https://github.com/ceph/ceph-ansible/issues/3149 Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1633508 Signed-off-by: Sébastien Han --- group_vars/all.yml.sample | 2 +- group_vars/rhcs.yml.sample | 2 +- roles/ceph-defaults/defaults/main.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index 1eaade9b9..43fdddf5c 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -370,7 +370,7 @@ dummy: #osd_mkfs_type: xfs #osd_mkfs_options_xfs: -f -i size=2048 #osd_mount_options_xfs: noatime,largeio,inode64,swalloc -#osd_objectstore: filestore +#osd_objectstore: bluestore # xattrs. by default, 'filestore xattr use omap' is set to 'true' if # 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index d7696123c..86aa44839 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -370,7 +370,7 @@ ceph_rhcs_version: 3 #osd_mkfs_type: xfs #osd_mkfs_options_xfs: -f -i size=2048 #osd_mount_options_xfs: noatime,largeio,inode64,swalloc -#osd_objectstore: filestore +#osd_objectstore: bluestore # xattrs. by default, 'filestore xattr use omap' is set to 'true' if # 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index a935b7d52..6f117c9bd 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -362,7 +362,7 @@ cluster_network: "{{ public_network | regex_replace(' ', '') }}" osd_mkfs_type: xfs osd_mkfs_options_xfs: -f -i size=2048 osd_mount_options_xfs: noatime,largeio,inode64,swalloc -osd_objectstore: filestore +osd_objectstore: bluestore # xattrs. by default, 'filestore xattr use omap' is set to 'true' if # 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. 
This can From 145aef9fed0e44259fb15902c7ebbe742f46771a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 27 Sep 2018 10:21:17 +0200 Subject: [PATCH 012/105] defaults: do not disable THP on bluestore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As per #1013 it appears that BS will soon use THP to lower TLB misses, also disabling THP hasn't demonstrated any gains so far. Closes: https://github.com/ceph/ceph-ansible/issues/1013 Signed-off-by: Sébastien Han --- group_vars/all.yml.sample | 2 +- group_vars/rhcs.yml.sample | 2 +- roles/ceph-defaults/defaults/main.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index 43fdddf5c..7d6b0a879 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -495,7 +495,7 @@ dummy: # OS TUNING # ############# -#disable_transparent_hugepage: true +#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}" #os_tuning_params: # - { name: fs.file-max, value: 26234859 } # - { name: vm.zone_reclaim_mode, value: 0 } diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index 86aa44839..055ad00f2 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -495,7 +495,7 @@ ceph_rhcs_version: 3 # OS TUNING # ############# -#disable_transparent_hugepage: true +#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}" #os_tuning_params: # - { name: fs.file-max, value: 26234859 } # - { name: vm.zone_reclaim_mode, value: 0 } diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 6f117c9bd..8fd965582 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -487,7 +487,7 @@ ceph_conf_overrides: {} # OS TUNING # ############# -disable_transparent_hugepage: true +disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}" os_tuning_params: - { name: fs.file-max, value: 26234859 } - { name: vm.zone_reclaim_mode, value: 0 } From 9fe86c22682c7e5eddc610d97c12d4e7eb254102 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 27 Sep 2018 17:29:38 +0200 Subject: [PATCH 013/105] test: use osd_objecstore default value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Do not force filestore on our test but whatever is the default of osd_objecstore. 
Signed-off-by: Sébastien Han --- tests/functional/centos/7/cluster/group_vars/all | 2 -- tests/functional/centos/7/docker-collocation/group_vars/all | 2 -- tests/functional/centos/7/docker/group_vars/all | 2 -- 3 files changed, 6 deletions(-) diff --git a/tests/functional/centos/7/cluster/group_vars/all b/tests/functional/centos/7/cluster/group_vars/all index e33f448af..46934a5b0 100644 --- a/tests/functional/centos/7/cluster/group_vars/all +++ b/tests/functional/centos/7/cluster/group_vars/all @@ -4,9 +4,7 @@ ceph_repository: community cluster: test public_network: "192.168.1.0/24" cluster_network: "192.168.2.0/24" -journal_size: 100 radosgw_interface: eth1 -osd_objectstore: filestore ceph_conf_overrides: global: osd_pool_default_size: 1 diff --git a/tests/functional/centos/7/docker-collocation/group_vars/all b/tests/functional/centos/7/docker-collocation/group_vars/all index 99b2e5c1e..2324ec340 100644 --- a/tests/functional/centos/7/docker-collocation/group_vars/all +++ b/tests/functional/centos/7/docker-collocation/group_vars/all @@ -8,9 +8,7 @@ cluster: test monitor_interface: eth1 radosgw_interface: eth1 ceph_mon_docker_subnet: "{{ public_network }}" -journal_size: 100 ceph_docker_on_openstack: False -osd_objectstore: filestore public_network: "192.168.15.0/24" cluster_network: "192.168.16.0/24" rgw_override_bucket_index_max_shards: 16 diff --git a/tests/functional/centos/7/docker/group_vars/all b/tests/functional/centos/7/docker/group_vars/all index 9366db469..c3b5a92d0 100644 --- a/tests/functional/centos/7/docker/group_vars/all +++ b/tests/functional/centos/7/docker/group_vars/all @@ -8,9 +8,7 @@ cluster: test monitor_interface: eth1 radosgw_interface: eth1 ceph_mon_docker_subnet: "{{ public_network }}" -journal_size: 100 ceph_docker_on_openstack: False -osd_objectstore: filestore public_network: "192.168.17.0/24" cluster_network: "192.168.18.0/24" rgw_override_bucket_index_max_shards: 16 From 5da71e1ca1156af88d3ff1bedf905efe4fb47fab Mon Sep 17 00:00:00 2001 From: wumingqiao Date: Fri, 28 Sep 2018 16:58:56 +0800 Subject: [PATCH 014/105] purge-cluster: recursively remove ceph-related files, symlinks and directories under /etc/systemd/system. 
fix: https://github.com/ceph/ceph-ansible/issues/3166 Signed-off-by: wumingqiao --- infrastructure-playbooks/purge-cluster.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml index 64d8fc005..7e05ee864 100644 --- a/infrastructure-playbooks/purge-cluster.yml +++ b/infrastructure-playbooks/purge-cluster.yml @@ -633,6 +633,8 @@ find: paths: "/etc/systemd/system" pattern: "ceph*" + recurse: true + file_type: any register: systemd_files - name: remove ceph systemd unit files From 9747f3dbd5a2eada543a6f61e482e005b6660016 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Fri, 21 Sep 2018 14:46:30 -0500 Subject: [PATCH 015/105] purge-cluster: zap devices used with the lvm scenario Fixes: https://github.com/ceph/ceph-ansible/issues/3156 Signed-off-by: Andrew Schoen --- infrastructure-playbooks/purge-cluster.yml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml index 7e05ee864..7e63b927e 100644 --- a/infrastructure-playbooks/purge-cluster.yml +++ b/infrastructure-playbooks/purge-cluster.yml @@ -350,7 +350,7 @@ failed_when: false register: ceph_lockbox_partition_to_erase_path - - name: zap and destroy OSDs created by ceph-volume + - name: zap and destroy osds created by ceph-volume with lvm_volumes ceph_volume: data: "{{ item.data }}" data_vg: "{{ item.data_vg|default(omit) }}" @@ -367,6 +367,16 @@ when: - osd_scenario == "lvm" + - name: zap and destroy osds created by ceph-volume with devices + ceph_volume: + data: "{{ item }}" + action: "zap" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: "{{ devices | default([]) }}" + when: + - osd_scenario == "lvm" + - name: get ceph block partitions shell: | blkid -o device -t PARTLABEL="ceph block" From 4db6a213f72fef5f3cdabdb8adeb9dd8b25df5c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 27 Jul 2018 16:56:09 +0200 Subject: [PATCH 016/105] add ceph-handler role MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The role contains all the handlers for Ceph services. We decided to leave ceph-defaults role with variables and a few facts only. This is useful when organizing the site.yml files and also adding the known variables to infrastructure-playbooks. 
Signed-off-by: Sébastien Han --- .../tasks/create_ceph_initial_dirs.yml | 0 roles/ceph-config/tasks/main.yml | 3 + roles/ceph-defaults/tasks/main.yml | 8 +- roles/ceph-handler/LICENSE | 201 ++++++++++++++++++ roles/ceph-handler/README.md | 2 + .../handlers/main.yml | 0 roles/ceph-handler/meta/main.yml | 13 ++ .../tasks/check_running_cluster.yml | 0 .../tasks/check_running_containers.yml | 0 .../tasks/check_socket_non_container.yml | 0 roles/ceph-handler/tasks/main.yml | 3 + .../templates/restart_mds_daemon.sh.j2 | 0 .../templates/restart_mgr_daemon.sh.j2 | 0 .../templates/restart_mon_daemon.sh.j2 | 0 .../templates/restart_nfs_daemon.sh.j2 | 0 .../templates/restart_osd_daemon.sh.j2 | 0 .../templates/restart_rbd_mirror_daemon.sh.j2 | 0 .../templates/restart_rbd_target_api.sh.j2 | 0 .../templates/restart_rbd_target_gw.sh.j2 | 0 .../templates/restart_rgw_daemon.sh.j2 | 0 .../templates/restart_tcmu_runner.sh.j2 | 0 site-docker.yml.sample | 15 +- site.yml.sample | 11 + 23 files changed, 247 insertions(+), 9 deletions(-) rename roles/{ceph-defaults => ceph-config}/tasks/create_ceph_initial_dirs.yml (100%) create mode 100644 roles/ceph-handler/LICENSE create mode 100644 roles/ceph-handler/README.md rename roles/{ceph-defaults => ceph-handler}/handlers/main.yml (100%) create mode 100644 roles/ceph-handler/meta/main.yml rename roles/{ceph-defaults => ceph-handler}/tasks/check_running_cluster.yml (100%) rename roles/{ceph-defaults => ceph-handler}/tasks/check_running_containers.yml (100%) rename roles/{ceph-defaults => ceph-handler}/tasks/check_socket_non_container.yml (100%) create mode 100644 roles/ceph-handler/tasks/main.yml rename roles/{ceph-defaults => ceph-handler}/templates/restart_mds_daemon.sh.j2 (100%) rename roles/{ceph-defaults => ceph-handler}/templates/restart_mgr_daemon.sh.j2 (100%) rename roles/{ceph-defaults => ceph-handler}/templates/restart_mon_daemon.sh.j2 (100%) rename roles/{ceph-defaults => ceph-handler}/templates/restart_nfs_daemon.sh.j2 (100%) rename roles/{ceph-defaults => ceph-handler}/templates/restart_osd_daemon.sh.j2 (100%) rename roles/{ceph-defaults => ceph-handler}/templates/restart_rbd_mirror_daemon.sh.j2 (100%) rename roles/{ceph-defaults => ceph-handler}/templates/restart_rbd_target_api.sh.j2 (100%) rename roles/{ceph-defaults => ceph-handler}/templates/restart_rbd_target_gw.sh.j2 (100%) rename roles/{ceph-defaults => ceph-handler}/templates/restart_rgw_daemon.sh.j2 (100%) rename roles/{ceph-defaults => ceph-handler}/templates/restart_tcmu_runner.sh.j2 (100%) diff --git a/roles/ceph-defaults/tasks/create_ceph_initial_dirs.yml b/roles/ceph-config/tasks/create_ceph_initial_dirs.yml similarity index 100% rename from roles/ceph-defaults/tasks/create_ceph_initial_dirs.yml rename to roles/ceph-config/tasks/create_ceph_initial_dirs.yml diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml index 0e1e4389c..5e5b3526e 100644 --- a/roles/ceph-config/tasks/main.yml +++ b/roles/ceph-config/tasks/main.yml @@ -1,4 +1,7 @@ --- +- name: include create_ceph_initial_dirs.yml + include: create_ceph_initial_dirs.yml + # ceph-common - block: - name: create ceph conf directory diff --git a/roles/ceph-defaults/tasks/main.yml b/roles/ceph-defaults/tasks/main.yml index 3559ee8bd..0d1f7c93c 100644 --- a/roles/ceph-defaults/tasks/main.yml +++ b/roles/ceph-defaults/tasks/main.yml @@ -1,9 +1,3 @@ --- -- name: include check_running_cluster.yml - include_tasks: check_running_cluster.yml - - name: include facts.yml - include_tasks: facts.yml - -- name: include 
create_ceph_initial_dirs.yml - include_tasks: create_ceph_initial_dirs.yml + include: facts.yml diff --git a/roles/ceph-handler/LICENSE b/roles/ceph-handler/LICENSE new file mode 100644 index 000000000..b0d1c9fc8 --- /dev/null +++ b/roles/ceph-handler/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2014] [Guillaume Abrioux] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-handler/README.md b/roles/ceph-handler/README.md new file mode 100644 index 000000000..3145a7f94 --- /dev/null +++ b/roles/ceph-handler/README.md @@ -0,0 +1,2 @@ +# Ansible role: ceph-handler +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-defaults/handlers/main.yml b/roles/ceph-handler/handlers/main.yml similarity index 100% rename from roles/ceph-defaults/handlers/main.yml rename to roles/ceph-handler/handlers/main.yml diff --git a/roles/ceph-handler/meta/main.yml b/roles/ceph-handler/meta/main.yml new file mode 100644 index 000000000..acb144c8c --- /dev/null +++ b/roles/ceph-handler/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: Sébastien Han + description: Contains handlers for Ceph services + license: Apache + min_ansible_version: 2.3 + platforms: + - name: EL + versions: + - 7 + categories: + - system +dependencies: [] diff --git a/roles/ceph-defaults/tasks/check_running_cluster.yml b/roles/ceph-handler/tasks/check_running_cluster.yml similarity index 100% rename from roles/ceph-defaults/tasks/check_running_cluster.yml rename to roles/ceph-handler/tasks/check_running_cluster.yml diff --git a/roles/ceph-defaults/tasks/check_running_containers.yml b/roles/ceph-handler/tasks/check_running_containers.yml similarity index 100% rename from roles/ceph-defaults/tasks/check_running_containers.yml rename to roles/ceph-handler/tasks/check_running_containers.yml diff --git a/roles/ceph-defaults/tasks/check_socket_non_container.yml b/roles/ceph-handler/tasks/check_socket_non_container.yml similarity index 100% rename from roles/ceph-defaults/tasks/check_socket_non_container.yml rename to roles/ceph-handler/tasks/check_socket_non_container.yml diff --git a/roles/ceph-handler/tasks/main.yml b/roles/ceph-handler/tasks/main.yml new file mode 100644 index 000000000..09280cdee --- /dev/null +++ b/roles/ceph-handler/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: include check_running_cluster.yml + include: check_running_cluster.yml \ No newline at end of file diff --git a/roles/ceph-defaults/templates/restart_mds_daemon.sh.j2 b/roles/ceph-handler/templates/restart_mds_daemon.sh.j2 similarity index 100% rename from roles/ceph-defaults/templates/restart_mds_daemon.sh.j2 rename to roles/ceph-handler/templates/restart_mds_daemon.sh.j2 diff --git a/roles/ceph-defaults/templates/restart_mgr_daemon.sh.j2 b/roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 similarity index 100% rename from roles/ceph-defaults/templates/restart_mgr_daemon.sh.j2 rename to roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 diff --git a/roles/ceph-defaults/templates/restart_mon_daemon.sh.j2 b/roles/ceph-handler/templates/restart_mon_daemon.sh.j2 similarity index 100% rename from roles/ceph-defaults/templates/restart_mon_daemon.sh.j2 rename to roles/ceph-handler/templates/restart_mon_daemon.sh.j2 diff --git a/roles/ceph-defaults/templates/restart_nfs_daemon.sh.j2 b/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 similarity index 100% rename from roles/ceph-defaults/templates/restart_nfs_daemon.sh.j2 rename to 
roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 diff --git a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 b/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 similarity index 100% rename from roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 rename to roles/ceph-handler/templates/restart_osd_daemon.sh.j2 diff --git a/roles/ceph-defaults/templates/restart_rbd_mirror_daemon.sh.j2 b/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 similarity index 100% rename from roles/ceph-defaults/templates/restart_rbd_mirror_daemon.sh.j2 rename to roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 diff --git a/roles/ceph-defaults/templates/restart_rbd_target_api.sh.j2 b/roles/ceph-handler/templates/restart_rbd_target_api.sh.j2 similarity index 100% rename from roles/ceph-defaults/templates/restart_rbd_target_api.sh.j2 rename to roles/ceph-handler/templates/restart_rbd_target_api.sh.j2 diff --git a/roles/ceph-defaults/templates/restart_rbd_target_gw.sh.j2 b/roles/ceph-handler/templates/restart_rbd_target_gw.sh.j2 similarity index 100% rename from roles/ceph-defaults/templates/restart_rbd_target_gw.sh.j2 rename to roles/ceph-handler/templates/restart_rbd_target_gw.sh.j2 diff --git a/roles/ceph-defaults/templates/restart_rgw_daemon.sh.j2 b/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 similarity index 100% rename from roles/ceph-defaults/templates/restart_rgw_daemon.sh.j2 rename to roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 diff --git a/roles/ceph-defaults/templates/restart_tcmu_runner.sh.j2 b/roles/ceph-handler/templates/restart_tcmu_runner.sh.j2 similarity index 100% rename from roles/ceph-defaults/templates/restart_tcmu_runner.sh.j2 rename to roles/ceph-handler/templates/restart_tcmu_runner.sh.j2 diff --git a/site-docker.yml.sample b/site-docker.yml.sample index bb5c145d6..7d2e0fb7b 100644 --- a/site-docker.yml.sample +++ b/site-docker.yml.sample @@ -54,6 +54,7 @@ - role: ceph-defaults tags: [with_pkg, fetch_container_image] - role: ceph-validate + - role: ceph-handler - role: ceph-docker-common tags: [with_pkg, fetch_container_image] when: @@ -85,6 +86,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-docker-common - role: ceph-config tags: ['ceph_update_config'] @@ -115,6 +117,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-docker-common - role: ceph-config tags: ['ceph_update_config'] @@ -146,6 +149,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-docker-common - role: ceph-config tags: ['ceph_update_config'] @@ -173,6 +177,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-docker-common - role: ceph-config tags: ['ceph_update_config'] @@ -200,6 +205,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-docker-common - role: ceph-config tags: ['ceph_update_config'] @@ -227,6 +233,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-docker-common - role: ceph-config tags: ['ceph_update_config'] @@ -258,6 +265,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-docker-common - role: ceph-config tags: ['ceph_update_config'] @@ -285,6 +293,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-docker-common - role: ceph-config tags: ['ceph_update_config'] @@ -316,6 
+325,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-docker-common when: - inventory_hostname == groups.get('clients', ['']) | first @@ -346,7 +356,8 @@ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" roles: - { role: ceph-defaults, tags: ['ceph_update_config'] } - - { role: ceph-docker-common } + - role: ceph-handler + - ceph-docker-common - { role: ceph-config, tags: ['ceph_update_config'], when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" } - { role: ceph-iscsi-gw, when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" } post_tasks: @@ -375,4 +386,4 @@ msg: "{{ ceph_status.stdout_lines }}" delegate_to: "{{ groups['mons'][0] }}" run_once: true - when: not ceph_status.failed \ No newline at end of file + when: not ceph_status.failed diff --git a/site.yml.sample b/site.yml.sample index 3ab96a49c..a8a4342ff 100644 --- a/site.yml.sample +++ b/site.yml.sample @@ -91,6 +91,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] @@ -118,6 +119,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] @@ -149,6 +151,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] @@ -176,6 +179,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] @@ -203,6 +207,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] @@ -230,6 +235,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] @@ -257,6 +263,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] @@ -288,6 +295,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] @@ -319,6 +327,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] @@ -346,6 +355,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] @@ -375,6 +385,7 @@ roles: - role: ceph-defaults tags: ['ceph_update_config'] + - role: ceph-handler - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] From 54adb6d89425e277fdc4f339a5480a6e7ab8524c Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Fri, 21 Sep 2018 09:28:33 -0400 Subject: [PATCH 017/105] doc: redo lvm scenario documentation, improved wording and config descriptions Signed-off-by: Alfredo Deza --- docs/source/osds/scenarios.rst | 427 ++++++++++++++++++--------------- 1 file changed, 240 insertions(+), 187 deletions(-) diff --git a/docs/source/osds/scenarios.rst b/docs/source/osds/scenarios.rst index ed07d70f1..d7d074d18 100644 --- a/docs/source/osds/scenarios.rst +++ b/docs/source/osds/scenarios.rst @@ -1,12 +1,241 @@ OSD Scenarios ============= -The following are all of the available options for the ``osd_scenario`` config -setting. 
Defining an ``osd_scenario`` is mandatory for using ``ceph-ansible``. +There are a few *scenarios* that are supported and the differences are mainly +based on the Ceph tooling required to provision OSDs, but can also affect how +devices are being configured to create an OSD. + +Supported values for the required ``osd_scenario`` variable are: + +* :ref:`collocated ` +* :ref:`non-collocated ` +* :ref:`lvm ` + +Since the Ceph mimic release, it is preferred to use the :ref:`lvm scenario +` that uses the ``ceph-volume`` provisioning tool. Any other +scenario will cause deprecation warnings. + + +.. _osd_scenario_lvm: + +lvm +--- + +This OSD scenario uses ``ceph-volume`` to create OSDs, primarily using LVM, and +is only available when the Ceph release is luminous or newer. + +**It is the preferred method of provisioning OSDs.** + +It is enabled with the following setting:: + + + osd_scenario: lvm + +Other (optional) supported settings: + +- ``osd_objectstore``: Set the Ceph *objectstore* for the OSD. Available options + are ``filestore`` or ``bluestore``. You can only select ``bluestore`` with + the Ceph release is luminous or greater. Defaults to ``filestore`` if unset. + +- ``dmcrypt``: Enable Ceph's encryption on OSDs using ``dmcrypt``. + Defaults to ``false`` if unset. + +- ``osds_per_device``: Provision more than 1 OSD (the default if unset) per device. + + +Simple configuration +^^^^^^^^^^^^^^^^^^^^ + +With this approach, most of the decisions on how devices are configured to +provision an OSD are made by the Ceph tooling (``ceph-volume lvm batch`` in +this case). There is almost no room to modify how the OSD is composed given an +input of devices. + +To use this configuration, the ``devices`` option must be populated with the +raw device paths that will be used to provision the OSDs. + + +.. note:: Raw devices must be "clean", without a gpt partition table, or + logical volumes present. + + +For example, for a node that has ``/dev/sda`` and ``/dev/sdb`` intended for +Ceph usage, the configuration would be: + + +.. code-block:: yaml + + osd_scenario: lvm + devices: + - /dev/sda + - /dev/sdb + +In the above case, if both devices are spinning drives, 2 OSDs would be +created, each with its own collocated journal. + +Other provisioning strategies are possible, by mixing spinning and solid state +devices, for example: + +.. code-block:: yaml + + osd_scenario: lvm + devices: + - /dev/sda + - /dev/sdb + - /dev/nvme0n1 + +Similar to the initial example, this would end up producing 2 OSDs, but data +would be placed on the slower spinning drives (``/dev/sda``, and ``/dev/sdb``) +and journals would be placed on the faster solid state device ``/dev/nvme0n1``. +The ``ceph-volume`` tool describes this in detail in +`the "batch" subcommand section `_ + + +Other (optional) supported settings: + +- ``crush_device_class``: Sets the CRUSH device class for all OSDs created with this + method (it is not possible to have a per-OSD CRUSH device class using the *simple* + configuration approach). Values *must be* a string, like + ``crush_device_class: "ssd"`` + + +Advanced configuration +^^^^^^^^^^^^^^^^^^^^^^ + +This configuration is useful when more granular control is wanted when setting +up devices and how they should be arranged to provision an OSD. It requires an +existing setup of volume groups and logical volumes (``ceph-volume`` will **not** +create these). + +To use this configuration, the ``lvm_volumes`` option must be populated with +logical volumes and volume groups. 
Additionally, absolute paths to partitions +*can* be used for ``journal``, ``block.db``, and ``block.wal``. + +.. note:: This configuration uses ``ceph-volume lvm create`` to provision OSDs + +Supported ``lvm_volumes`` configuration settings: + +- ``data``: The logical volume name or full path to a raw device (an LV will be + created using 100% of the raw device) + +- ``data_vg``: The volume group name, **required** if ``data`` is a logical volume. + +- ``crush_device_class``: CRUSH device class name for the resulting OSD, allows + setting the device class for each OSD, unlike the global ``crush_device_class`` + that sets them for all OSDs. + +.. note:: If you wish to set the ``crush_device_class`` for the OSDs + when using ``devices`` you must set it using the global ``crush_device_class`` + option as shown above. There is no way to define a specific CRUSH device class + per OSD when using ``devices`` like there is for ``lvm_volumes``. + + +``filestore`` objectstore variables: + +- ``journal``: The logical volume name or full path to a partition. + +- ``journal_vg``: The volume group name, **required** if ``journal`` is a logical volume. + +.. warning:: Each entry must be unique, duplicate values are not allowed + + +``bluestore`` objectstore variables: + +- ``db``: The logical volume name or full path to a partition. + +- ``db_vg``: The volume group name, **required** if ``db`` is a logical volume. + +- ``wal``: The logical volume name or full path to a partition. + +- ``wal_vg``: The volume group name, **required** if ``wal`` is a logical volume. + + +.. note:: These ``bluestore`` variables are optional optimizations. Bluestore's + ``db`` and ``wal`` will only benefit from faster devices. It is possible to + create a bluestore OSD with a single raw device. + +.. warning:: Each entry must be unique, duplicate values are not allowed + + +``bluestore`` example using raw devices: + +.. code-block:: yaml + + osd_objectstore: bluestore + osd_scenario: lvm + lvm_volumes: + - data: /dev/sda + - data: /dev/sdb + +.. note:: Volume groups and logical volumes will be created in this case, + utilizing 100% of the devices. + +``bluestore`` example with logical volumes: + +.. code-block:: yaml + + osd_objectstore: bluestore + osd_scenario: lvm + lvm_volumes: + - data: data-lv1 + data_vg: data-vg1 + - data: data-lv2 + data_vg: data-vg2 + +.. note:: Volume groups and logical volumes must exist. + + +``bluestore`` example defining ``wal`` and ``db`` logical volumes: + +.. code-block:: yaml + + osd_objectstore: bluestore + osd_scenario: lvm + lvm_volumes: + - data: data-lv1 + data_vg: data-vg1 + db: db-lv1 + db_vg: db-vg1 + wal: wal-lv1 + wal_vg: wal-vg1 + - data: data-lv2 + data_vg: data-vg2 + db: db-lv2 + db_vg: db-vg2 + wal: wal-lv2 + wal_vg: wal-vg2 + +.. note:: Volume groups and logical volumes must exist. + + +``filestore`` example with logical volumes: + +.. code-block:: yaml + + osd_objectstore: filestore + osd_scenario: lvm + lvm_volumes: + - data: data-lv1 + data_vg: data-vg1 + journal: journal-lv1 + journal_vg: journal-vg1 + - data: data-lv2 + data_vg: data-vg2 + journal: journal-lv2 + journal_vg: journal-vg2 + +.. note:: Volume groups and logical volumes must exist. + + +.. _osd_scenario_collocated: collocated ---------- +.. warning:: This scenario is deprecated in the Ceph mimic release, and fully + removed in newer releases. It is recommended to use the + :ref:`lvm scenario <osd_scenario_lvm>` instead + This OSD scenario uses ``ceph-disk`` to create OSDs with collocated journals from raw devices.
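For orientation, a minimal ``collocated`` configuration (a sketch only; the required and optional settings, plus a full example with encryption, are detailed just below) would look like:

.. code-block:: yaml

   osd_scenario: collocated
   osd_objectstore: filestore
   devices:
     - /dev/sda
     - /dev/sdb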
@@ -18,7 +247,7 @@ has the following required configuration options: This scenario has the following optional configuration options: - ``osd_objectstore``: defaults to ``filestore`` if not set. Available options are ``filestore`` or ``bluestore``. - You can only select ``bluestore`` if the Ceph release is Luminous or greater. + You can only select ``bluestore`` if the Ceph release is luminous or greater. - ``dmcrypt``: defaults to ``false`` if not set. @@ -53,9 +282,16 @@ An example of using the ``collocated`` OSD scenario with encryption would look l - /dev/sda - /dev/sdb + +.. _osd_scenario_non_collocated: + non-collocated -------------- +.. warning:: This scenario is deprecated in the Ceph mimic release, and fully + removed in newer releases. It is recommended to use the + :ref:`lvm scenario <osd_scenario_lvm>` instead + This OSD scenario uses ``ceph-disk`` to create OSDs from raw devices with journals that exist on a dedicated device. @@ -69,7 +305,7 @@ This scenario has the following optional configuration options: - ``dedicated_devices``: defaults to ``devices`` if not set - ``osd_objectstore``: defaults to ``filestore`` if not set. Available options are ``filestore`` or ``bluestore``. - You can only select ``bluestore`` with the Ceph release is Luminous or greater. + You can only select ``bluestore`` when the Ceph release is luminous or greater. - ``dmcrypt``: defaults to ``false`` if not set. @@ -170,186 +406,3 @@ An example of using the ``non-collocated`` OSD scenario with encryption, bluesto bluestore_wal_devices: - /dev/sdd - /dev/sdd - -lvm ---- - -This OSD scenario uses ``ceph-volume`` to create OSDs from logical volumes and -is only available when the Ceph release is Luminous or newer. - - -Configurations -^^^^^^^^^^^^^^ - -``lvm_volumes`` or ``devices`` are the config option that needs to be defined to deploy OSDs -with the ``lvm`` osd scenario. - -- ``lvm_volumes`` is a list of dictionaries which expects a volume name and a volume group for - logical volumes, but can also accept a partition in the case of ``filestore`` for the ``journal``. - If ``lvm_volumes`` is defined then the ``ceph-volume lvm create`` command is used to create each OSD - defined in ``lvm_volumes``. - -- ``devices`` is a list of raw device names as strings. If ``devices`` is defined then the ``ceph-volume lvm batch`` - command will be used to deploy OSDs. You can also use the ``osds_per_device`` variable to inform ``ceph-volume`` how - many OSDs it should create from each device it finds suitable. - -Both ``lvm_volumes`` and ``devices`` can be defined and both methods would be used in the deployment or you -can pick just one method. - -This scenario supports encrypting your OSDs by setting ``dmcrypt: True``. If set, -all OSDs defined in ``lvm_volumes`` will be encrypted. - -The ``data`` key represents the logical volume name, raw device or partition that is to be used for your -OSD data. The ``data_vg`` key represents the volume group name that your -``data`` logical volume resides on. This key is required for purging of OSDs -created by this scenario. - -.. note:: - - Any logical volume or logical group used in ``lvm_volumes`` must be a name and not a path. - -.. note:: - - You can not use the same journal for many OSDs. - - -``filestore`` -^^^^^^^^^^^^^ - -There is filestore support which can be enabled with: - -.. code-block:: yaml - - osd_objectstore: filestore - -To configure this scenario use the ``lvm_volumes`` config option.
-``lvm_volumes`` is a list of dictionaries which expects a volume name and -a volume group for logical volumes, but can also accept a parition in the case of -``filestore`` for the ``journal``. - -The following keys are accepted for a ``filestore`` deployment: - -* ``data`` -* ``data_vg`` (not required if ``data`` is a raw device or partition) -* ``journal`` -* ``journal_vg`` (not required if ``journal`` is a partition and not a logical volume) -* ``crush_device_class`` (optional, sets the crush device class for the OSD) - -The ``journal`` key represents the logical volume name or partition that will be used for your OSD journal. - -For example, a configuration to use the ``lvm`` osd scenario would look like: - -.. code-block:: yaml - - osd_objectstore: filestore - osd_scenario: lvm - lvm_volumes: - - data: data-lv1 - data_vg: vg1 - journal: journal-lv1 - journal_vg: vg2 - crush_device_class: foo - - data: data-lv2 - journal: /dev/sda - data_vg: vg1 - - data: data-lv3 - journal: /dev/sdb1 - data_vg: vg2 - - data: /dev/sda - journal: /dev/sdb1 - - data: /dev/sda1 - journal: journal-lv1 - journal_vg: vg2 - -For example, a configuration to use the ``lvm`` osd scenario with encryption would look like: - -.. code-block:: yaml - - osd_objectstore: filestore - osd_scenario: lvm - dmcrypt: True - lvm_volumes: - - data: data-lv1 - data_vg: vg1 - journal: journal-lv1 - journal_vg: vg2 - crush_device_class: foo - -If you wished to use ``devices`` instead of ``lvm_volumes`` your configuration would look like: - -.. code-block:: yaml - - osd_objectstore: filestore - osd_scenario: lvm - crush_device_class: foo - devices: - - /dev/sda - - /dev/sdc - -.. note:: - - If you wish to change set the ``crush_device_class`` for the OSDs when using ``devices`` you must set it - using the global ``crush_device_class`` option as shown above. There is no way to define a specific crush device - class per OSD when using ``devices`` like there is for ``lvm_volumes``. - -``bluestore`` -^^^^^^^^^^^^^ - -This scenario allows a combination of devices to be used in an OSD. -``bluestore`` can work just with a single "block" device (specified by the -``data`` and optionally ``data_vg``) or additionally with a ``block.wal`` and ``block.db`` -(interchangeably) - -The following keys are accepted for a ``bluestore`` deployment: - -* ``data`` (required) -* ``data_vg`` (not required if ``data`` is a raw device or partition) -* ``db`` (optional for ``block.db``) -* ``db_vg`` (optional for ``block.db``) -* ``wal`` (optional for ``block.wal``) -* ``wal_vg`` (optional for ``block.wal``) -* ``crush_device_class`` (optional, sets the crush device class for the OSD) - -A ``bluestore`` lvm deployment, for all four different combinations supported -could look like: - -.. code-block:: yaml - - osd_objectstore: bluestore - osd_scenario: lvm - lvm_volumes: - - data: data-lv1 - data_vg: vg1 - crush_device_class: foo - - data: data-lv2 - data_vg: vg1 - wal: wal-lv1 - wal_vg: vg2 - - data: data-lv3 - data_vg: vg2 - db: db-lv1 - db_vg: vg2 - - data: data-lv4 - data_vg: vg4 - db: db-lv4 - db_vg: vg4 - wal: wal-lv4 - wal_vg: vg4 - - data: /dev/sda - -If you wished to use ``devices`` instead of ``lvm_volumes`` your configuration would look like: - -.. code-block:: yaml - - osd_objectstore: bluestore - osd_scenario: lvm - crush_device_class: foo - devices: - - /dev/sda - - /dev/sdc - -.. 
note:: - - If you wish to change set the ``crush_device_class`` for the OSDs when using ``devices`` you must set it - using the global ``crush_device_class`` option as shown above. There is no way to define a specific crush device - class per OSD when using ``devices`` like there is for ``lvm_volumes``. From eddb95941b7c57cde28be4d99751f80d30ce6fc4 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Fri, 28 Sep 2018 16:23:10 -0500 Subject: [PATCH 018/105] igw: valid client CHAP settings. The linux kernel target layer, LIO, does not support the iscsi target to mix ACLs that have chap enabled and disabled under the same tpg. This patch adds a check and fails if this type of setup is detected. This fixes Red Hat BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1615088 Signed-off-by: Mike Christie --- roles/ceph-validate/tasks/check_iscsi.yml | 10 ++++++++++ roles/ceph-validate/tasks/main.yml | 5 +++++ 2 files changed, 15 insertions(+) create mode 100644 roles/ceph-validate/tasks/check_iscsi.yml diff --git a/roles/ceph-validate/tasks/check_iscsi.yml b/roles/ceph-validate/tasks/check_iscsi.yml new file mode 100644 index 000000000..6c6a7670c --- /dev/null +++ b/roles/ceph-validate/tasks/check_iscsi.yml @@ -0,0 +1,10 @@ +--- +- name: fail if unsupported chap configuration + fail: + msg: "Mixing clients with CHAP enabled and disabled is not supported." + with_items: "{{ client_connections | default({}) }}" + when: + - item.status is defined + - item.status == "present" + - item.chap != '' + - " '' in client_connections | selectattr('status', 'match', 'present') | map(attribute='chap') | list" diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml index 71b490431..5458a0567 100644 --- a/roles/ceph-validate/tasks/main.yml +++ b/roles/ceph-validate/tasks/main.yml @@ -82,3 +82,8 @@ - radosgw_interface != "dummy" - radosgw_address == "0.0.0.0" - radosgw_address_block == "subnet" + +- name: include check_iscsi.yml + include: check_iscsi.yml + when: + - iscsi_gw_group_name in group_names From 85071e6e530ddd80df35920d9fbe63047478d66b Mon Sep 17 00:00:00 2001 From: Benjamin Cherian Date: Wed, 5 Sep 2018 09:59:50 -0700 Subject: [PATCH 019/105] Add support for different NTP daemons Allow user to choose between timesyncd, chronyd and ntpd Installation will default to timesyncd since it is distributed as part of the systemd installation for most distros. Added note indicating NTP daemon type is not used for containerized deployments. Fixes issue #3086 on Github Signed-off-by: Benjamin Cherian --- group_vars/all.yml.sample | 9 ++++-- group_vars/rhcs.yml.sample | 9 ++++-- roles/ceph-common/tasks/misc/ntp_debian.yml | 36 +++++++++++++++------ roles/ceph-common/tasks/misc/ntp_rpm.yml | 36 +++++++++++++++------ roles/ceph-defaults/defaults/main.yml | 9 ++++-- roles/ceph-validate/tasks/main.yml | 7 ++++ 6 files changed, 82 insertions(+), 24 deletions(-) diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index 7d6b0a879..42e6fa814 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -105,10 +105,15 @@ dummy: # Whether or not to install the ceph-test package. 
#ceph_test: false -# Enable the ntp service by default to avoid clock skew on -# ceph nodes +# Enable the ntp service by default to avoid clock skew on ceph nodes +# Disable if an appropriate NTP client is already installed and configured #ntp_service_enabled: true +# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd +# Note that this selection is currently ignored on containerized deployments +#ntp_daemon_type: timesyncd + + # Set uid/gid to default '64045' for bootstrap directories. # '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros. # These values have to be set according to the base OS used by the container image, NOT the host. diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index 055ad00f2..b260831dc 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -105,10 +105,15 @@ fetch_directory: ~/ceph-ansible-keys # Whether or not to install the ceph-test package. #ceph_test: false -# Enable the ntp service by default to avoid clock skew on -# ceph nodes +# Enable the ntp service by default to avoid clock skew on ceph nodes +# Disable if an appropriate NTP client is already installed and configured #ntp_service_enabled: true +# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd +# Note that this selection is currently ignored on containerized deployments +#ntp_daemon_type: timesyncd + + # Set uid/gid to default '64045' for bootstrap directories. # '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros. # These values have to be set according to the base OS used by the container image, NOT the host. diff --git a/roles/ceph-common/tasks/misc/ntp_debian.yml b/roles/ceph-common/tasks/misc/ntp_debian.yml index f94003d7a..f1da045f1 100644 --- a/roles/ceph-common/tasks/misc/ntp_debian.yml +++ b/roles/ceph-common/tasks/misc/ntp_debian.yml @@ -1,11 +1,29 @@ --- -- name: install ntp on debian - package: - name: ntp - state: present +- name: setup ntpd + block: + - command: timedatectl set-ntp no + - package: + name: ntp + state: present + - service: + name: ntp + enabled: yes + state: started + when: ntp_daemon_type == "ntpd" -- name: start the ntp service - service: - name: ntp - enabled: yes - state: started +- name: setup chrony + block: + - command: timedatectl set-ntp no + - package: + name: chrony + state: present + - service: + name: chronyd + enabled: yes + state: started + when: ntp_daemon_type == "chronyd" + +- name: setup timesyncd + block: + - command: timedatectl set-ntp on + when: ntp_daemon_type == "timesyncd" diff --git a/roles/ceph-common/tasks/misc/ntp_rpm.yml b/roles/ceph-common/tasks/misc/ntp_rpm.yml index 91d2d7d8a..866667c2b 100644 --- a/roles/ceph-common/tasks/misc/ntp_rpm.yml +++ b/roles/ceph-common/tasks/misc/ntp_rpm.yml @@ -1,11 +1,29 @@ --- -- name: install ntp - package: - name: ntp - state: present +- name: setup ntpd + block: + - command: timedatectl set-ntp no + - package: + name: ntp + state: present + - service: + name: ntpd + enabled: yes + state: started + when: ntp_daemon_type == "ntpd" -- name: start the ntp service - service: - name: ntpd - enabled: yes - state: started +- name: setup chrony + block: + - command: timedatectl set-ntp no + - package: + name: chrony + state: present + - service: + name: chronyd + enabled: yes + state: started + when: ntp_daemon_type == "chronyd" + +- name: setup timesyncd + block: + - command: timedatectl set-ntp on + when: ntp_daemon_type == 
"timesyncd" diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 8fd965582..91e2aa5ea 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -97,10 +97,15 @@ suse_package_dependencies: # Whether or not to install the ceph-test package. ceph_test: false -# Enable the ntp service by default to avoid clock skew on -# ceph nodes +# Enable the ntp service by default to avoid clock skew on ceph nodes +# Disable if an appropriate NTP client is already installed and configured ntp_service_enabled: true +# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd +# Note that this selection is currently ignored on containerized deployments +ntp_daemon_type: timesyncd + + # Set uid/gid to default '64045' for bootstrap directories. # '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros. # These values have to be set according to the base OS used by the container image, NOT the host. diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml index 5458a0567..dd7d764e5 100644 --- a/roles/ceph-validate/tasks/main.yml +++ b/roles/ceph-validate/tasks/main.yml @@ -28,6 +28,13 @@ - ceph_rhcs_cdn_debian_repo == 'https://customername:customerpasswd@rhcs.download.redhat.com' - ceph_repository not in ['rhcs', 'dev', 'obs'] +- name: validate ntp daemon type + fail: + msg: "ntp_daemon_type must be one of chronyd, ntpd, or timesyncd" + when: + - ntp_service_enabled + - ntp_daemon_type not in ['chronyd', 'ntpd', 'timesyncd'] + - name: make sure journal_size configured debug: msg: "WARNING: journal_size is configured to {{ journal_size }}, which is less than 5GB. This is not recommended and can lead to severe issues." From 6130bc841dd25adf9a1ae26e6f82aef6b33328d8 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 2 Oct 2018 15:55:47 +0200 Subject: [PATCH 020/105] config: look up for monitor_address_block in hostvars `monitor_address_block` should be read from hostvars[host] instead of current node being played. eg: Let's assume we have: ``` [mons] ceph-mon0 monitor_address=192.168.1.10 ceph-mon1 monitor_interface=eth1 ceph-mon2 monitor_address_block=192.168.1.0/24 ``` the ceph.conf generation task will end up with: ``` fatal: [ceph-mon0]: FAILED! => {} MSG: 'ansible.vars.hostvars.HostVarsVars object' has no attribute u'ansible_interface' ``` the reason is that it will assume `monitor_address_block` isn't defined even on ceph-mon2 because looking for `monitor_address_block` instead of `hostvars[host]['monitor_address_block']`, therefore it enters in the condition as default value: ``` {%- else -%} {% set interface = 'ansible_' + (monitor_interface | replace('-', '_')) %} {% if ip_version == 'ipv4' -%} {{ hostvars[host][interface][ip_version]['address'] }} {%- elif ip_version == 'ipv6' -%} [{{ hostvars[host][interface][ip_version][0]['address'] }}] {%- endif %} {%- endif %} ``` `monitor_interface` is set with default value `'interface'` so the `interface` variable is built with 'ansible_' + 'interface'. It makes ansible throwing a confusing message about `'ansible_interface'`. 
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1635303 Signed-off-by: Guillaume Abrioux --- roles/ceph-config/templates/ceph.conf.j2 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/ceph-config/templates/ceph.conf.j2 b/roles/ceph-config/templates/ceph.conf.j2 index 090d5fb51..18b7b1539 100644 --- a/roles/ceph-config/templates/ceph.conf.j2 +++ b/roles/ceph-config/templates/ceph.conf.j2 @@ -44,11 +44,11 @@ mon initial members = {% for host in groups[mon_group_name] %} {% if not containerized_deployment and not containerized_deployment_with_kv -%} mon host = {% if nb_mon > 0 %} {% for host in groups[mon_group_name] -%} - {% if monitor_address_block != 'subnet' %} + {% if hostvars[host]['monitor_address_block'] is defined and hostvars[host]['monitor_address_block'] != 'subnet' %} {% if ip_version == 'ipv4' -%} - {{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }} + {{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[host]['monitor_address_block']) | first }} {%- elif ip_version == 'ipv6' -%} - [{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}] + [{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[host]['monitor_address_block']) | first }}] {%- endif %} {% elif hostvars[host]['monitor_address'] is defined and hostvars[host]['monitor_address'] != '0.0.0.0' -%} {% if ip_version == 'ipv4' -%} @@ -84,11 +84,11 @@ log file = /dev/null mon cluster log file = /dev/null mon host = {% if nb_mon > 0 %} {% for host in groups[mon_group_name] -%} - {% if monitor_address_block != 'subnet' %} + {% if hostvars[host]['monitor_address_block'] is defined and hostvars[host]['monitor_address_block'] != 'subnet' %} {% if ip_version == 'ipv4' -%} - {{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }} + {{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[host]['monitor_address_block']) | first }} {%- elif ip_version == 'ipv6' -%} - [{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}] + [{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[host]['monitor_address_block']) | first }}] {%- endif %} {% elif hostvars[host]['monitor_address'] is defined and hostvars[host]['monitor_address'] != '0.0.0.0' -%} {% if ip_version == 'ipv4' -%} From 54b02fe1876cb6a33aebd8fcba04bd426f30b967 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 2 Oct 2018 17:31:49 +0200 Subject: [PATCH 021/105] switch: support migration when cluster is scrubbing Similar to c13a3c3 we must allow scrubbing when running this playbook. In cluster with a large number of PGs, it can be expected some of them scrubbing, it's a normal operation. Preventing from scrubbing operation force to set noscrub flag. This commit allows to switch from non containerized to containerized environment even while PGs are scrubbing. 
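As a self-contained illustration of the new accounting (hypothetical PG counts; the JSON field names follow the `ceph -s --format json` output this playbook already parses), any `active+clean*` state now counts towards the "good" total:

```yaml
# toy_pg_check.yml : run with `ansible-playbook toy_pg_check.yml`
- hosts: localhost
  gather_facts: false
  vars:
    pgmap:
      num_pgs: 104
      pgs_by_state:
        - { state_name: "active+clean", count: 100 }
        - { state_name: "active+clean+scrubbing+deep", count: 3 }
        - { state_name: "active+remapped+backfilling", count: 1 }
  tasks:
    - debug:
        # 100 + 3 = 103 != 104, so the real task keeps retrying; scrubbing PGs
        # no longer hold the migration up, only genuinely unclean ones do.
        msg: "{{ (pgmap.pgs_by_state | selectattr('state_name', 'search', '^active\\+clean') | map(attribute='count') | list | sum) == pgmap.num_pgs }}"
```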
Closes: #3182 Signed-off-by: Guillaume Abrioux --- ...h-from-non-containerized-to-containerized-ceph-daemons.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index 39f8e674e..827fd8efe 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -295,9 +295,9 @@ command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s --format json" register: ceph_health_post until: > - ((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) == 1 + (((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) > 0) and - (ceph_health_post.stdout | from_json).pgmap.pgs_by_state.0.state_name == "active+clean" + (((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | selectattr('state_name', 'search', '^active\\+clean') | map(attribute='count') | list | sum) == (ceph_pgs.stdout | from_json).pgmap.num_pgs) delegate_to: "{{ groups[mon_group_name][0] }}" retries: "{{ health_osd_check_retries }}" delay: "{{ health_osd_check_delay }}" From 03e76af7b46e05a6e86121b69190988c06fa0ca5 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 2 Oct 2018 19:22:20 +0200 Subject: [PATCH 022/105] switch: add missing call to ceph-handler role Add the missing call to the ceph-handler role, otherwise we can't reference variables registered by ceph-handler from other roles. Signed-off-by: Guillaume Abrioux --- ...rom-non-containerized-to-containerized-ceph-daemons.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index 827fd8efe..f3c8da477 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -111,6 +111,7 @@ roles: - ceph-defaults + - ceph-handler - ceph-docker-common - ceph-mon @@ -168,6 +169,7 @@ roles: - ceph-defaults + - ceph-handler - ceph-docker-common - ceph-mgr @@ -282,6 +284,7 @@ roles: - ceph-defaults + - ceph-handler - ceph-docker-common - ceph-osd @@ -343,6 +346,7 @@ roles: - ceph-defaults + - ceph-handler - ceph-docker-common - ceph-mds @@ -385,6 +389,7 @@ roles: - ceph-defaults + - ceph-handler - ceph-docker-common - ceph-rgw @@ -427,6 +432,7 @@ roles: - ceph-defaults + - ceph-handler - ceph-docker-common - ceph-rbd-mirror @@ -473,5 +479,6 @@ roles: - ceph-defaults + - ceph-handler - ceph-docker-common - ceph-nfs From bae0f41705e4ca25492a7ff0169490331b897874 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 3 Oct 2018 13:39:35 +0200 Subject: [PATCH 023/105] switch: copy initial mon keyring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to copy this key into /etc/ceph so that when ceph-docker-common runs it can fetch it to the ansible server. Previously the task wasn't failing because `fail_on_missing` was False before 2.5; now it's True, hence the failure.
Signed-off-by: Sébastien Han --- ...rom-non-containerized-to-containerized-ceph-daemons.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index f3c8da477..8bab92390 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -109,6 +109,13 @@ failed_when: false when: ldb_files.rc == 0 + - name: copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-docker-common + command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_hostname }}/keyring /etc/ceph/{{ cluster }}.mon.keyring + args: + creates: /etc/ceph/{{ cluster }}.mon.keyring + changed_when: false + failed_when: false + roles: - ceph-defaults - ceph-handler From b5d2ea269f30e272ae33c71bafd0e512dd9f6e31 Mon Sep 17 00:00:00 2001 From: Rishabh Dave Date: Fri, 10 Aug 2018 08:16:30 -0400 Subject: [PATCH 024/105] don't use "static" field while including tasks Instead used "import_tasks" and "include_tasks" to tell whether tasks must be included statically or dynamically. Fixes: https://github.com/ceph/ceph-ansible/issues/2998 Signed-off-by: Rishabh Dave --- .../untested-by-ci/purge-multisite.yml | 4 +- roles/ceph-common/tasks/main.yml | 60 +++++++------------ roles/ceph-mon/tasks/main.yml | 6 +- roles/ceph-osd/tasks/main.yml | 54 ++++++----------- roles/ceph-rgw/tasks/main.yml | 42 +++++-------- roles/ceph-rgw/tasks/multisite/main.yml | 12 ++-- 6 files changed, 60 insertions(+), 118 deletions(-) diff --git a/infrastructure-playbooks/untested-by-ci/purge-multisite.yml b/infrastructure-playbooks/untested-by-ci/purge-multisite.yml index c17500e52..37608ea09 100644 --- a/infrastructure-playbooks/untested-by-ci/purge-multisite.yml +++ b/infrastructure-playbooks/untested-by-ci/purge-multisite.yml @@ -6,6 +6,6 @@ - include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml handlers: - - include: roles/ceph-rgw/handlers/main.yml # Ansible 2.1.0 bug will ignore included handlers without this - static: True + - name: import_tasks roles/ceph-rgw/handlers/main.yml + import_tasks: roles/ceph-rgw/handlers/main.yml diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index 9a024246f..049cacf45 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -1,55 +1,39 @@ --- -- name: include installs/install_on_redhat.yml - include: installs/install_on_redhat.yml - when: - - ansible_os_family == 'RedHat' +- name: include_tasks installs/install_on_redhat.yml + include_tasks: installs/install_on_redhat.yml + when: ansible_os_family == 'RedHat' tags: - package-install - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include installs/install_on_suse.yml - include: installs/install_on_suse.yml - when: - - ansible_os_family == 'Suse' +- name: include_tasks installs/install_on_suse.yml + include_tasks: installs/install_on_suse.yml + when: ansible_os_family == 'Suse' tags: - package-install - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include installs/install_on_debian.yml - include: installs/install_on_debian.yml - when: - - ansible_os_family == 'Debian' +- name: include_tasks installs/install_on_debian.yml 
+ include_tasks: installs/install_on_debian.yml + when: ansible_os_family == 'Debian' tags: - package-install - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include installs/install_on_clear.yml - include: installs/install_on_clear.yml - when: - - ansible_os_family == 'ClearLinux' +- name: include_tasks installs/install_on_clear.yml + include_tasks: installs/install_on_clear.yml + when: ansible_os_family == 'ClearLinux' tags: - package-install - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include ntp debian setup tasks - include: "misc/ntp_debian.yml" +- name: include_tasks "misc/ntp_debian.yml" + include_tasks: "misc/ntp_debian.yml" when: - ansible_os_family == 'Debian' - ntp_service_enabled - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include ntp rpm setup tasks - include: "misc/ntp_rpm.yml" +- name: include_tasks "misc/ntp_rpm.yml" + include_tasks: "misc/ntp_rpm.yml" when: - ansible_os_family in ['RedHat', 'Suse'] - ntp_service_enabled - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False - name: get ceph version command: ceph --version @@ -69,20 +53,16 @@ tags: - always -- name: include checks/check_firewall.yml - include: checks/check_firewall.yml +- name: include_tasks checks/check_firewall.yml + include_tasks: checks/check_firewall.yml when: - check_firewall - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include misc/configure_firewall_rpm.yml - include: misc/configure_firewall_rpm.yml +- name: include_tasks misc/configure_firewall_rpm.yml + include_tasks: misc/configure_firewall_rpm.yml when: - configure_firewall - ansible_os_family in ['RedHat', 'Suse'] - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False - name: include facts_mon_fsid.yml include_tasks: facts_mon_fsid.yml diff --git a/roles/ceph-mon/tasks/main.yml b/roles/ceph-mon/tasks/main.yml index 3cf192d09..ef6eb6211 100644 --- a/roles/ceph-mon/tasks/main.yml +++ b/roles/ceph-mon/tasks/main.yml @@ -13,11 +13,9 @@ import_tasks: start_monitor.yml when: not containerized_deployment -- name: include ceph_keys.yml - include: ceph_keys.yml +- name: include_tasks ceph_keys.yml + include_tasks: ceph_keys.yml when: not containerized_deployment - # this avoids the bug mentioned here: https://github.com/ansible/ansible/issues/18206 - static: no - name: include secure_cluster.yml include_tasks: secure_cluster.yml diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index 15399c72a..d7ed8e83a 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -1,8 +1,6 @@ --- -- name: include system_tuning.yml - include: system_tuning.yml - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False +- name: include_tasks system_tuning.yml + include_tasks: system_tuning.yml - name: install dependencies package: @@ -12,10 +10,8 @@ - not containerized_deployment - ansible_os_family != 'ClearLinux' -- name: include common.yml - include: common.yml - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False +- name: 
include_tasks common.yml + include_tasks: common.yml - name: include ceph_disk_cli_options_facts.yml include_tasks: ceph_disk_cli_options_facts.yml @@ -35,61 +31,47 @@ when: - osd_scenario != 'lvm' -- name: include scenarios/collocated.yml - include: scenarios/collocated.yml +- name: include_tasks scenarios/collocated.yml + include_tasks: scenarios/collocated.yml when: - osd_scenario == 'collocated' - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include scenarios/non-collocated.yml - include: scenarios/non-collocated.yml +- name: include_tasks scenarios/non-collocated.yml + include_tasks: scenarios/non-collocated.yml when: - not osd_auto_discovery - osd_scenario == 'non-collocated' - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include scenarios/lvm.yml - include: scenarios/lvm.yml +- name: include_tasks scenarios/lvm.yml + include_tasks: scenarios/lvm.yml when: - osd_scenario == 'lvm' - lvm_volumes|length > 0 - not containerized_deployment - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include scenarios/lvm-batch.yml - include: scenarios/lvm-batch.yml +- name: include_tasks scenarios/lvm-batch.yml + include_tasks: scenarios/lvm-batch.yml when: - osd_scenario == 'lvm' - devices|length > 0 - not containerized_deployment - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include activate_osds.yml - include: activate_osds.yml +- name: include_tasks activate_osds.yml + include_tasks: activate_osds.yml when: - not containerized_deployment - osd_scenario != 'lvm' - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include start_osds.yml - include: start_osds.yml +- name: include_tasks start_osds.yml + include_tasks: start_osds.yml when: - not containerized_deployment - osd_scenario != 'lvm' - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include docker/main.yml - include: docker/main.yml +- name: include_tasks docker/main.yml + include_tasks: docker/main.yml when: - containerized_deployment - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False - name: set_fact openstack_keys_tmp - preserve backward compatibility after the introduction of the ceph_keys module set_fact: diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml index 9abb84424..9d86b1c56 100644 --- a/roles/ceph-rgw/tasks/main.yml +++ b/roles/ceph-rgw/tasks/main.yml @@ -2,42 +2,28 @@ - name: include common.yml include_tasks: common.yml -- name: include pre_requisite.yml - include: pre_requisite.yml - when: - - not containerized_deployment - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False +- name: include_tasks pre_requisite.yml + include_tasks: pre_requisite.yml + when: not containerized_deployment -- name: include openstack-keystone.yml - include: openstack-keystone.yml - when: - - radosgw_keystone_ssl|bool - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False +- name: include_tasks openstack-keystone.yml + include_tasks: 
openstack-keystone.yml + when: radosgw_keystone_ssl|bool -- name: include start_radosgw.yml - include: start_radosgw.yml - when: - - not containerized_deployment - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False +- name: include_tasks start_radosgw.yml + include_tasks: start_radosgw.yml + when: not containerized_deployment -- name: include multisite/main.yml - include: multisite/main.yml +- name: include_tasks multisite/main.yml + include_tasks: multisite/main.yml when: - rgw_zone != "" - rgw_multisite - ceph_release_num[ceph_release] >= ceph_release_num.jewel - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include docker/main.yml - include: docker/main.yml - when: - - containerized_deployment - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False +- name: include_tasks docker/main.yml + include_tasks: docker/main.yml + when: containerized_deployment - name: create rgw pools if rgw_create_pools is defined command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}" diff --git a/roles/ceph-rgw/tasks/multisite/main.yml b/roles/ceph-rgw/tasks/multisite/main.yml index 1f1b1a6fb..dd8ac4ea6 100644 --- a/roles/ceph-rgw/tasks/multisite/main.yml +++ b/roles/ceph-rgw/tasks/multisite/main.yml @@ -3,21 +3,17 @@ include_tasks: checks.yml # Include the tasks depending on the zone type -- name: include master multisite tasks - include: master.yml +- name: include_tasks master.yml + include_tasks: master.yml when: - rgw_zonemaster is defined - rgw_zonemaster - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False -- name: include secondary multisite tasks - include: secondary.yml +- name: include_tasks secondary.yml + include_tasks: secondary.yml when: - rgw_zonesecondary is defined - rgw_zonesecondary - # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) - static: False # Continue with common tasks - name: add zone to rgw stanza in ceph.conf From 79bd06ad28d8a078bd95923b230eb8d5b66095b2 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Fri, 5 Oct 2018 13:15:54 +0200 Subject: [PATCH 025/105] rolling_update: add ceph-handler role since the introduction of ceph-handler, it has to be added in rolling_update playbook as well Signed-off-by: Guillaume Abrioux --- infrastructure-playbooks/rolling_update.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index 3013fffa3..d5d5ea64f 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -117,6 +117,7 @@ roles: - ceph-defaults + - ceph-handler - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config @@ -289,6 +290,7 @@ roles: - ceph-defaults + - ceph-handler - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config @@ -353,6 +355,7 @@ roles: - ceph-defaults + - ceph-handler - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config @@ -506,6 +509,7 @@ roles: - 
ceph-defaults + - ceph-handler - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config @@ -552,6 +556,7 @@ roles: - ceph-defaults + - ceph-handler - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config @@ -606,6 +611,7 @@ roles: - ceph-defaults + - ceph-handler - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config @@ -656,6 +662,7 @@ roles: - ceph-defaults + - ceph-handler - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config @@ -711,6 +718,7 @@ roles: - ceph-defaults + - ceph-handler - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config @@ -741,6 +749,7 @@ roles: - ceph-defaults + - ceph-handler - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config From be31c15ccdae73e22c58d7bec566847804ff91b4 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Thu, 4 Oct 2018 10:02:24 +0200 Subject: [PATCH 026/105] follow up on b5d2ea2 Add some missed statements Signed-off-by: Guillaume Abrioux --- roles/ceph-config/tasks/main.yml | 2 +- roles/ceph-defaults/tasks/main.yml | 2 +- roles/ceph-validate/tasks/main.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml index 5e5b3526e..c12bce2e7 100644 --- a/roles/ceph-config/tasks/main.yml +++ b/roles/ceph-config/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: include create_ceph_initial_dirs.yml - include: create_ceph_initial_dirs.yml + include_tasks: create_ceph_initial_dirs.yml # ceph-common - block: diff --git a/roles/ceph-defaults/tasks/main.yml b/roles/ceph-defaults/tasks/main.yml index 0d1f7c93c..37b7149d2 100644 --- a/roles/ceph-defaults/tasks/main.yml +++ b/roles/ceph-defaults/tasks/main.yml @@ -1,3 +1,3 @@ --- - name: include facts.yml - include: facts.yml + include_tasks: facts.yml diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml index dd7d764e5..50a5a2c17 100644 --- a/roles/ceph-validate/tasks/main.yml +++ b/roles/ceph-validate/tasks/main.yml @@ -91,6 +91,6 @@ - radosgw_address_block == "subnet" - name: include check_iscsi.yml - include: check_iscsi.yml + include_tasks: check_iscsi.yml when: - iscsi_gw_group_name in group_names From 3e2cdcc7354ca1951db1e87d42098a9e66253ffb Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Fri, 5 Oct 2018 14:33:04 +0200 Subject: [PATCH 027/105] common: remove check_firewall code Check firewall isn't working as expected and might break deployments. This part of the code will be reworked soon. Let's focus on configure_firewall code for now. 
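For operators who relied on the old check, the only remaining knob is configure_firewall; a minimal group_vars sketch (the variable name comes from ceph-defaults, the values are illustrative only):

    # group_vars/all.yml -- illustrative values only
    configure_firewall: true   # let ceph-ansible open the required ports itself
    # check_firewall is no longer consumed by any task after this patch, so a
    # leftover check_firewall entry in an existing inventory is simply ignored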
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1541840 Signed-off-by: Guillaume Abrioux --- group_vars/all.yml.sample | 6 - group_vars/rhcs.yml.sample | 6 - .../tasks/checks/check_firewall.yml | 117 ------------------ roles/ceph-common/tasks/main.yml | 5 - roles/ceph-defaults/defaults/main.yml | 6 - 5 files changed, 140 deletions(-) delete mode 100644 roles/ceph-common/tasks/checks/check_firewall.yml diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index 42e6fa814..5c5a7fd66 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -56,12 +56,6 @@ dummy: #iscsi_gw_group_name: iscsigws #mgr_group_name: mgrs -# If check_firewall is true, then ansible will try to determine if the -# Ceph ports are blocked by a firewall. If the machine running ansible -# cannot reach the Ceph ports for some other reason, you may need or -# want to set this to False to skip those checks. -#check_firewall: False - # If configure_firewall is true, then ansible will try to configure the # appropriate firewalling rules so that Ceph daemons can communicate # with each others. diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index b260831dc..919890db4 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -56,12 +56,6 @@ fetch_directory: ~/ceph-ansible-keys #iscsi_gw_group_name: iscsigws #mgr_group_name: mgrs -# If check_firewall is true, then ansible will try to determine if the -# Ceph ports are blocked by a firewall. If the machine running ansible -# cannot reach the Ceph ports for some other reason, you may need or -# want to set this to False to skip those checks. -#check_firewall: False - # If configure_firewall is true, then ansible will try to configure the # appropriate firewalling rules so that Ceph daemons can communicate # with each others. 
diff --git a/roles/ceph-common/tasks/checks/check_firewall.yml b/roles/ceph-common/tasks/checks/check_firewall.yml deleted file mode 100644 index d1e6adb72..000000000 --- a/roles/ceph-common/tasks/checks/check_firewall.yml +++ /dev/null @@ -1,117 +0,0 @@ ---- -- name: check if nmap is installed - local_action: - module: command - command -v nmap - changed_when: false - failed_when: false - register: nmapexist - run_once: true - check_mode: no - -- name: inform that nmap is not present - debug: - msg: "nmap is not installed, can not test if ceph ports are allowed :(" - run_once: true - when: - - nmapexist.rc != 0 - -- name: check if monitor port is not filtered - local_action: - module: shell - set -o pipefail && nmap -p 6789 {{ hostvars[inventory_hostname]['ansible_' + monitor_interface]['ipv4']['address'] if hostvars[inventory_hostname]['ansible_' + monitor_interface] is defined else hostvars[inventory_hostname]['monitor_address'] }} | grep -sqo -e filtered -e '0 hosts up' - changed_when: false - failed_when: false - register: monportstate - check_mode: no - when: - - mon_group_name in group_names - - nmapexist.rc == 0 - -- name: fail if monitor port is filtered - fail: - msg: "Please allow port 6789 on your firewall" - when: - - mon_group_name in group_names - - nmapexist.rc == 0 - - monportstate.rc == 0 - -- name: check if osd and mds range is not filtered (osd hosts) - local_action: - module: shell - set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up' - changed_when: false - failed_when: false - register: osdrangestate - check_mode: no - when: - - osd_group_name in group_names - - nmapexist.rc == 0 - -- name: fail if osd and mds range is filtered (osd hosts) - fail: - msg: "Please allow range from 6800 to 7300 on your firewall" - when: - - osd_group_name in group_names - - nmapexist.rc == 0 - - osdrangestate.rc == 0 - -- name: check if osd and mds range is not filtered (mds hosts) - local_action: - module: shell - set -o pipefail && nmap -p 6800-7300 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up' - changed_when: false - failed_when: false - register: mdsrangestate - check_mode: no - when: - - mds_group_name in group_names - - nmapexist.rc == 0 - -- name: fail if osd and mds range is filtered (mds hosts) - fail: - msg: "Please allow range from 6800 to 7300 on your firewall" - when: - - mds_group_name in group_names - - nmapexist.rc == 0 - - mdsrangestate.rc == 0 - -- name: check if rados gateway port is not filtered - local_action: - module: shell - set -o pipefail && nmap -p {{ radosgw_frontend_port }} {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up' - changed_when: false - failed_when: false - register: rgwportstate - check_mode: no - when: - - rgw_group_name in group_names - - nmapexist.rc == 0 - -- name: fail if rados gateway port is filtered - fail: - msg: "Please allow port {{ radosgw_frontend_port }} on your firewall" - when: - - rgw_group_name in group_names - - nmapexist.rc == 0 - - rgwportstate.rc == 0 - -- name: check if NFS ports are not filtered - local_action: - module: shell - set -o pipefail && nmap -p 111,2049 {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} | grep -sqo -e filtered -e '0 hosts up' - changed_when: false - failed_when: false - register: nfsportstate - check_mode: no - when: - - nfs_group_name in group_names - - 
nmapexist.rc == 0 - -- name: fail if NFS ports are filtered - fail: - msg: "Please allow ports 111 and 2049 on your firewall" - when: - - nfs_group_name in group_names - - nmapexist.rc == 0 - - nfsportstate.rc == 0 diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index 049cacf45..14b38787d 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -53,11 +53,6 @@ tags: - always -- name: include_tasks checks/check_firewall.yml - include_tasks: checks/check_firewall.yml - when: - - check_firewall - - name: include_tasks misc/configure_firewall_rpm.yml include_tasks: misc/configure_firewall_rpm.yml when: diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 91e2aa5ea..9a1905b04 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -48,12 +48,6 @@ client_group_name: clients iscsi_gw_group_name: iscsigws mgr_group_name: mgrs -# If check_firewall is true, then ansible will try to determine if the -# Ceph ports are blocked by a firewall. If the machine running ansible -# cannot reach the Ceph ports for some other reason, you may need or -# want to set this to False to skip those checks. -check_firewall: False - # If configure_firewall is true, then ansible will try to configure the # appropriate firewalling rules so that Ceph daemons can communicate # with each others. From 9180f6a2772185dc3dd4b713fca21daef9e41919 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 5 Oct 2018 14:05:11 +0200 Subject: [PATCH 028/105] rhcs: add helpers for the containerized deployment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We give more assistance to consultants deplying by setting the registry and the image name. Signed-off-by: Sébastien Han --- group_vars/rhcs.yml.sample | 6 +++--- rhcs_edits.txt | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index 919890db4..b25d12c7e 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -512,9 +512,9 @@ ceph_rhcs_version: 3 ########## #docker_exec_cmd: #docker: false -#ceph_docker_image: "ceph/daemon" -#ceph_docker_image_tag: latest -#ceph_docker_registry: docker.io +ceph_docker_image: "rhceph-3-rhel7" +ceph_docker_image_tag: "latest" +ceph_docker_registry: "registry.access.redhat.com/rhceph/" #ceph_docker_enable_centos_extra_repo: false #ceph_docker_on_openstack: false #containerized_deployment: False diff --git a/rhcs_edits.txt b/rhcs_edits.txt index f554b440b..897e912ad 100644 --- a/rhcs_edits.txt +++ b/rhcs_edits.txt @@ -2,4 +2,7 @@ ceph_repository: rhcs ceph_origin: repository fetch_directory: ~/ceph-ansible-keys ceph_rhcs_version: 3 +ceph_docker_image: "rhceph-3-rhel7" +ceph_docker_image_tag: "latest" +ceph_docker_registry: "registry.access.redhat.com/rhceph/" # END OF FILE, DO NOT TOUCH ME! \ No newline at end of file From 82ec5a29f235a07f396779564e0cec0b9a2ebfdd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Mon, 8 Oct 2018 09:45:58 -0400 Subject: [PATCH 029/105] site: use default value for 'cluster' variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If someone's cluster name is 'ceph' then the playbook will fail (with no errors because of ignore_errors) saying it can not find the variable. So let's declare the default. 
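In Jinja terms the guard is just the default filter; a generic sketch, with a made-up debug task for illustration:

    # if 'cluster' is undefined (no group_vars override) fall back to "ceph";
    # a value set in group_vars still takes precedence
    - name: show the resolved cluster name
      debug:
        msg: "cluster resolves to {{ cluster | default('ceph') }}"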
If the cluster name is different then it'll be in group_vars and thus there won't be any failre. Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1636962 Signed-off-by: Sébastien Han --- site-docker.yml.sample | 4 ++-- site.yml.sample | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/site-docker.yml.sample b/site-docker.yml.sample index 7d2e0fb7b..cba5cd896 100644 --- a/site-docker.yml.sample +++ b/site-docker.yml.sample @@ -374,14 +374,14 @@ become: True tasks: - name: get ceph status from the first monitor - command: docker exec ceph-mon-{{ hostvars[groups['mons'][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s + command: docker exec ceph-mon-{{ hostvars[groups['mons'][0]]['ansible_hostname'] }} ceph --cluster {{ cluster | default ('ceph') }} -s register: ceph_status changed_when: false delegate_to: "{{ groups['mons'][0] }}" run_once: true ignore_errors: true # we skip the error if mon_group_name is different than 'mons' - - name: "show ceph status for cluster {{ cluster }}" + - name: "show ceph status for cluster {{ cluster | default ('ceph') }}" debug: msg: "{{ ceph_status.stdout_lines }}" delegate_to: "{{ groups['mons'][0] }}" diff --git a/site.yml.sample b/site.yml.sample index a8a4342ff..6ead1290a 100644 --- a/site.yml.sample +++ b/site.yml.sample @@ -408,14 +408,14 @@ become: True tasks: - name: get ceph status from the first monitor - command: ceph --cluster {{ cluster }} -s + command: ceph --cluster {{ cluster | default ('ceph') }} -s register: ceph_status changed_when: false delegate_to: "{{ groups['mons'][0] }}" run_once: true ignore_errors: true # we skip the error if mon_group_name is different than 'mons' - - name: "show ceph status for cluster {{ cluster }}" + - name: "show ceph status for cluster {{ cluster | default ('ceph') }}" debug: msg: "{{ ceph_status.stdout_lines }}" delegate_to: "{{ groups['mons'][0] }}" From 8bb131c7128cdd951bc1f576c6b8fde54c194f88 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Thu, 20 Sep 2018 12:18:53 -0500 Subject: [PATCH 030/105] ceph-volume: add the journal_size and block_db_size options These can be used for the the --journal-size and --block-db-size options of `lvm batch`. Signed-off-by: Andrew Schoen --- library/ceph_volume.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index 12180c1cd..c3cff4cc1 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -95,6 +95,19 @@ options: - Only applicable if action is 'batch'. required: false default: 1 + journal_size: + description: + - The size in MB of filestore journals. + - Only applicable if action is 'batch'. + required: false + default: 5120 + block_db_size: + description: + - The size in bytes of bluestore block db lvs. + - The default of -1 means to create them as big as possible. + - Only applicable if action is 'batch'. 
+ required: false + default: -1 author: @@ -158,6 +171,8 @@ def batch(module): crush_device_class = module.params.get('crush_device_class', None) dmcrypt = module.params['dmcrypt'] osds_per_device = module.params['osds_per_device'] + journal_size = module.params['journal_size'] + block_db_size = module.params['block_db_size'] if not batch_devices: module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1) @@ -181,6 +196,12 @@ def batch(module): if osds_per_device > 1: cmd.extend(["--osds-per-device", osds_per_device]) + if objectstore == "filestore": + cmd.extend(["--journal-size", journal_size]) + + if objectstore == "bluestore" and block_db_size != -1: + cmd.extend(["--block-db-size", block_db_size]) + cmd.extend(batch_devices) result = dict( @@ -407,6 +428,8 @@ def run_module(): dmcrypt=dict(type='bool', required=False, default=False), batch_devices=dict(type='list', required=False, default=[]), osds_per_device=dict(type='int', required=False, default=1), + journal_size=dict(type='int', required=False, default=5120), + block_db_size=dict(type='int', required=False, default=-1), ) module = AnsibleModule( From 71ce539da5b59417e4f9a7d8d0c9eff773178ab0 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Thu, 20 Sep 2018 12:24:07 -0500 Subject: [PATCH 031/105] ceph-defaults: add the block_db_size option This is used in the lvm osd scenario for the 'lvm batch' subcommand of ceph-volume. Signed-off-by: Andrew Schoen --- group_vars/all.yml.sample | 1 + group_vars/rhcs.yml.sample | 1 + roles/ceph-defaults/defaults/main.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index 5c5a7fd66..e6f95f4fa 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -364,6 +364,7 @@ dummy: #non_hci_safety_factor: 0.7 #osd_memory_target: 4000000000 #journal_size: 5120 # OSD journal size in MB +#block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'. #public_network: 0.0.0.0/0 #cluster_network: "{{ public_network | regex_replace(' ', '') }}" #osd_mkfs_type: xfs diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index b25d12c7e..cabb77279 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -364,6 +364,7 @@ ceph_rhcs_version: 3 #non_hci_safety_factor: 0.7 #osd_memory_target: 4000000000 #journal_size: 5120 # OSD journal size in MB +#block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'. #public_network: 0.0.0.0/0 #cluster_network: "{{ public_network | regex_replace(' ', '') }}" #osd_mkfs_type: xfs diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 9a1905b04..511b2294b 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -356,6 +356,7 @@ hci_safety_factor: 0.2 non_hci_safety_factor: 0.7 osd_memory_target: 4000000000 journal_size: 5120 # OSD journal size in MB +block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'. 
public_network: 0.0.0.0/0 cluster_network: "{{ public_network | regex_replace(' ', '') }}" osd_mkfs_type: xfs From c453ea25c0a0eff9094b27daf42ba17dfd1d0f7b Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Thu, 20 Sep 2018 12:26:24 -0500 Subject: [PATCH 032/105] ceph-osd: use journal_size and block_db_size for lvm batch Signed-off-by: Andrew Schoen --- roles/ceph-osd/tasks/scenarios/lvm-batch.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml index d3afc438f..cff4c18ec 100644 --- a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml +++ b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml @@ -8,6 +8,8 @@ dmcrypt: "{{ dmcrypt|default(omit) }}" crush_device_class: "{{ crush_device_class|default(omit) }}" osds_per_device: "{{ osds_per_device }}" + journal_size: "{{ journal_size }}" + block_db_size: "{{ block_db_size }}" action: "batch" environment: CEPH_VOLUME_DEBUG: 1 From 07a384ba569d17d04818d6ba61f723172ba5e377 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Thu, 20 Sep 2018 13:17:29 -0500 Subject: [PATCH 033/105] ceph_volume: adds the report parameter Will pass the --report command to ceph-volume lvm batch. Results will be returned in json format. Signed-off-by: Andrew Schoen --- library/ceph_volume.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index c3cff4cc1..ee3cae284 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -108,6 +108,13 @@ options: - Only applicable if action is 'batch'. required: false default: -1 + report: + description: + - If provided the --report flag will be passed to 'ceph-volume lvm batch'. + - No OSDs will be created. + - Results will be returned in json format. + - Only applicable if action is 'batch'. + required: false author: @@ -173,6 +180,7 @@ def batch(module): osds_per_device = module.params['osds_per_device'] journal_size = module.params['journal_size'] block_db_size = module.params['block_db_size'] + report = module.params['report'] if not batch_devices: module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1) @@ -202,6 +210,12 @@ def batch(module): if objectstore == "bluestore" and block_db_size != -1: cmd.extend(["--block-db-size", block_db_size]) + if report: + cmd.extend([ + "--report", + "--format=json", + ]) + cmd.extend(batch_devices) result = dict( @@ -430,6 +444,7 @@ def run_module(): osds_per_device=dict(type='int', required=False, default=1), journal_size=dict(type='int', required=False, default=5120), block_db_size=dict(type='int', required=False, default=-1), + report=dict(type='bool', required=False, default=False), ) module = AnsibleModule( From 8afef3d0de5afb67160981b4b4b76b0452d831f9 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Thu, 20 Sep 2018 13:32:00 -0500 Subject: [PATCH 034/105] ceph-config: use the ceph_volume module to get num_osds for lvm batch This gives us an accurate number of how many osds will be created. 
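Condensed, the call pattern added below looks roughly like this (a sketch; it assumes the ceph_volume module shipped in library/ and the usual role variables, and omits the journal/db size parameters for brevity):

    - name: dry-run 'lvm batch' to see how many OSDs would be created
      ceph_volume:
        objectstore: "{{ osd_objectstore }}"
        batch_devices: "{{ devices }}"
        osds_per_device: "{{ osds_per_device | default(1) | int }}"
        report: true          # passes --report --format=json; nothing gets created
        action: "batch"
      register: lvm_batch_report
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: set_fact num_osds from the JSON report
      set_fact:
        num_osds: "{{ (lvm_batch_report.stdout | from_json).osds | length | int }}"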
Signed-off-by: Andrew Schoen --- library/ceph_volume.py | 7 ++++--- roles/ceph-config/tasks/main.yml | 28 +++++++++++++++++++++------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index ee3cae284..21c651c25 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -2,6 +2,7 @@ import datetime import json + ANSIBLE_METADATA = { 'metadata_version': '1.0', 'status': ['preview'], @@ -207,7 +208,7 @@ def batch(module): if objectstore == "filestore": cmd.extend(["--journal-size", journal_size]) - if objectstore == "bluestore" and block_db_size != -1: + if objectstore == "bluestore" and block_db_size != "-1": cmd.extend(["--block-db-size", block_db_size]) if report: @@ -442,8 +443,8 @@ def run_module(): dmcrypt=dict(type='bool', required=False, default=False), batch_devices=dict(type='list', required=False, default=[]), osds_per_device=dict(type='int', required=False, default=1), - journal_size=dict(type='int', required=False, default=5120), - block_db_size=dict(type='int', required=False, default=-1), + journal_size=dict(type='str', required=False, default="5120"), + block_db_size=dict(type='str', required=False, default="-1"), report=dict(type='bool', required=False, default=False), ) diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml index c12bce2e7..9df56e44b 100644 --- a/roles/ceph-config/tasks/main.yml +++ b/roles/ceph-config/tasks/main.yml @@ -27,16 +27,30 @@ - lvm_volumes | default([]) | length > 0 - osd_scenario == 'lvm' - # This is a best guess. Ideally we'd like to use `ceph-volume lvm batch --report` to get - # a more accurate number but the ceph.conf needs to be in place before that is possible. - # There is a tracker to add functionality to ceph-volume which would allow doing this - # without the need for a ceph.conf: http://tracker.ceph.com/issues/36088 - - name: count number of osds for lvm batch scenario - set_fact: - num_osds: "{{ devices | length | int * osds_per_device | default(1) }}" + - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created + ceph_volume: + cluster: "{{ cluster }}" + objectstore: "{{ osd_objectstore }}" + batch_devices: "{{ devices }}" + osds_per_device: "{{ osds_per_device | default(1) | int }}" + journal_size: "{{ journal_size }}" + block_db_size: "{{ block_db_size }}" + report: true + action: "batch" + register: lvm_batch_report + environment: + CEPH_VOLUME_DEBUG: 1 when: - devices | default([]) | length > 0 - osd_scenario == 'lvm' + + - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' + set_fact: + num_osds: "{{ (lvm_batch_report.stdout | from_json).osds | length | int }}" + when: + - devices | default([]) | length > 0 + - osd_scenario == 'lvm' + when: - inventory_hostname in groups.get(osd_group_name, []) From 2ffad1b43af0b56872ea09a8d3d5cbdff72098a4 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Tue, 25 Sep 2018 15:05:08 -0500 Subject: [PATCH 035/105] ceph-volume: adds `lvm list` support to the ceph_volume module Signed-off-by: Andrew Schoen --- library/ceph_volume.py | 52 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index 21c651c25..f59b70d11 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -37,7 +37,7 @@ options: description: - The action to take. Either creating OSDs or zapping devices. 
required: true - choices: ['create', 'zap', 'batch'] + choices: ['create', 'zap', 'batch', 'list'] default: create data: description: @@ -172,6 +172,52 @@ def get_wal(wal, wal_vg): return wal +def _list(module): + cmd = [ + 'ceph-volume', + 'lvm', + 'list', + '--format=json', + ] + + result = dict( + changed=False, + cmd=cmd, + stdout='', + stderr='', + rc='', + start='', + end='', + delta='', + ) + + if module.check_mode: + return result + + startd = datetime.datetime.now() + + rc, out, err = module.run_command(cmd, encoding=None) + + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + stdout=out.rstrip(b"\r\n"), + stderr=err.rstrip(b"\r\n"), + rc=rc, + start=str(startd), + end=str(endd), + delta=str(delta), + changed=True, + ) + + if rc != 0: + module.fail_json(msg='non-zero return code', **result) + + module.exit_json(**result) + + def batch(module): cluster = module.params['cluster'] objectstore = module.params['objectstore'] @@ -430,7 +476,7 @@ def run_module(): module_args = dict( cluster=dict(type='str', required=False, default='ceph'), objectstore=dict(type='str', required=False, choices=['bluestore', 'filestore'], default='bluestore'), - action=dict(type='str', required=False, choices=['create', 'zap', 'batch'], default='create'), + action=dict(type='str', required=False, choices=['create', 'zap', 'batch', 'list'], default='create'), data=dict(type='str', required=False), data_vg=dict(type='str', required=False), journal=dict(type='str', required=False), @@ -461,6 +507,8 @@ def run_module(): zap_devices(module) elif action == "batch": batch(module) + elif action == "list": + _list(module) module.fail_json(msg='State must either be "present" or "absent".', changed=False, rc=1) From 40f82319dd0ccee9d33620ad579d20701589fd61 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Tue, 25 Sep 2018 15:25:40 -0500 Subject: [PATCH 036/105] ceph-config: use 'lvm list' to find num_osds for an existing cluster This makes finding num_osds idempotent for clusters that were deployed using 'lvm batch'. 
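In short, the fallback added below is roughly the following (a condensed sketch of the new tasks):

    - name: list OSDs already prepared on this host
      ceph_volume:
        action: "list"
      register: lvm_list
      environment:
        CEPH_VOLUME_DEBUG: 1

    # 'ceph-volume lvm list --format=json' returns an object keyed by OSD id,
    # so its length is the number of OSDs already present on the host
    - name: set_fact num_osds from 'ceph-volume lvm list'
      set_fact:
        num_osds: "{{ lvm_list.stdout | from_json | length | int }}"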
Signed-off-by: Andrew Schoen --- roles/ceph-config/tasks/main.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml index 9df56e44b..2f7b8f587 100644 --- a/roles/ceph-config/tasks/main.yml +++ b/roles/ceph-config/tasks/main.yml @@ -38,6 +38,7 @@ report: true action: "batch" register: lvm_batch_report + failed_when: false environment: CEPH_VOLUME_DEBUG: 1 when: @@ -50,6 +51,26 @@ when: - devices | default([]) | length > 0 - osd_scenario == 'lvm' + - (lvm_batch_report.stdout | from_json).changed + + - name: run 'ceph-volume lvm list' to see how many osds have already been created + ceph_volume: + action: "list" + register: lvm_list + environment: + CEPH_VOLUME_DEBUG: 1 + when: + - devices | default([]) | length > 0 + - osd_scenario == 'lvm' + - not (lvm_batch_report.stdout | from_json).changed + + - name: set_fact num_osds from the output of 'ceph-volume lvm list' + set_fact: + num_osds: "{{ lvm_list.stdout | from_json | length | int }}" + when: + - devices | default([]) | length > 0 + - osd_scenario == 'lvm' + - not (lvm_batch_report.stdout | from_json).changed when: - inventory_hostname in groups.get(osd_group_name, []) From 5ee305d1a02f9cf0196b85ab277e408319e4b24c Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Mon, 1 Oct 2018 12:51:47 -0500 Subject: [PATCH 037/105] ceph-volume: make the batch action idempotent The command is run with --report first to see if any OSDs will be created or not. If they will be, then the command is run. If not, then changed is set to False and the module exits. Signed-off-by: Andrew Schoen --- library/ceph_volume.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index f59b70d11..f05bab57c 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -1,6 +1,7 @@ #!/usr/bin/python import datetime import json +import copy ANSIBLE_METADATA = { @@ -257,11 +258,10 @@ def batch(module): if objectstore == "bluestore" and block_db_size != "-1": cmd.extend(["--block-db-size", block_db_size]) - if report: - cmd.extend([ - "--report", - "--format=json", - ]) + report_flags = [ + "--report", + "--format=json", + ] cmd.extend(batch_devices) @@ -281,11 +281,23 @@ def batch(module): startd = datetime.datetime.now() - rc, out, err = module.run_command(cmd, encoding=None) + report_cmd = copy.copy(cmd) + report_cmd.extend(report_flags) + + rc, out, err = module.run_command(report_cmd, encoding=None) + report_result = json.loads(out) + if not report: + rc, out, err = module.run_command(cmd, encoding=None) + else: + cmd = report_cmd endd = datetime.datetime.now() delta = endd - startd + changed = True + if not report: + changed = report_result['changed'] + result = dict( cmd=cmd, stdout=out.rstrip(b"\r\n"), @@ -294,7 +306,7 @@ def batch(module): start=str(startd), end=str(endd), delta=str(delta), - changed=True, + changed=changed, ) if rc != 0: From a68c6802253db5f688d50e2a5dd8b8ebf87c4ff2 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Mon, 1 Oct 2018 15:06:50 -0500 Subject: [PATCH 038/105] tests: remove journal_size from lvm-batch testing scenario Signed-off-by: Andrew Schoen --- tests/functional/centos/7/lvm-batch/group_vars/all | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/functional/centos/7/lvm-batch/group_vars/all b/tests/functional/centos/7/lvm-batch/group_vars/all index 2a7105d7c..2e5daca3a 100644 --- a/tests/functional/centos/7/lvm-batch/group_vars/all +++ 
b/tests/functional/centos/7/lvm-batch/group_vars/all @@ -7,7 +7,6 @@ public_network: "192.168.39.0/24" cluster_network: "192.168.40.0/24" monitor_interface: eth1 radosgw_interface: eth1 -journal_size: 100 osd_objectstore: "bluestore" crush_device_class: test osd_scenario: lvm From a63ca220e6b034de01b162b3a527764512ab573c Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Tue, 2 Oct 2018 13:50:01 -0500 Subject: [PATCH 039/105] ceph-volume: if --report fails to load json, fail with better info This handles the case gracefully where --report does not return any JSON because a validator might have failed. Signed-off-by: Andrew Schoen --- library/ceph_volume.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index f05bab57c..2d281b1b8 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -285,7 +285,18 @@ def batch(module): report_cmd.extend(report_flags) rc, out, err = module.run_command(report_cmd, encoding=None) - report_result = json.loads(out) + try: + report_result = json.loads(out) + except ValueError: + result = dict( + cmd=report_cmd, + stdout=out.rstrip(b"\r\n"), + stderr=err.rstrip(b"\r\n"), + rc=rc, + changed=True, + ) + module.fail_json(msg='non-zero return code', **result) + if not report: rc, out, err = module.run_command(cmd, encoding=None) else: From 436dc8c5e107abf2fa32f645db850d76d6e30ca8 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Tue, 2 Oct 2018 13:56:09 -0500 Subject: [PATCH 040/105] ceph-config: allow the batch --report to fail when getting the OSD num Signed-off-by: Andrew Schoen --- roles/ceph-config/tasks/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml index 2f7b8f587..8130c922e 100644 --- a/roles/ceph-config/tasks/main.yml +++ b/roles/ceph-config/tasks/main.yml @@ -38,7 +38,6 @@ report: true action: "batch" register: lvm_batch_report - failed_when: false environment: CEPH_VOLUME_DEBUG: 1 when: From ada03d064db4aeb9cfaf359250a9ca0cf3046e1d Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Tue, 9 Oct 2018 10:04:51 -0400 Subject: [PATCH 041/105] ceph-validate: remove versions checks for bluestore and lvm scenario These checks will never pass unless ceph_stable_release is passed and ceph-defaults is run before ceph-validate. Additionally, we don't want to support deploying jewel upstream at ceph-ansible master. Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1637537 Signed-off-by: Andrew Schoen --- roles/ceph-validate/tasks/main.yml | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml index 50a5a2c17..7ce8cfd61 100644 --- a/roles/ceph-validate/tasks/main.yml +++ b/roles/ceph-validate/tasks/main.yml @@ -43,27 +43,6 @@ - osd_objectstore == 'filestore' - osd_group_name in group_names -- name: check if osd_scenario lvm is supported by the selected ceph version - fail: - msg: "osd_scenario lvm is not supported by the selected Ceph version, use Luminous or newer." - when: - - osd_group_name is defined - - osd_group_name in group_names - - ceph_repository not in ['rhcs', 'dev'] - - not containerized_deployment - - osd_scenario == "lvm" - - ceph_release_num[ceph_release] < ceph_release_num.luminous - -- name: check if bluestore is supported by the selected ceph version - fail: - msg: "bluestore is not supported by the selected Ceph version, use Luminous or above." 
- when: - - osd_group_name is defined - - osd_group_name in group_names - - ceph_repository not in ['rhcs', 'dev'] - - osd_objectstore == 'bluestore' - - ceph_release_num[ceph_release] < ceph_release_num.luminous - - name: include check_system.yml include_tasks: check_system.yml From 3e488e8298a0c8ec4ec98317a1d7f1efc4926257 Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Tue, 9 Oct 2018 13:40:38 -0400 Subject: [PATCH 042/105] tests: install lvm2 before setting up ceph-volume/LVM tests Signed-off-by: Alfredo Deza --- tests/functional/lvm_setup.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/functional/lvm_setup.yml b/tests/functional/lvm_setup.yml index 280c2756d..f1f4801d6 100644 --- a/tests/functional/lvm_setup.yml +++ b/tests/functional/lvm_setup.yml @@ -5,6 +5,12 @@ become: yes tasks: + # Some images may not have lvm2 installed + - name: install lvm2 + package: + name: lvm2 + state: present + - name: create physical volume command: pvcreate /dev/sdb failed_when: false From ce8e740f626c4fbaba62a207eed3079e677f477c Mon Sep 17 00:00:00 2001 From: Ramana Raja Date: Tue, 9 Oct 2018 18:01:28 +0530 Subject: [PATCH 043/105] docs: Correct mandatory config options 'radosgw_interface' or 'radosgw_address' config option does not need to be set for all ceph-ansible deployments. Closes: https://github.com/ceph/ceph-ansible/issues/3143 Signed-off-by: Ramana Raja --- docs/source/index.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/source/index.rst b/docs/source/index.rst index 48f246641..16ca72fa2 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -222,7 +222,9 @@ selection or other aspects of your cluster. - ``public_network`` - ``osd_scenario`` - ``monitor_interface`` or ``monitor_address`` -- ``radosgw_interface`` or ``radosgw_address`` + + +When deploying RGW instance(s) you are required to set the ``radosgw_interface`` or ``radosgw_address`` config option. ``ceph.conf`` Configuration File --------------------------------- From cc6f41f76a03528bd801768b1d1c8469839c99a9 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 9 Oct 2018 15:43:08 -0400 Subject: [PATCH 044/105] tests: fix lvm2 setup issue not gathering fact causes `package` module to fail because it needs to detect which OS we are running on to select the right package manager. Signed-off-by: Guillaume Abrioux --- tests/functional/lvm_setup.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/lvm_setup.yml b/tests/functional/lvm_setup.yml index f1f4801d6..ef30614eb 100644 --- a/tests/functional/lvm_setup.yml +++ b/tests/functional/lvm_setup.yml @@ -1,7 +1,7 @@ --- - hosts: osds - gather_facts: false + gather_facts: true become: yes tasks: From 306e308f133c9b9757d6cae5f88d2c39903cae2f Mon Sep 17 00:00:00 2001 From: Noah Watkins Date: Fri, 5 Oct 2018 15:53:40 -0700 Subject: [PATCH 045/105] Avoid using tests as filter Fixes the deprecation warning: [DEPRECATION WARNING]: Using tests as filters is deprecated. Instead of using `result|search` use `result is search`. 
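A minimal before/after; the condition is one of those touched in rolling_update.yml below, while the debug task wrapping it is made up for illustration:

    - name: example condition rewritten to test syntax
      debug:
        msg: "luminous OSDs detected"
      # deprecated: when: ceph_versions_osd | string | search("ceph version 12")
      when: ceph_versions_osd | string is search("ceph version 12")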
Signed-off-by: Noah Watkins --- infrastructure-playbooks/rolling_update.yml | 4 +-- ...inerized-to-containerized-ceph-daemons.yml | 32 +++++++++---------- roles/ceph-common/tasks/release-rhcs.yml | 10 +++--- roles/ceph-defaults/tasks/facts.yml | 6 ++-- roles/ceph-docker-common/tasks/release.yml | 10 +++--- .../templates/rbd-target-api.service.j2 | 2 +- .../templates/rbd-target-gw.service.j2 | 2 +- .../templates/tcmu-runner.service.j2 | 2 +- roles/ceph-mds/templates/ceph-mds.service.j2 | 2 +- roles/ceph-mgr/templates/ceph-mgr.service.j2 | 2 +- roles/ceph-mon/tasks/docker/copy_configs.yml | 2 +- roles/ceph-mon/templates/ceph-mon.service.j2 | 2 +- roles/ceph-osd/templates/ceph-osd-run.sh.j2 | 2 +- .../templates/ceph-rbd-mirror.service.j2 | 2 +- .../templates/ceph-radosgw.service.j2 | 2 +- roles/ceph-validate/tasks/check_system.yml | 4 +-- 16 files changed, 43 insertions(+), 43 deletions(-) diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index d5d5ea64f..f81fbd61d 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -414,7 +414,7 @@ delegate_to: "{{ groups[mon_group_name][0] }}" when: - (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1 - - ceph_versions_osd | string | search("ceph version 10") + - ceph_versions_osd | string is search("ceph version 10") - not jewel_minor_update - name: get num_pgs - non container @@ -483,7 +483,7 @@ delegate_to: "{{ groups[mon_group_name][0] }}" when: - (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1 - - ceph_versions_osd | string | search("ceph version 12") + - ceph_versions_osd | string is search("ceph version 12") - not jewel_minor_update diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index 8bab92390..f9ec87c1f 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -76,17 +76,17 @@ - name: set_fact ceph_uid for ubuntu set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag | search("ubuntu") + when: ceph_docker_image_tag is search("ubuntu") - name: set_fact ceph_uid for red hat set_fact: ceph_uid: 167 - when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("centos") or ceph_docker_image_tag | search("fedora") + when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("centos") or ceph_docker_image_tag is search("fedora") - name: set_fact ceph_uid for rhel set_fact: ceph_uid: 167 - when: ceph_docker_image | search("rhceph") + when: ceph_docker_image is search("rhceph") - name: set proper ownership on ceph directories file: @@ -159,11 +159,11 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu") + when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") - set_fact: ceph_uid: 167 - when: ceph_docker_image_tag | search("centos") or ceph_docker_image | search("rhceph") or ceph_docker_image_tag | search("fedora") + when: ceph_docker_image_tag is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag is search("fedora") - name: set proper ownership on ceph directories file: @@ -231,17 +231,17 @@ - set_fact: ceph_uid: 64045 - when: 
ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu") + when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") - name: set_fact ceph_uid for red hat set_fact: ceph_uid: 167 - when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("centos") or ceph_docker_image_tag | search("fedora") + when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("centos") or ceph_docker_image_tag is search("fedora") - name: set_fact ceph_uid for rhel set_fact: ceph_uid: 167 - when: ceph_docker_image | search("rhceph") + when: ceph_docker_image is search("rhceph") - name: set proper ownership on ceph directories file: @@ -336,11 +336,11 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu") + when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") - set_fact: ceph_uid: 167 - when: ceph_docker_image_tag | search("centos") or ceph_docker_image | search("rhceph") or ceph_docker_image_tag | search("fedora") + when: ceph_docker_image_tag is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag is search("fedora") - name: set proper ownership on ceph directories file: @@ -379,11 +379,11 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu") + when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") - set_fact: ceph_uid: 167 - when: ceph_docker_image_tag | search("centos") or ceph_docker_image | search("rhceph") or ceph_docker_image_tag | search("fedora") + when: ceph_docker_image_tag is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag is search("fedora") - name: set proper ownership on ceph directories file: @@ -422,11 +422,11 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu") + when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") - set_fact: ceph_uid: 167 - when: ceph_docker_image_tag | search("centos") or ceph_docker_image | search("rhceph") or ceph_docker_image_tag | search("fedora") + when: ceph_docker_image_tag is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag is search("fedora") - name: set proper ownership on ceph directories file: @@ -469,11 +469,11 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag | match("latest") or ceph_docker_image_tag | search("ubuntu") + when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") - set_fact: ceph_uid: 167 - when: ceph_docker_image_tag | search("centos") or ceph_docker_image | search("rhceph") or ceph_docker_image_tag | search("fedora") + when: ceph_docker_image_tag is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag is search("fedora") - name: set proper ownership on ceph directories file: diff --git a/roles/ceph-common/tasks/release-rhcs.yml b/roles/ceph-common/tasks/release-rhcs.yml index 973eb91d9..d0ac6d3a4 100644 --- a/roles/ceph-common/tasks/release-rhcs.yml +++ b/roles/ceph-common/tasks/release-rhcs.yml @@ -3,28 +3,28 @@ set_fact: ceph_release: jewel when: - - ceph_version.split('.')[0] | version_compare('10', '==') + - ceph_version.split('.')[0] is version_compare('10', '==') - name: set_fact ceph_release kraken set_fact: ceph_release: kraken when: - - 
ceph_version.split('.')[0] | version_compare('11', '==') + - ceph_version.split('.')[0] is version_compare('11', '==') - name: set_fact ceph_release luminous set_fact: ceph_release: luminous when: - - ceph_version.split('.')[0] | version_compare('12', '==') + - ceph_version.split('.')[0] is version_compare('12', '==') - name: set_fact ceph_release mimic set_fact: ceph_release: mimic when: - - ceph_version.split('.')[0] | version_compare('13', '==') + - ceph_version.split('.')[0] is version_compare('13', '==') - name: set_fact ceph_release nautilus set_fact: ceph_release: nautilus when: - - ceph_version.split('.')[0] | version_compare('14', '==') + - ceph_version.split('.')[0] is version_compare('14', '==') diff --git a/roles/ceph-defaults/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml index 49f49338d..bfa4aafee 100644 --- a/roles/ceph-defaults/tasks/facts.yml +++ b/roles/ceph-defaults/tasks/facts.yml @@ -209,21 +209,21 @@ ceph_uid: 64045 when: - containerized_deployment - - ceph_docker_image_tag | search("ubuntu") + - ceph_docker_image_tag is search("ubuntu") - name: set_fact ceph_uid for red hat based system - container set_fact: ceph_uid: 167 when: - containerized_deployment - - ceph_docker_image_tag | search("latest") or ceph_docker_image_tag | search("centos") or ceph_docker_image_tag | search("fedora") + - ceph_docker_image_tag is search("latest") or ceph_docker_image_tag is search("centos") or ceph_docker_image_tag is search("fedora") - name: set_fact ceph_uid for red hat set_fact: ceph_uid: 167 when: - containerized_deployment - - ceph_docker_image | search("rhceph") + - ceph_docker_image is search("rhceph") - name: set_fact rgw_hostname set_fact: diff --git a/roles/ceph-docker-common/tasks/release.yml b/roles/ceph-docker-common/tasks/release.yml index 973eb91d9..d0ac6d3a4 100644 --- a/roles/ceph-docker-common/tasks/release.yml +++ b/roles/ceph-docker-common/tasks/release.yml @@ -3,28 +3,28 @@ set_fact: ceph_release: jewel when: - - ceph_version.split('.')[0] | version_compare('10', '==') + - ceph_version.split('.')[0] is version_compare('10', '==') - name: set_fact ceph_release kraken set_fact: ceph_release: kraken when: - - ceph_version.split('.')[0] | version_compare('11', '==') + - ceph_version.split('.')[0] is version_compare('11', '==') - name: set_fact ceph_release luminous set_fact: ceph_release: luminous when: - - ceph_version.split('.')[0] | version_compare('12', '==') + - ceph_version.split('.')[0] is version_compare('12', '==') - name: set_fact ceph_release mimic set_fact: ceph_release: mimic when: - - ceph_version.split('.')[0] | version_compare('13', '==') + - ceph_version.split('.')[0] is version_compare('13', '==') - name: set_fact ceph_release nautilus set_fact: ceph_release: nautilus when: - - ceph_version.split('.')[0] | version_compare('14', '==') + - ceph_version.split('.')[0] is version_compare('14', '==') diff --git a/roles/ceph-iscsi-gw/templates/rbd-target-api.service.j2 b/roles/ceph-iscsi-gw/templates/rbd-target-api.service.j2 index fd31d21f8..69781693d 100644 --- a/roles/ceph-iscsi-gw/templates/rbd-target-api.service.j2 +++ b/roles/ceph-iscsi-gw/templates/rbd-target-api.service.j2 @@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop rbd-target-api ExecStartPre=-/usr/bin/docker rm rbd-target-api ExecStart=/usr/bin/docker run --rm \ --memory={{ ceph_rbd_target_api_docker_memory_limit }} \ - {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%} + {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%} --cpus={{ 
ceph_rbd_target_api_docker_cpu_limit }} \ {% else -%} --cpu-quota={{ ceph_rbd_target_api_docker_cpu_limit * 100000 }} \ diff --git a/roles/ceph-iscsi-gw/templates/rbd-target-gw.service.j2 b/roles/ceph-iscsi-gw/templates/rbd-target-gw.service.j2 index e7bc303f3..3aa18d1c1 100644 --- a/roles/ceph-iscsi-gw/templates/rbd-target-gw.service.j2 +++ b/roles/ceph-iscsi-gw/templates/rbd-target-gw.service.j2 @@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop rbd-target-gw ExecStartPre=-/usr/bin/docker rm rbd-target-gw ExecStart=/usr/bin/docker run --rm \ --memory={{ ceph_rbd_target_gw_docker_memory_limit }} \ - {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%} + {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%} --cpus={{ ceph_rbd_target_gw_docker_cpu_limit }} \ {% else -%} --cpu-quota={{ ceph_rbd_target_gw_docker_cpu_limit * 100000 }} \ diff --git a/roles/ceph-iscsi-gw/templates/tcmu-runner.service.j2 b/roles/ceph-iscsi-gw/templates/tcmu-runner.service.j2 index c19711a97..8159125f5 100644 --- a/roles/ceph-iscsi-gw/templates/tcmu-runner.service.j2 +++ b/roles/ceph-iscsi-gw/templates/tcmu-runner.service.j2 @@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop tcmu-runner ExecStartPre=-/usr/bin/docker rm tcmu-runner ExecStart=/usr/bin/docker run --rm \ --memory={{ ceph_tcmu_runner_docker_memory_limit }} \ - {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%} + {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%} --cpus={{ ceph_tcmu_runner_docker_cpu_limit }} \ {% else -%} --cpu-quota={{ ceph_tcmu_runner_docker_cpu_limit * 100000 }} \ diff --git a/roles/ceph-mds/templates/ceph-mds.service.j2 b/roles/ceph-mds/templates/ceph-mds.service.j2 index 1f6f01164..9e3dcfb5b 100644 --- a/roles/ceph-mds/templates/ceph-mds.service.j2 +++ b/roles/ceph-mds/templates/ceph-mds.service.j2 @@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }} ExecStartPre=-/usr/bin/docker rm ceph-mds-{{ ansible_hostname }} ExecStart=/usr/bin/docker run --rm --net=host \ --memory={{ ceph_mds_docker_memory_limit }} \ - {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%} + {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%} --cpus={{ ceph_mds_docker_cpu_limit }} \ {% else -%} --cpu-quota={{ ceph_mds_docker_cpu_limit * 100000 }} \ diff --git a/roles/ceph-mgr/templates/ceph-mgr.service.j2 b/roles/ceph-mgr/templates/ceph-mgr.service.j2 index 5c4ef56b6..766290e25 100644 --- a/roles/ceph-mgr/templates/ceph-mgr.service.j2 +++ b/roles/ceph-mgr/templates/ceph-mgr.service.j2 @@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }} ExecStartPre=-/usr/bin/docker rm ceph-mgr-{{ ansible_hostname }} ExecStart=/usr/bin/docker run --rm --net=host \ --memory={{ ceph_mgr_docker_memory_limit }} \ - {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%} + {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%} --cpus={{ ceph_mgr_docker_cpu_limit }} \ {% else -%} --cpu-quota={{ ceph_mgr_docker_cpu_limit * 100000 }} \ diff --git a/roles/ceph-mon/tasks/docker/copy_configs.yml b/roles/ceph-mon/tasks/docker/copy_configs.yml index b08054414..b7407a2b3 100644 --- a/roles/ceph-mon/tasks/docker/copy_configs.yml +++ b/roles/ceph-mon/tasks/docker/copy_configs.yml @@ -43,4 +43,4 @@ - "{{ statconfig.results }}" when: - item.1.stat.exists == true - - item.0 | search("keyring") + - item.0 is search("keyring") diff --git 
a/roles/ceph-mon/templates/ceph-mon.service.j2 b/roles/ceph-mon/templates/ceph-mon.service.j2 index 5c591b64f..8bd0e7e4d 100644 --- a/roles/ceph-mon/templates/ceph-mon.service.j2 +++ b/roles/ceph-mon/templates/ceph-mon.service.j2 @@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker rm ceph-mon-%i ExecStartPre=/bin/sh -c '"$(command -v mkdir)" -p /etc/ceph /var/lib/ceph/mon' ExecStart=/usr/bin/docker run --rm --name ceph-mon-%i \ --memory={{ ceph_mon_docker_memory_limit }} \ -{% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%} +{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%} --cpus={{ ceph_mon_docker_cpu_limit }} \ {% else -%} --cpu-quota={{ ceph_mon_docker_cpu_limit * 100000 }} \ diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 index f91166b31..fc8f0a30b 100644 --- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 +++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 @@ -61,7 +61,7 @@ expose_partitions "$1" --privileged=true \ --pid=host \ --memory={{ ceph_osd_docker_memory_limit }} \ - {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%} + {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%} --cpus={{ ceph_osd_docker_cpu_limit }} \ {% else -%} --cpu-quota={{ ceph_osd_docker_cpu_limit * 100000 }} \ diff --git a/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 b/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 index 4f966b230..cd853f775 100644 --- a/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 +++ b/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 @@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }} ExecStartPre=-/usr/bin/docker rm ceph-rbd-mirror-{{ ansible_hostname }} ExecStart=/usr/bin/docker run --rm --net=host \ --memory={{ ceph_rbd_mirror_docker_memory_limit }} \ - {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%} + {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%} --cpus={{ ceph_rbd_mirror_docker_cpu_limit }} \ {% else -%} --cpu-quota={{ ceph_rbd_mirror_docker_cpu_limit * 100000 }} \ diff --git a/roles/ceph-rgw/templates/ceph-radosgw.service.j2 b/roles/ceph-rgw/templates/ceph-radosgw.service.j2 index 410e7dffb..98b6344d3 100644 --- a/roles/ceph-rgw/templates/ceph-radosgw.service.j2 +++ b/roles/ceph-rgw/templates/ceph-radosgw.service.j2 @@ -8,7 +8,7 @@ ExecStartPre=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }} ExecStartPre=-/usr/bin/docker rm ceph-rgw-{{ ansible_hostname }} ExecStart=/usr/bin/docker run --rm --net=host \ --memory={{ ceph_rgw_docker_memory_limit }} \ - {% if ceph_docker_version.split('.')[0] | version_compare('13', '>=') -%} + {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%} --cpus={{ ceph_rgw_docker_cpu_limit }} \ {% else -%} --cpu-quota={{ ceph_rgw_docker_cpu_limit * 100000 }} \ diff --git a/roles/ceph-validate/tasks/check_system.yml b/roles/ceph-validate/tasks/check_system.yml index c343c54ea..1707e7446 100644 --- a/roles/ceph-validate/tasks/check_system.yml +++ b/roles/ceph-validate/tasks/check_system.yml @@ -23,7 +23,7 @@ when: - ansible_distribution == 'Red Hat Enterprise Linux' - ceph_repository == 'rhcs' - - ansible_distribution_version | version_compare('7.3', '<') + - ansible_distribution_version is version_compare('7.3', '<') - name: determine if node is registered with subscription-manager command: subscription-manager identity @@ -57,7 +57,7 @@ msg: "Distribution not 
supported: {{ ansible_distribution }}" when: - ansible_distribution == 'openSUSE Leap' - - ansible_distribution_version | version_compare('42.3', '<') + - ansible_distribution_version is version_compare('42.3', '<') - name: fail on unsupported ansible version (1.X) fail: From 8dcc8d1434dbe2837d91162f4647246c54826e97 Mon Sep 17 00:00:00 2001 From: Noah Watkins Date: Fri, 5 Oct 2018 15:56:45 -0700 Subject: [PATCH 046/105] Stringify ceph_docker_image_tag This could be a numeric input, but is treated like a string leading to runtime errors. Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1635823 Signed-off-by: Noah Watkins --- ...inerized-to-containerized-ceph-daemons.yml | 28 +++++++++---------- roles/ceph-defaults/tasks/facts.yml | 4 +-- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index f9ec87c1f..296edd446 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -76,12 +76,12 @@ - name: set_fact ceph_uid for ubuntu set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag is search("ubuntu") + when: ceph_docker_image_tag | string is search("ubuntu") - name: set_fact ceph_uid for red hat set_fact: ceph_uid: 167 - when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("centos") or ceph_docker_image_tag is search("fedora") + when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora") - name: set_fact ceph_uid for rhel set_fact: @@ -159,11 +159,11 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") + when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu") - set_fact: ceph_uid: 167 - when: ceph_docker_image_tag is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag is search("fedora") + when: ceph_docker_image_tag | string is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag | string is search("fedora") - name: set proper ownership on ceph directories file: @@ -231,12 +231,12 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") + when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu") - name: set_fact ceph_uid for red hat set_fact: ceph_uid: 167 - when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("centos") or ceph_docker_image_tag is search("fedora") + when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora") - name: set_fact ceph_uid for rhel set_fact: @@ -336,11 +336,11 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") + when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu") - set_fact: ceph_uid: 167 - when: ceph_docker_image_tag is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag is search("fedora") + when: 
ceph_docker_image_tag | string is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag | string is search("fedora") - name: set proper ownership on ceph directories file: @@ -379,11 +379,11 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") + when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu") - set_fact: ceph_uid: 167 - when: ceph_docker_image_tag is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag is search("fedora") + when: ceph_docker_image_tag | string is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag | string is search("fedora") - name: set proper ownership on ceph directories file: @@ -422,11 +422,11 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") + when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu") - set_fact: ceph_uid: 167 - when: ceph_docker_image_tag is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag is search("fedora") + when: ceph_docker_image_tag | string is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag | string is search("fedora") - name: set proper ownership on ceph directories file: @@ -469,11 +469,11 @@ - set_fact: ceph_uid: 64045 - when: ceph_docker_image_tag is match("latest") or ceph_docker_image_tag is search("ubuntu") + when: ceph_docker_image_tag | string is match("latest") or ceph_docker_image_tag | string is search("ubuntu") - set_fact: ceph_uid: 167 - when: ceph_docker_image_tag is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag is search("fedora") + when: ceph_docker_image_tag | string is search("centos") or ceph_docker_image is search("rhceph") or ceph_docker_image_tag | string is search("fedora") - name: set proper ownership on ceph directories file: diff --git a/roles/ceph-defaults/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml index bfa4aafee..ab899d648 100644 --- a/roles/ceph-defaults/tasks/facts.yml +++ b/roles/ceph-defaults/tasks/facts.yml @@ -209,14 +209,14 @@ ceph_uid: 64045 when: - containerized_deployment - - ceph_docker_image_tag is search("ubuntu") + - ceph_docker_image_tag | string is search("ubuntu") - name: set_fact ceph_uid for red hat based system - container set_fact: ceph_uid: 167 when: - containerized_deployment - - ceph_docker_image_tag is search("latest") or ceph_docker_image_tag is search("centos") or ceph_docker_image_tag is search("fedora") + - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora") - name: set_fact ceph_uid for red hat set_fact: From b3a71eeb08e9cdb2607ed60d724f387a0a24d3de Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Fri, 5 Oct 2018 15:42:52 +0200 Subject: [PATCH 047/105] ceph-infra: add new role ceph-infra this role manages ceph infra services such as ntp, firewall, ... 
Signed-off-by: Guillaume Abrioux --- roles/ceph-common/tasks/main.yml | 18 ------------------ roles/ceph-infra/meta/main.yml | 19 +++++++++++++++++++ .../tasks}/configure_firewall_rpm.yml | 0 roles/ceph-infra/tasks/main.yml | 18 ++++++++++++++++++ .../misc => ceph-infra/tasks}/ntp_debian.yml | 0 .../misc => ceph-infra/tasks}/ntp_rpm.yml | 0 site.yml.sample | 1 + 7 files changed, 38 insertions(+), 18 deletions(-) create mode 100644 roles/ceph-infra/meta/main.yml rename roles/{ceph-common/tasks/misc => ceph-infra/tasks}/configure_firewall_rpm.yml (100%) create mode 100644 roles/ceph-infra/tasks/main.yml rename roles/{ceph-common/tasks/misc => ceph-infra/tasks}/ntp_debian.yml (100%) rename roles/{ceph-common/tasks/misc => ceph-infra/tasks}/ntp_rpm.yml (100%) diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index 14b38787d..894bf8e8a 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -23,18 +23,6 @@ tags: - package-install -- name: include_tasks "misc/ntp_debian.yml" - include_tasks: "misc/ntp_debian.yml" - when: - - ansible_os_family == 'Debian' - - ntp_service_enabled - -- name: include_tasks "misc/ntp_rpm.yml" - include_tasks: "misc/ntp_rpm.yml" - when: - - ansible_os_family in ['RedHat', 'Suse'] - - ntp_service_enabled - - name: get ceph version command: ceph --version changed_when: false @@ -53,12 +41,6 @@ tags: - always -- name: include_tasks misc/configure_firewall_rpm.yml - include_tasks: misc/configure_firewall_rpm.yml - when: - - configure_firewall - - ansible_os_family in ['RedHat', 'Suse'] - - name: include facts_mon_fsid.yml include_tasks: facts_mon_fsid.yml run_once: true diff --git a/roles/ceph-infra/meta/main.yml b/roles/ceph-infra/meta/main.yml new file mode 100644 index 000000000..a965e1d61 --- /dev/null +++ b/roles/ceph-infra/meta/main.yml @@ -0,0 +1,19 @@ +--- +galaxy_info: + author: Guillaume Abrioux + description: Handles ceph infra requirements (ntp, firewall, ...) 
+ license: Apache + min_ansible_version: 2.3 + platforms: + - name: Ubuntu + versions: + - xenial + - name: EL + versions: + - 7 + - name: opensuse + versions: + - 42.3 + categories: + - system +dependencies: [] diff --git a/roles/ceph-common/tasks/misc/configure_firewall_rpm.yml b/roles/ceph-infra/tasks/configure_firewall_rpm.yml similarity index 100% rename from roles/ceph-common/tasks/misc/configure_firewall_rpm.yml rename to roles/ceph-infra/tasks/configure_firewall_rpm.yml diff --git a/roles/ceph-infra/tasks/main.yml b/roles/ceph-infra/tasks/main.yml new file mode 100644 index 000000000..418c257b6 --- /dev/null +++ b/roles/ceph-infra/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: include_tasks configure_firewall_rpm.yml + include_tasks: configure_firewall_rpm.yml + when: + - configure_firewall + - ansible_os_family in ['RedHat', 'Suse'] + +- name: include_tasks "ntp_debian.yml" + include_tasks: "ntp_debian.yml" + when: + - ansible_os_family == 'Debian' + - ntp_service_enabled + +- name: include_tasks "ntp_rpm.yml" + include_tasks: "ntp_rpm.yml" + when: + - ansible_os_family in ['RedHat', 'Suse'] + - ntp_service_enabled \ No newline at end of file diff --git a/roles/ceph-common/tasks/misc/ntp_debian.yml b/roles/ceph-infra/tasks/ntp_debian.yml similarity index 100% rename from roles/ceph-common/tasks/misc/ntp_debian.yml rename to roles/ceph-infra/tasks/ntp_debian.yml diff --git a/roles/ceph-common/tasks/misc/ntp_rpm.yml b/roles/ceph-infra/tasks/ntp_rpm.yml similarity index 100% rename from roles/ceph-common/tasks/misc/ntp_rpm.yml rename to roles/ceph-infra/tasks/ntp_rpm.yml diff --git a/site.yml.sample b/site.yml.sample index 6ead1290a..769aac56f 100644 --- a/site.yml.sample +++ b/site.yml.sample @@ -75,6 +75,7 @@ roles: - ceph-defaults - ceph-validate + - ceph-infra - hosts: mons From 0fb8812e47049a42999ba208403f28346bc18a16 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 9 Oct 2018 13:35:17 -0400 Subject: [PATCH 048/105] infra: update firewall rules, add cluster_network for osds At the moment, all daemons accept connections from 0.0.0.0. We should at least restrict to public_network and add cluster_network for OSDs. 
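For illustration only (the subnets and the zone below are made-up values, and the Ansible firewalld module may express the rules differently internally), the intent is roughly what one would get by hand with:

    firewall-cmd --permanent --zone=public \
      --add-rich-rule='rule family="ipv4" source address="192.168.1.0/24" service name="ceph-mon" accept'
    firewall-cmd --permanent --zone=public \
      --add-rich-rule='rule family="ipv4" source address="192.168.2.0/24" service name="ceph" accept'
    firewall-cmd --reload

i.e. the ceph services stay reachable from the public (and, for OSDs, cluster) networks only, instead of from anywhere.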
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1541840 Signed-off-by: Guillaume Abrioux --- roles/ceph-infra/tasks/configure_firewall_rpm.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/roles/ceph-infra/tasks/configure_firewall_rpm.yml b/roles/ceph-infra/tasks/configure_firewall_rpm.yml index 7a4c6c73e..301c67444 100644 --- a/roles/ceph-infra/tasks/configure_firewall_rpm.yml +++ b/roles/ceph-infra/tasks/configure_firewall_rpm.yml @@ -22,6 +22,7 @@ firewalld: service: ceph-mon zone: "{{ ceph_mon_firewall_zone }}" + source: "{{ public_network }}" permanent: true immediate: false # if true then fails in case firewalld is stopped state: enabled @@ -37,6 +38,7 @@ firewalld: service: ceph zone: "{{ ceph_mgr_firewall_zone }}" + source: "{{ public_network }}" permanent: true immediate: false # if true then fails in case firewalld is stopped state: enabled @@ -53,9 +55,13 @@ firewalld: service: ceph zone: "{{ ceph_osd_firewall_zone }}" + source: "{{ item }}" permanent: true immediate: false # if true then fails in case firewalld is stopped state: enabled + with_items: + - "{{ public_network }}" + - "{{ cluster_network }}" notify: restart firewalld when: - osd_group_name is defined @@ -68,6 +74,7 @@ firewalld: port: "{{ radosgw_frontend_port }}/tcp" zone: "{{ ceph_rgw_firewall_zone }}" + source: "{{ public_network }}" permanent: true immediate: false # if true then fails in case firewalld is stopped state: enabled @@ -83,6 +90,7 @@ firewalld: service: ceph zone: "{{ ceph_mds_firewall_zone }}" + source: "{{ public_network }}" permanent: true immediate: false # if true then fails in case firewalld is stopped state: enabled @@ -98,6 +106,7 @@ firewalld: service: nfs zone: "{{ ceph_nfs_firewall_zone }}" + source: "{{ public_network }}" permanent: true immediate: false # if true then fails in case firewalld is stopped state: enabled @@ -113,6 +122,7 @@ firewalld: port: "111/tcp" zone: "{{ ceph_nfs_firewall_zone }}" + source: "{{ public_network }}" permanent: true immediate: false # if true then fails in case firewalld is stopped state: enabled @@ -128,6 +138,7 @@ firewalld: port: "{{ restapi_port }}/tcp" zone: "{{ ceph_restapi_firewall_zone }}" + source: "{{ public_network }}" permanent: true immediate: false # if true then fails in case firewalld is stopped state: enabled @@ -143,6 +154,7 @@ firewalld: service: ceph zone: "{{ ceph_rbdmirror_firewall_zone }}" + source: "{{ public_network }}" permanent: true immediate: false # if true then fails in case firewalld is stopped state: enabled @@ -158,6 +170,7 @@ firewalld: port: "5001/tcp" zone: "{{ ceph_iscsi_firewall_zone }}" + source: "{{ public_network }}" permanent: true immediate: false # if true then fails in case firewalld is stopped state: enabled From f8a7ffb08517be9789dd2487040455c798d81e38 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 9 Oct 2018 13:38:51 -0400 Subject: [PATCH 049/105] infra: add firewall configuration for containerized deployment firewalld is available on atomic so there is no reason to not apply firewall configuration. 
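As a quick manual sanity check on an Atomic host (these commands are not part of the role), one can confirm firewalld is present and running before the rules are applied:

    systemctl is-enabled firewalld
    firewall-cmd --state
    firewall-cmd --list-all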
Signed-off-by: Guillaume Abrioux --- .../{configure_firewall_rpm.yml => configure_firewallyml} | 4 ++++ roles/ceph-infra/tasks/main.yml | 6 +++--- site-docker.yml.sample | 1 + 3 files changed, 8 insertions(+), 3 deletions(-) rename roles/ceph-infra/tasks/{configure_firewall_rpm.yml => configure_firewallyml} (97%) diff --git a/roles/ceph-infra/tasks/configure_firewall_rpm.yml b/roles/ceph-infra/tasks/configure_firewallyml similarity index 97% rename from roles/ceph-infra/tasks/configure_firewall_rpm.yml rename to roles/ceph-infra/tasks/configure_firewallyml index 301c67444..c0e41d823 100644 --- a/roles/ceph-infra/tasks/configure_firewall_rpm.yml +++ b/roles/ceph-infra/tasks/configure_firewallyml @@ -9,6 +9,8 @@ changed_when: false tags: - firewall + when: + - not containerized_deployment - name: start firewalld service: @@ -16,7 +18,9 @@ state: started enabled: yes when: + - not firewalld_pkg_query.skipped - firewalld_pkg_query.rc == 0 + or is_atomic - name: open monitor ports firewalld: diff --git a/roles/ceph-infra/tasks/main.yml b/roles/ceph-infra/tasks/main.yml index 418c257b6..af5fd6fcd 100644 --- a/roles/ceph-infra/tasks/main.yml +++ b/roles/ceph-infra/tasks/main.yml @@ -1,6 +1,6 @@ --- -- name: include_tasks configure_firewall_rpm.yml - include_tasks: configure_firewall_rpm.yml +- name: include_tasks configure_firewall.yml + include_tasks: configure_firewall.yml when: - configure_firewall - ansible_os_family in ['RedHat', 'Suse'] @@ -15,4 +15,4 @@ include_tasks: "ntp_rpm.yml" when: - ansible_os_family in ['RedHat', 'Suse'] - - ntp_service_enabled \ No newline at end of file + - ntp_service_enabled diff --git a/site-docker.yml.sample b/site-docker.yml.sample index cba5cd896..ebf6927cc 100644 --- a/site-docker.yml.sample +++ b/site-docker.yml.sample @@ -54,6 +54,7 @@ - role: ceph-defaults tags: [with_pkg, fetch_container_image] - role: ceph-validate + - role: ceph-infra - role: ceph-handler - role: ceph-docker-common tags: [with_pkg, fetch_container_image] From f666902d52e5456b10fb37971720266d393c4bd1 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 9 Oct 2018 14:02:04 -0400 Subject: [PATCH 050/105] infra: add tags for each subcomponent This way we can skip one specific component if needed. Signed-off-by: Guillaume Abrioux --- roles/ceph-infra/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/ceph-infra/tasks/main.yml b/roles/ceph-infra/tasks/main.yml index af5fd6fcd..8bec42420 100644 --- a/roles/ceph-infra/tasks/main.yml +++ b/roles/ceph-infra/tasks/main.yml @@ -4,15 +4,18 @@ when: - configure_firewall - ansible_os_family in ['RedHat', 'Suse'] + tags: configure_firewall - name: include_tasks "ntp_debian.yml" include_tasks: "ntp_debian.yml" when: - ansible_os_family == 'Debian' - ntp_service_enabled + tags: configure_ntp - name: include_tasks "ntp_rpm.yml" include_tasks: "ntp_rpm.yml" when: - ansible_os_family in ['RedHat', 'Suse'] - ntp_service_enabled + tags: configure_ntp \ No newline at end of file From 678e1553285124c8c30e97f746613d5587701acc Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Wed, 10 Oct 2018 12:30:26 -0400 Subject: [PATCH 051/105] infra: fix a typo in filename configure_firewall is missing its dot. 
Signed-off-by: Guillaume Abrioux --- .../tasks/{configure_firewallyml => configure_firewall.yml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename roles/ceph-infra/tasks/{configure_firewallyml => configure_firewall.yml} (100%) diff --git a/roles/ceph-infra/tasks/configure_firewallyml b/roles/ceph-infra/tasks/configure_firewall.yml similarity index 100% rename from roles/ceph-infra/tasks/configure_firewallyml rename to roles/ceph-infra/tasks/configure_firewall.yml From aa2c1b27e34b380e07c903f00ad98aff6fa13693 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Mon, 9 Jul 2018 16:58:35 +0200 Subject: [PATCH 052/105] ceph-osd: ceph-volume container support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Sébastien Han --- library/ceph_volume.py | 90 +++++++++++++++------ roles/ceph-osd/tasks/main.yml | 6 +- roles/ceph-osd/tasks/scenarios/lvm.yml | 28 ++++++- roles/ceph-osd/templates/ceph-osd-run.sh.j2 | 5 ++ 4 files changed, 102 insertions(+), 27 deletions(-) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index 2d281b1b8..0e965ecb2 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -38,7 +38,7 @@ options: description: - The action to take. Either creating OSDs or zapping devices. required: true - choices: ['create', 'zap', 'batch', 'list'] + choices: ['create', 'zap', 'batch', 'prepare', 'activate', 'list'] default: create data: description: @@ -65,7 +65,7 @@ options: required: false db_vg: description: - - If db is a lv, this must be the name of the volume group it belongs to. + - If db is a lv, this must be the name of the volume group it belongs to. # noqa E501 - Only applicable if objectstore is 'bluestore'. required: false wal: @@ -75,7 +75,7 @@ options: required: false wal_vg: description: - - If wal is a lv, this must be the name of the volume group it belongs to. + - If wal is a lv, this must be the name of the volume group it belongs to. # noqa E501 - Only applicable if objectstore is 'bluestore'. required: false crush_device_class: @@ -117,7 +117,12 @@ options: - Results will be returned in json format. - Only applicable if action is 'batch'. required: false - + containerized: + description: + - Wether or not this is a containerized cluster. The value is + assigned or not depending on how the playbook runs. 
+ required: false + default: None author: - Andrew Schoen (@andrewschoen) @@ -130,23 +135,27 @@ EXAMPLES = ''' data: data-lv data_vg: data-vg journal: /dev/sdc1 + action: create - name: set up a bluestore osd with a raw device for data ceph_volume: objectstore: bluestore data: /dev/sdc + action: create -- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db + +- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa E501 ceph_volume: objectstore: bluestore data: data-lv data_vg: data-vg db: /dev/sdc1 wal: /dev/sdc2 + action: create ''' -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import AnsibleModule # noqa 4502 def get_data(data, data_vg): @@ -231,7 +240,8 @@ def batch(module): report = module.params['report'] if not batch_devices: - module.fail_json(msg='batch_devices must be provided if action is "batch"', changed=False, rc=1) + module.fail_json( + msg='batch_devices must be provided if action is "batch"', changed=False, rc=1) # noqa 4502 cmd = [ 'ceph-volume', @@ -326,7 +336,28 @@ def batch(module): module.exit_json(**result) -def create_osd(module): +def ceph_volume_cmd(subcommand, containerized, cluster=None): + cmd = ['ceph-volume'] + if cluster: + cmd.extend(["--cluster", cluster]) + cmd.append('lvm') + cmd.append(subcommand) + + if containerized: + cmd = containerized.split() + cmd + + return cmd + + +def activate_osd(module, containerized=None): + subcommand = "activate" + cmd = ceph_volume_cmd(subcommand) + cmd.append("--all") + + return True + + +def prepare_osd(module): cluster = module.params['cluster'] objectstore = module.params['objectstore'] data = module.params['data'] @@ -339,16 +370,12 @@ def create_osd(module): wal_vg = module.params.get('wal_vg', None) crush_device_class = module.params.get('crush_device_class', None) dmcrypt = module.params['dmcrypt'] + containerized = module.params.get('containerized', None) + subcommand = "prepare" - cmd = [ - 'ceph-volume', - '--cluster', - cluster, - 'lvm', - 'create', - '--%s' % objectstore, - '--data', - ] + cmd = ceph_volume_cmd(subcommand, containerized, cluster) + cmd.extend(["--%s" % objectstore]) + cmd.append("--data") data = get_data(data, data_vg) cmd.append(data) @@ -387,11 +414,17 @@ def create_osd(module): # check to see if osd already exists # FIXME: this does not work when data is a raw device - # support for 'lvm list' and raw devices was added with https://github.com/ceph/ceph/pull/20620 but + # support for 'lvm list' and raw devices + # was added with https://github.com/ceph/ceph/pull/20620 but # has not made it to a luminous release as of 12.2.4 - rc, out, err = module.run_command(["ceph-volume", "lvm", "list", data], encoding=None) + ceph_volume_list_cmd = ["ceph-volume", "lvm", "list", data] + if containerized: + ceph_volume_list_cmd = containerized.split() + ceph_volume_list_cmd + + rc, out, err = module.run_command(ceph_volume_list_cmd, encoding=None) if rc == 0: - result["stdout"] = "skipped, since {0} is already used for an osd".format(data) + result["stdout"] = "skipped, since {0} is already used for an osd".format( # noqa E501 + data) result['rc'] = 0 module.exit_json(**result) @@ -498,8 +531,10 @@ def zap_devices(module): def run_module(): module_args = dict( cluster=dict(type='str', required=False, default='ceph'), - objectstore=dict(type='str', required=False, choices=['bluestore', 'filestore'], default='bluestore'), - action=dict(type='str', required=False, choices=['create', 
'zap', 'batch', 'list'], default='create'), + objectstore=dict(type='str', required=False, choices=[ + 'bluestore', 'filestore'], default='bluestore'), + action=dict(type='str', required=False, choices=[ + 'create', 'zap', 'batch', 'prepare', 'activate', 'list'], default='create'), # noqa 4502 data=dict(type='str', required=False), data_vg=dict(type='str', required=False), journal=dict(type='str', required=False), @@ -515,6 +550,7 @@ def run_module(): journal_size=dict(type='str', required=False, default="5120"), block_db_size=dict(type='str', required=False, default="-1"), report=dict(type='bool', required=False, default=False), + containerized=dict(type='str', required=False, default=False), ) module = AnsibleModule( @@ -525,7 +561,12 @@ def run_module(): action = module.params['action'] if action == "create": - create_osd(module) + prepare_osd(module) + activate_osd(module) + elif action == "prepare": + prepare_osd(module) + elif action == "activate": + activate_osd(module) elif action == "zap": zap_devices(module) elif action == "batch": @@ -533,7 +574,8 @@ def run_module(): elif action == "list": _list(module) - module.fail_json(msg='State must either be "present" or "absent".', changed=False, rc=1) + module.fail_json( + msg='State must either be "present" or "absent".', changed=False, rc=1) def main(): diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index d7ed8e83a..3b88fe19d 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -47,14 +47,16 @@ when: - osd_scenario == 'lvm' - lvm_volumes|length > 0 - - not containerized_deployment + # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) + static: False - name: include_tasks scenarios/lvm-batch.yml include_tasks: scenarios/lvm-batch.yml when: - osd_scenario == 'lvm' - devices|length > 0 - - not containerized_deployment + # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) + static: False - name: include_tasks activate_osds.yml include_tasks: activate_osds.yml diff --git a/roles/ceph-osd/tasks/scenarios/lvm.yml b/roles/ceph-osd/tasks/scenarios/lvm.yml index ed53967fd..67ad9c341 100644 --- a/roles/ceph-osd/tasks/scenarios/lvm.yml +++ b/roles/ceph-osd/tasks/scenarios/lvm.yml @@ -1,4 +1,28 @@ --- +- name: set_fact docker_exec_prepare_cmd + set_fact: + docker_exec_prepare_cmd: "docker exec ceph-volume-prepare" + when: + - containerized_deployment + +- name: run a ceph-volume prepare container (sleep 3000) + command: > + docker run \ + --rm \ + --privileged=true \ + --net=host \ + -v /dev:/dev \ + -d \ + -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \ + -v /var/lib/ceph/:/var/lib/ceph/:z \ + -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \ + --name ceph-volume-prepare \ + --entrypoint=sleep \ + {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ + 3000 + changed_when: false + when: + - containerized_deployment - name: "use ceph-volume to create {{ osd_objectstore }} osds" ceph_volume: @@ -14,6 +38,8 @@ wal_vg: "{{ item.wal_vg|default(omit) }}" crush_device_class: "{{ item.crush_device_class|default(omit) }}" dmcrypt: "{{ dmcrypt|default(omit) }}" + containerized: "{{ docker_exec_prepare_cmd | default(False) }}" + action: "{{ 'prepare' if containerized_deployment else 'create' }}" environment: CEPH_VOLUME_DEBUG: 1 - with_items: "{{ lvm_volumes }}" + with_items: "{{ lvm_volumes }}" \ No newline at end of file diff --git 
a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 index fc8f0a30b..ed3f47a3f 100644 --- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 +++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 @@ -107,7 +107,12 @@ expose_partitions "$1" {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%} -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \ {% endif -%} + {% if osd_scenario == 'lvm' -%} + -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \ + -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \ + {% else -%} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \ + {% endif -%} {{ ceph_osd_docker_extra_env }} \ --name=ceph-osd-{{ ansible_hostname }}-${1} \ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} From 3ddcc9af16bf3ac9938d9d6db9dcf40f5ccdd1b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Mon, 16 Jul 2018 18:09:33 +0200 Subject: [PATCH 053/105] ceph_volume: try to get ride of the dummy container MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we run on a containerized deployment we pass an env variable which contains the container image. Signed-off-by: Sébastien Han --- library/ceph_volume.py | 55 ++++++++++++++----- .../tasks/docker/start_docker_osd.yml | 21 ++++++- roles/ceph-osd/tasks/scenarios/lvm.yml | 27 +-------- roles/ceph-osd/templates/ceph-osd-run.sh.j2 | 21 ++++++- 4 files changed, 81 insertions(+), 43 deletions(-) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index 0e965ecb2..e7c573fcc 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -1,8 +1,8 @@ #!/usr/bin/python import datetime -import json import copy - +import json +import os ANSIBLE_METADATA = { 'metadata_version': '1.0', @@ -158,6 +158,20 @@ EXAMPLES = ''' from ansible.module_utils.basic import AnsibleModule # noqa 4502 +def container_exec(binary, container_image): + ''' + Build the CLI to run a command inside a container + ''' + + command_exec = ["docker", "run", "--rm", "--privileged", "--net=host", + "-v", "/dev:/dev", "-v", "/etc/ceph:/etc/ceph:z", + "-v", "/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket", + "-v", "/var/lib/ceph/:/var/lib/ceph/:z", + os.path.join("--entrypoint=" + binary), + container_image] + return command_exec + + def get_data(data, data_vg): if data_vg: data = "{0}/{1}".format(data_vg, data) @@ -336,20 +350,26 @@ def batch(module): module.exit_json(**result) -def ceph_volume_cmd(subcommand, containerized, cluster=None): - cmd = ['ceph-volume'] +def ceph_volume_cmd(subcommand, container_image, cluster=None): + + if container_image: + binary = "ceph-volume" + cmd = container_exec( + binary, container_image) + else: + binary = ["ceph-volume"] + cmd = binary + if cluster: cmd.extend(["--cluster", cluster]) + cmd.append('lvm') cmd.append(subcommand) - if containerized: - cmd = containerized.split() + cmd - return cmd -def activate_osd(module, containerized=None): +def activate_osd(module, container_image=None): subcommand = "activate" cmd = ceph_volume_cmd(subcommand) cmd.append("--all") @@ -370,10 +390,14 @@ def prepare_osd(module): wal_vg = module.params.get('wal_vg', None) crush_device_class = module.params.get('crush_device_class', None) dmcrypt = module.params['dmcrypt'] - containerized = module.params.get('containerized', None) subcommand = "prepare" - cmd = ceph_volume_cmd(subcommand, containerized, cluster) + if "CEPH_CONTAINER_IMAGE" in os.environ: + container_image = 
os.getenv("CEPH_CONTAINER_IMAGE") + else: + container_image = None + + cmd = ceph_volume_cmd(subcommand, container_image, cluster) cmd.extend(["--%s" % objectstore]) cmd.append("--data") @@ -417,9 +441,14 @@ def prepare_osd(module): # support for 'lvm list' and raw devices # was added with https://github.com/ceph/ceph/pull/20620 but # has not made it to a luminous release as of 12.2.4 - ceph_volume_list_cmd = ["ceph-volume", "lvm", "list", data] - if containerized: - ceph_volume_list_cmd = containerized.split() + ceph_volume_list_cmd + ceph_volume_list_cmd_args = ["lvm", "list", data] + if container_image: + binary = "ceph-volume" + ceph_volume_list_cmd = container_exec( + binary, container_image) + ceph_volume_list_cmd_args + else: + binary = ["ceph-volume"] + ceph_volume_list_cmd = binary + ceph_volume_list_cmd_args rc, out, err = module.run_command(ceph_volume_list_cmd, encoding=None) if rc == 0: diff --git a/roles/ceph-osd/tasks/docker/start_docker_osd.yml b/roles/ceph-osd/tasks/docker/start_docker_osd.yml index c24e7db01..403bcc43a 100644 --- a/roles/ceph-osd/tasks/docker/start_docker_osd.yml +++ b/roles/ceph-osd/tasks/docker/start_docker_osd.yml @@ -38,10 +38,27 @@ notify: - restart ceph osds +- name: collect osd ids + shell: > + docker run --rm + --privileged=true + -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket + -v /etc/ceph:/etc/ceph:z + -v /dev:/dev + --entrypoint=ceph-volume + {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} + lvm list --format json | python -c 'import sys, json; print("\n".join(json.load(sys.stdin).keys()))' + changed_when: false + failed_when: false + register: ceph_osd_ids + when: + - containerized_deployment + - osd_scenario == 'lvm' + - name: systemd start osd container systemd: - name: ceph-osd@{{ item | regex_replace('/dev/', '') }} + name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' else item }} state: started enabled: yes daemon_reload: yes - with_items: "{{ devices }}" + with_items: "{{ devices if osd_scenario != 'lvm' else ceph_osd_ids.stdout_lines }}" \ No newline at end of file diff --git a/roles/ceph-osd/tasks/scenarios/lvm.yml b/roles/ceph-osd/tasks/scenarios/lvm.yml index 67ad9c341..bff1cf4cb 100644 --- a/roles/ceph-osd/tasks/scenarios/lvm.yml +++ b/roles/ceph-osd/tasks/scenarios/lvm.yml @@ -1,29 +1,4 @@ --- -- name: set_fact docker_exec_prepare_cmd - set_fact: - docker_exec_prepare_cmd: "docker exec ceph-volume-prepare" - when: - - containerized_deployment - -- name: run a ceph-volume prepare container (sleep 3000) - command: > - docker run \ - --rm \ - --privileged=true \ - --net=host \ - -v /dev:/dev \ - -d \ - -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \ - -v /var/lib/ceph/:/var/lib/ceph/:z \ - -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \ - --name ceph-volume-prepare \ - --entrypoint=sleep \ - {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ - 3000 - changed_when: false - when: - - containerized_deployment - - name: "use ceph-volume to create {{ osd_objectstore }} osds" ceph_volume: cluster: "{{ cluster }}" @@ -38,8 +13,8 @@ wal_vg: "{{ item.wal_vg|default(omit) }}" crush_device_class: "{{ item.crush_device_class|default(omit) }}" dmcrypt: "{{ dmcrypt|default(omit) }}" - containerized: "{{ docker_exec_prepare_cmd | default(False) }}" action: "{{ 'prepare' if containerized_deployment else 'create' }}" environment: CEPH_VOLUME_DEBUG: 1 + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + 
ceph_docker_image_tag if containerized_deployment else None }}" with_items: "{{ lvm_volumes }}" \ No newline at end of file diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 index ed3f47a3f..bd08f35cb 100644 --- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 +++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 @@ -1,8 +1,13 @@ #!/bin/bash # {{ ansible_managed }} + +############# +# VARIABLES # +############# DOCKER_ENV="" + ############# # FUNCTIONS # ############# @@ -50,6 +55,16 @@ function expose_partitions { expose_partitions "$1" +{% if osd_scenario == 'lvm' -%} +function find_device_from_id { + OSD_ID="$1" + LV=$(docker run --privileged=true -v /dev:/dev -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /etc/ceph:/etc/ceph:z --entrypoint=ceph-volume {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} lvm list --format json | python -c "import sys, json; print(json.load(sys.stdin)[\"$OSD_ID\"][0][\"path\"])") + OSD_DEVICE=$(lvdisplay -m $LV | awk '/Physical volume/ {print $3}') +} + +find_device_from_id $@ +{% endif -%} + ######## # MAIN # @@ -103,16 +118,18 @@ expose_partitions "$1" -e OSD_DMCRYPT=1 \ {% endif -%} -e CLUSTER={{ cluster }} \ - -e OSD_DEVICE=/dev/${1} \ {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%} -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \ {% endif -%} {% if osd_scenario == 'lvm' -%} -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \ -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \ + -e OSD_DEVICE="$OSD_DEVICE" \ + --name=ceph-osd-"$OSD_ID" \ {% else -%} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \ + -e OSD_DEVICE=/dev/"${1}" \ + --name=ceph-osd-{{ ansible_hostname }}-"${1}" \ {% endif -%} {{ ceph_osd_docker_extra_env }} \ - --name=ceph-osd-{{ ansible_hostname }}-${1} \ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} From e39fc4f6cef33104df8cb51dc61661ff30e2c2b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 28 Sep 2018 13:06:18 +0200 Subject: [PATCH 054/105] ceph_volume: add container support for batch command MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The batch option got recently added, while rebasing this patch it was necessary to implement it. So now, the batch option can work on containerized environments. 
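For reference, when CEPH_CONTAINER_IMAGE is set the module ends up invoking ceph-volume through docker roughly as follows (image name and devices are placeholders; the exact argument order is assembled by ceph_volume_cmd()):

    docker run --rm --privileged --net=host \
      -v /dev:/dev -v /etc/ceph:/etc/ceph:z \
      -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
      -v /var/lib/ceph/:/var/lib/ceph/:z \
      --entrypoint=ceph-volume docker.io/ceph/daemon:latest \
      --cluster ceph lvm batch --bluestore --yes /dev/sdb /dev/sdc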
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1630977 Signed-off-by: Sébastien Han --- library/ceph_volume.py | 19 ++++++++++--------- roles/ceph-osd/tasks/scenarios/lvm-batch.yml | 1 + 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index e7c573fcc..cca05cab6 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -252,20 +252,21 @@ def batch(module): journal_size = module.params['journal_size'] block_db_size = module.params['block_db_size'] report = module.params['report'] + subcommand = 'batch' if not batch_devices: module.fail_json( msg='batch_devices must be provided if action is "batch"', changed=False, rc=1) # noqa 4502 - cmd = [ - 'ceph-volume', - '--cluster', - cluster, - 'lvm', - 'batch', - '--%s' % objectstore, - '--yes', - ] + if "CEPH_CONTAINER_IMAGE" in os.environ: + container_image = os.getenv("CEPH_CONTAINER_IMAGE") + else: + container_image = None + + cmd = ceph_volume_cmd(subcommand, container_image, cluster) + cmd.extend(["--%s" % objectstore]) + cmd.extend("--yes") + cmd.extend("--no-systemd ") if crush_device_class: cmd.extend(["--crush-device-class", crush_device_class]) diff --git a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml index cff4c18ec..c1f1d0295 100644 --- a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml +++ b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml @@ -13,3 +13,4 @@ action: "batch" environment: CEPH_VOLUME_DEBUG: 1 + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}" From ece9e9812ea4a9d0c0faf485d780401147cc16c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 28 Sep 2018 17:19:46 +0200 Subject: [PATCH 055/105] osd: do not use expose_partitions on lvm MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit expose_partitions is only needed on ceph-disk OSDs so we don't need to activate this code when running lvm prepared OSDs. Signed-off-by: Sébastien Han --- roles/ceph-osd/templates/ceph-osd-run.sh.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 index bd08f35cb..b549c8f1f 100644 --- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 +++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 @@ -11,6 +11,7 @@ DOCKER_ENV="" ############# # FUNCTIONS # ############# +{% if osd_scenario != 'lvm' -%} {% if disk_list.get('rc') == 0 -%} function expose_partitions () { DOCKER_ENV=$(docker run --rm --net=host --name expose_partitions_${1} --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=/dev/${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list) @@ -52,6 +53,7 @@ function expose_partitions { fi } {% endif -%} +{% endif -%} expose_partitions "$1" From 5f35910ee102092faffe90980a713c5309668e20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 28 Sep 2018 18:05:42 +0200 Subject: [PATCH 056/105] osd: change unit template for ceph-volume container MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't need to pass the hostname on the container name but we can keep it simple and just call it ceph-osd-$id. 
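With the lvm scenario the systemd template instance is therefore the OSD id itself, e.g. (id 0 is only an example):

    systemctl start ceph-osd@0    # manages a container simply named ceph-osd-0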
Signed-off-by: Sébastien Han --- roles/ceph-osd/templates/ceph-osd.service.j2 | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/roles/ceph-osd/templates/ceph-osd.service.j2 b/roles/ceph-osd/templates/ceph-osd.service.j2 index 0e7dae3f0..d7297e0e8 100644 --- a/roles/ceph-osd/templates/ceph-osd.service.j2 +++ b/roles/ceph-osd/templates/ceph-osd.service.j2 @@ -5,10 +5,19 @@ After=docker.service [Service] EnvironmentFile=-/etc/environment +{% if osd_scenario == 'lvm' -%} +ExecStartPre=-/usr/bin/docker stop ceph-osd-%i +ExecStartPre=-/usr/bin/docker rm -f ceph-osd-%i +{% else %} ExecStartPre=-/usr/bin/docker stop ceph-osd-{{ ansible_hostname }}-%i ExecStartPre=-/usr/bin/docker rm -f ceph-osd-{{ ansible_hostname }}-%i +{% endif -%} ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i +{% if osd_scenario == 'lvm' -%} +ExecStop=-/usr/bin/docker stop ceph-osd-%i +{% else %} ExecStop=-/usr/bin/docker stop ceph-osd-{{ ansible_hostname }}-%i +{% endif -%} Restart=always RestartSec=10s TimeoutStartSec=120 From a948677de18d7477555cc5c4c737160a307298f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 28 Sep 2018 18:07:08 +0200 Subject: [PATCH 057/105] osd: ceph-volume activate, just pass the OSD_ID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't need to pass the device and discover the OSD ID. We have a task that gathers all the OSD ID present on that machine, so we simply re-use them and activate them. This also handles the situation when you have multiple OSDs running on the same device. Signed-off-by: Sébastien Han --- roles/ceph-osd/templates/ceph-osd-run.sh.j2 | 22 ++++++--------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 index b549c8f1f..ad55ea38c 100644 --- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 +++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2 @@ -2,17 +2,16 @@ # {{ ansible_managed }} +{% if osd_scenario != 'lvm' -%} +{% if disk_list.get('rc') == 0 -%} ############# # VARIABLES # ############# DOCKER_ENV="" - ############# # FUNCTIONS # ############# -{% if osd_scenario != 'lvm' -%} -{% if disk_list.get('rc') == 0 -%} function expose_partitions () { DOCKER_ENV=$(docker run --rm --net=host --name expose_partitions_${1} --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=/dev/${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list) } @@ -52,19 +51,10 @@ function expose_partitions { exit 1 fi } -{% endif -%} -{% endif -%} expose_partitions "$1" -{% if osd_scenario == 'lvm' -%} -function find_device_from_id { - OSD_ID="$1" - LV=$(docker run --privileged=true -v /dev:/dev -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /etc/ceph:/etc/ceph:z --entrypoint=ceph-volume {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} lvm list --format json | python -c "import sys, json; print(json.load(sys.stdin)[\"$OSD_ID\"][0][\"path\"])") - OSD_DEVICE=$(lvdisplay -m $LV | awk '/Physical volume/ {print $3}') -} - -find_device_from_id $@ +{% endif -%} {% endif -%} @@ -94,7 +84,6 @@ find_device_from_id $@ -v /var/lib/ceph:/var/lib/ceph:z \ -v /etc/ceph:/etc/ceph:z \ -v /var/run/ceph:/var/run/ceph:z \ - $DOCKER_ENV \ {% if ansible_distribution == 'Ubuntu' -%} --security-opt apparmor:unconfined \ {% endif -%} @@ -126,9 +115,10 @@ find_device_from_id $@ {% if osd_scenario == 'lvm' -%} -v 
/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \ -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \ - -e OSD_DEVICE="$OSD_DEVICE" \ - --name=ceph-osd-"$OSD_ID" \ + -e OSD_ID="$1" \ + --name=ceph-osd-"$1" \ {% else -%} + $DOCKER_ENV \ -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \ -e OSD_DEVICE=/dev/"${1}" \ --name=ceph-osd-{{ ansible_hostname }}-"${1}" \ From 0580328340fc7d53b54f5c1825b570b6c000f701 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Mon, 1 Oct 2018 15:27:06 +0200 Subject: [PATCH 058/105] validate: add warning for ceph-disk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ceph-disk will be removed in 3.3 and we encourage to start using ceph-volume as of 3.2. Signed-off-by: Sébastien Han --- roles/ceph-validate/tasks/check_system.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/ceph-validate/tasks/check_system.yml b/roles/ceph-validate/tasks/check_system.yml index 1707e7446..1bda37dee 100644 --- a/roles/ceph-validate/tasks/check_system.yml +++ b/roles/ceph-validate/tasks/check_system.yml @@ -92,3 +92,11 @@ - ansible_distribution in ['RedHat', 'CentOS'] - ansible_distribution_version < '7.4' - iscsi_gw_group_name in group_names + +- name: warn users that ceph-disk scenarios will be removed on 3.3 + debug: + msg: "You are running 3.2 and should start considering using ceph-volume to deploy your OSDs. ceph-disk based deployments will be impossible in 3.3." + run_once: true + when: + - osd_group_name in group_names + - osd_scenario != 'lvm' \ No newline at end of file From 0735d3951842528f83d39a4109e3e2bd5d34d182 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Mon, 1 Oct 2018 16:00:21 +0200 Subject: [PATCH 059/105] tests: osd adjust osd name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now we use id of the OSD instead of the device name. Signed-off-by: Sébastien Han --- tests/functional/tests/osd/test_osds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/tests/osd/test_osds.py b/tests/functional/tests/osd/test_osds.py index ed0e5a7bc..feafc219b 100644 --- a/tests/functional/tests/osd/test_osds.py +++ b/tests/functional/tests/osd/test_osds.py @@ -71,7 +71,7 @@ class TestOSDs(object): @pytest.mark.docker def test_all_docker_osds_are_up_and_in(self, node, host): - cmd = "sudo docker exec ceph-osd-{hostname}-sda ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( + cmd = "sudo docker exec ceph-osd-0 ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( hostname=node["vars"]["inventory_hostname"], cluster=node["cluster_name"] ) From 790f52f9347f5c683a2ee9007c3eec7ce82e33d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Tue, 2 Oct 2018 17:37:06 +0200 Subject: [PATCH 060/105] ceph-handler: change osd container check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that the container is named ceph-osd@ looking for something that contains a host is not necessary. This is also backward compatible as it will continue to match container names with hostname in them. 
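docker's name filter matches substrings, so the shorter filter still finds both naming schemes (container names below are only examples):

    docker ps -q --filter='name=ceph-osd'
    # matches ceph-osd-0 (new id-based name) as well as ceph-osd-myhost-sdb (old hostname-based name)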
Signed-off-by: Sébastien Han --- roles/ceph-handler/tasks/check_running_containers.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ceph-handler/tasks/check_running_containers.yml b/roles/ceph-handler/tasks/check_running_containers.yml index 111d11274..15acc5df2 100644 --- a/roles/ceph-handler/tasks/check_running_containers.yml +++ b/roles/ceph-handler/tasks/check_running_containers.yml @@ -9,7 +9,7 @@ - inventory_hostname in groups.get(mon_group_name, []) - name: check for an osd container - command: "docker ps -q --filter='name=ceph-osd-{{ ansible_hostname }}'" + command: "docker ps -q --filter='name=ceph-osd'" register: ceph_osd_container_stat changed_when: false failed_when: false From 2bea8d8ecf3e97d8635ecb3f81ebf838ffa176ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Tue, 2 Oct 2018 18:10:19 +0200 Subject: [PATCH 061/105] handler: add support for ceph-volume containerized restart MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The restart script wasn't working with the current new addition of ceph-volume in container where now OSDs have the OSD id name in the container name. Signed-off-by: Sébastien Han --- roles/ceph-handler/templates/restart_osd_daemon.sh.j2 | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 b/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 index 15b255900..09de06d5c 100644 --- a/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 +++ b/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 @@ -66,12 +66,16 @@ for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph- # We need to wait because it may take some time for the socket to actually exists COUNT=10 # Wait and ensure the socket exists after restarting the daemon - {% if containerized_deployment -%} + {% if containerized_deployment and osd_scenario != 'lvm' -%} id=$(get_dev_name "$unit") container_id=$(get_docker_id_from_dev_name "$id") wait_for_socket_in_docker "$container_id" osd_id=$whoami docker_exec="docker exec $container_id" + {% elif containerized_deployment and osd_scenario == 'lvm' %} + osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+') + container_id=$(get_docker_id_from_dev_name "ceph-osd-${osd_id}") + docker_exec="docker exec $container_id" {% else %} osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+') {% endif %} From bfe689094e4c453bb951449bbd34b1f4a8911bd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Tue, 2 Oct 2018 18:35:52 +0200 Subject: [PATCH 062/105] osd: do not run when lvm scenario MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This task was created for ceph-disk based deployments so it's not needed when osd are prepared with ceph-volume. 
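With lvm-prepared OSDs the ids are discovered directly from ceph-volume instead (see the "collect osd ids" task added earlier); roughly, with $CEPH_IMAGE standing in for {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}:

    docker run --rm --privileged \
      -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
      -v /etc/ceph:/etc/ceph:z -v /dev:/dev \
      --entrypoint=ceph-volume "$CEPH_IMAGE" lvm list --format json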
Signed-off-by: Sébastien Han --- roles/ceph-osd/tasks/docker/start_docker_osd.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/ceph-osd/tasks/docker/start_docker_osd.yml b/roles/ceph-osd/tasks/docker/start_docker_osd.yml index 403bcc43a..d9d197188 100644 --- a/roles/ceph-osd/tasks/docker/start_docker_osd.yml +++ b/roles/ceph-osd/tasks/docker/start_docker_osd.yml @@ -15,6 +15,8 @@ changed_when: false failed_when: false register: disk_list + when: + - osd_scenario != 'lvm' - name: generate ceph osd docker run script become: true From 05afaed803766b3dac697c05b520891ef8535ba1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 3 Oct 2018 16:52:14 +0200 Subject: [PATCH 063/105] doc: improve osd configuration section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Simply add that all the scenarios support the containerized deployment option. Signed-off-by: Sébastien Han --- docs/source/osds/scenarios.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/source/osds/scenarios.rst b/docs/source/osds/scenarios.rst index d7d074d18..c6f06a574 100644 --- a/docs/source/osds/scenarios.rst +++ b/docs/source/osds/scenarios.rst @@ -15,6 +15,9 @@ Since the Ceph mimic release, it is preferred to use the :ref:`lvm scenario ` that uses the ``ceph-volume`` provisioning tool. Any other scenario will cause deprecation warnings. +All the scenarios mentionned above support both containerized and non-containerized cluster. +As a reminder, deploying a containerized cluster can be done by setting ``containerized_deployment`` +to ``True``. .. _osd_scenario_lvm: From 90c66a5848284fdc90e6e0993abb678f30440a49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 12 Jul 2018 20:30:59 +0200 Subject: [PATCH 064/105] ci: test lvm in containerized MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Sébastien Han --- .../7/bs-lvm-osds-container/Vagrantfile | 1 + .../bs-lvm-osds-container/ceph-override.json | 1 + .../7/bs-lvm-osds-container/group_vars/all | 25 +++++++ .../centos/7/bs-lvm-osds-container/hosts | 8 ++ .../vagrant_variables.yml | 73 +++++++++++++++++++ .../centos/7/lvm-batch-container/Vagrantfile | 1 + .../7/lvm-batch-container/ceph-override.json | 9 +++ .../7/lvm-batch-container/group_vars/all | 23 ++++++ .../centos/7/lvm-batch-container/hosts | 8 ++ .../lvm-batch-container/vagrant_variables.yml | 73 +++++++++++++++++++ .../centos/7/lvm-osds-container/Vagrantfile | 1 + .../7/lvm-osds-container/ceph-override.json | 1 + .../7/lvm-osds-container/group_vars/all | 28 +++++++ .../centos/7/lvm-osds-container/hosts | 8 ++ .../lvm-osds-container/vagrant_variables.yml | 73 +++++++++++++++++++ tox.ini | 10 ++- 16 files changed, 342 insertions(+), 1 deletion(-) create mode 120000 tests/functional/centos/7/bs-lvm-osds-container/Vagrantfile create mode 120000 tests/functional/centos/7/bs-lvm-osds-container/ceph-override.json create mode 100644 tests/functional/centos/7/bs-lvm-osds-container/group_vars/all create mode 100644 tests/functional/centos/7/bs-lvm-osds-container/hosts create mode 100644 tests/functional/centos/7/bs-lvm-osds-container/vagrant_variables.yml create mode 120000 tests/functional/centos/7/lvm-batch-container/Vagrantfile create mode 100644 tests/functional/centos/7/lvm-batch-container/ceph-override.json create mode 100644 tests/functional/centos/7/lvm-batch-container/group_vars/all create mode 100644 tests/functional/centos/7/lvm-batch-container/hosts 
create mode 100644 tests/functional/centos/7/lvm-batch-container/vagrant_variables.yml create mode 120000 tests/functional/centos/7/lvm-osds-container/Vagrantfile create mode 120000 tests/functional/centos/7/lvm-osds-container/ceph-override.json create mode 100644 tests/functional/centos/7/lvm-osds-container/group_vars/all create mode 100644 tests/functional/centos/7/lvm-osds-container/hosts create mode 100644 tests/functional/centos/7/lvm-osds-container/vagrant_variables.yml diff --git a/tests/functional/centos/7/bs-lvm-osds-container/Vagrantfile b/tests/functional/centos/7/bs-lvm-osds-container/Vagrantfile new file mode 120000 index 000000000..dfd7436c9 --- /dev/null +++ b/tests/functional/centos/7/bs-lvm-osds-container/Vagrantfile @@ -0,0 +1 @@ +../../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/centos/7/bs-lvm-osds-container/ceph-override.json b/tests/functional/centos/7/bs-lvm-osds-container/ceph-override.json new file mode 120000 index 000000000..775cb006a --- /dev/null +++ b/tests/functional/centos/7/bs-lvm-osds-container/ceph-override.json @@ -0,0 +1 @@ +../cluster/ceph-override.json \ No newline at end of file diff --git a/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all b/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all new file mode 100644 index 000000000..1de1598e6 --- /dev/null +++ b/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all @@ -0,0 +1,25 @@ +--- + +ceph_origin: repository +ceph_repository: community +containerized_deployment: True +cluster: test +public_network: "192.168.39.0/24" +cluster_network: "192.168.40.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_size: 1 diff --git a/tests/functional/centos/7/bs-lvm-osds-container/hosts b/tests/functional/centos/7/bs-lvm-osds-container/hosts new file mode 100644 index 000000000..d6c89012a --- /dev/null +++ b/tests/functional/centos/7/bs-lvm-osds-container/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 diff --git a/tests/functional/centos/7/bs-lvm-osds-container/vagrant_variables.yml b/tests/functional/centos/7/bs-lvm-osds-container/vagrant_variables.yml new file mode 100644 index 000000000..bd8309495 --- /dev/null +++ b/tests/functional/centos/7/bs-lvm-osds-container/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# Deploy RESTAPI on each of the Monitors +restapi: true + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.39 +cluster_subnet: 192.168.40 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. 
They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/atomic-host +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/centos/7/lvm-batch-container/Vagrantfile b/tests/functional/centos/7/lvm-batch-container/Vagrantfile new file mode 120000 index 000000000..dfd7436c9 --- /dev/null +++ b/tests/functional/centos/7/lvm-batch-container/Vagrantfile @@ -0,0 +1 @@ +../../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/centos/7/lvm-batch-container/ceph-override.json b/tests/functional/centos/7/lvm-batch-container/ceph-override.json new file mode 100644 index 000000000..1a9600a14 --- /dev/null +++ b/tests/functional/centos/7/lvm-batch-container/ceph-override.json @@ -0,0 +1,9 @@ +{ + "ceph_conf_overrides": { + "global": { + "osd_pool_default_pg_num": 12, + "osd_pool_default_size": 1 + } + }, + "ceph_mon_docker_memory_limit": "2g" +} diff --git a/tests/functional/centos/7/lvm-batch-container/group_vars/all b/tests/functional/centos/7/lvm-batch-container/group_vars/all new file mode 100644 index 000000000..0555ffa38 --- /dev/null +++ b/tests/functional/centos/7/lvm-batch-container/group_vars/all @@ -0,0 +1,23 @@ +--- + +containerized_deployment: True +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.39.0/24" +cluster_network: "192.168.40.0/24" +monitor_interface: eth1 +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +crush_device_class: test +osd_scenario: lvm +copy_admin_key: true +devices: + - /dev/sdb + - /dev/sdc +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_size: 1 diff --git a/tests/functional/centos/7/lvm-batch-container/hosts b/tests/functional/centos/7/lvm-batch-container/hosts new file mode 
100644 index 000000000..d6c89012a --- /dev/null +++ b/tests/functional/centos/7/lvm-batch-container/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 diff --git a/tests/functional/centos/7/lvm-batch-container/vagrant_variables.yml b/tests/functional/centos/7/lvm-batch-container/vagrant_variables.yml new file mode 100644 index 000000000..bd8309495 --- /dev/null +++ b/tests/functional/centos/7/lvm-batch-container/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# Deploy RESTAPI on each of the Monitors +restapi: true + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.39 +cluster_subnet: 192.168.40 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/atomic-host +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/centos/7/lvm-osds-container/Vagrantfile b/tests/functional/centos/7/lvm-osds-container/Vagrantfile new file mode 120000 index 000000000..dfd7436c9 --- /dev/null +++ b/tests/functional/centos/7/lvm-osds-container/Vagrantfile @@ -0,0 +1 @@ +../../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/centos/7/lvm-osds-container/ceph-override.json b/tests/functional/centos/7/lvm-osds-container/ceph-override.json new file mode 120000 index 000000000..775cb006a --- /dev/null +++ b/tests/functional/centos/7/lvm-osds-container/ceph-override.json @@ -0,0 +1 @@ +../cluster/ceph-override.json \ No newline at end of file diff --git a/tests/functional/centos/7/lvm-osds-container/group_vars/all b/tests/functional/centos/7/lvm-osds-container/group_vars/all new file mode 100644 index 000000000..0a29dff68 --- /dev/null +++ b/tests/functional/centos/7/lvm-osds-container/group_vars/all @@ -0,0 +1,28 @@ +--- + +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.39.0/24" +cluster_network: "192.168.40.0/24" +monitor_interface: eth1 +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +osd_scenario: lvm +copy_admin_key: true +containerized_deployment: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + journal: /dev/sdc1 + data_vg: test_group + - data: data-lv2 + journal: journal1 + data_vg: test_group + journal_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_size: 1 diff --git a/tests/functional/centos/7/lvm-osds-container/hosts b/tests/functional/centos/7/lvm-osds-container/hosts new file mode 100644 index 000000000..d6c89012a --- /dev/null +++ b/tests/functional/centos/7/lvm-osds-container/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 diff --git a/tests/functional/centos/7/lvm-osds-container/vagrant_variables.yml b/tests/functional/centos/7/lvm-osds-container/vagrant_variables.yml new file mode 100644 index 000000000..bd8309495 --- /dev/null +++ b/tests/functional/centos/7/lvm-osds-container/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# Deploy RESTAPI on each of the Monitors +restapi: true + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.39 +cluster_subnet: 192.168.40 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" 
+disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/atomic-host +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tox.ini b/tox.ini index a041bc5a7..ae1baf68b 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] envlist = {dev,jewel,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation} - {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch} + {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,bluestore_lvm_osds_container,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch,lvm_osds_container,lvm_batch_container} infra_lv_create skipsdist = True @@ -199,7 +199,11 @@ setenv= mimic: UPDATE_CEPH_STABLE_RELEASE = mimic mimic: UPDATE_CEPH_DOCKER_IMAGE_TAG = latest lvm_osds: CEPH_STABLE_RELEASE = luminous + lvm_osds: PLAYBOOK = site.yml.sample + lvm_osds_container: CEPH_STABLE_RELEASE = luminous + lvm_osds_container: PLAYBOOK = site-docker.yml.sample bluestore_lvm_osds: CEPH_STABLE_RELEASE = luminous + bluestore_lvm_osds_container: 
CEPH_STABLE_RELEASE = luminous update_cluster: ROLLING_UPDATE = True update_docker_cluster: ROLLING_UPDATE = True deps= -r{toxinidir}/tests/requirements.txt @@ -231,8 +235,11 @@ changedir= update_cluster: {toxinidir}/tests/functional/centos/7/cluster switch_to_containers: {toxinidir}/tests/functional/centos/7/cluster lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds + lvm_osds_container: {toxinidir}/tests/functional/centos/7/lvm-osds-container lvm_batch: {toxinidir}/tests/functional/centos/7/lvm-batch + lvm_batch_container: {toxinidir}/tests/functional/centos/7/lvm-batch-container bluestore_lvm_osds: {toxinidir}/tests/functional/centos/7/bs-lvm-osds + bluestore_lvm_osds_container: {toxinidir}/tests/functional/centos/7/bs-lvm-osds-container purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds ooo_collocation: {toxinidir}/tests/functional/centos/7/ooo-collocation @@ -244,6 +251,7 @@ commands= bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml + lvm_osds_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml bluestore_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml purge_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml From d2ca24eca8849a8f2df748c3f7c4e0d6885b6298 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 9 Oct 2018 16:45:05 -0400 Subject: [PATCH 065/105] tests: do not install lvm2 on atomic host we need to detect whether we are running on atomic host to not try to install lvm2 package. Signed-off-by: Guillaume Abrioux --- tests/functional/lvm_setup.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tests/functional/lvm_setup.yml b/tests/functional/lvm_setup.yml index ef30614eb..c6908a4be 100644 --- a/tests/functional/lvm_setup.yml +++ b/tests/functional/lvm_setup.yml @@ -1,15 +1,30 @@ --- - hosts: osds - gather_facts: true + gather_facts: false become: yes tasks: + - name: check if it is atomic host + stat: + path: /run/ostree-booted + register: stat_ostree + tags: + - always + + - name: set_fact is_atomic + set_fact: + is_atomic: '{{ stat_ostree.stat.exists }}' + tags: + - always + # Some images may not have lvm2 installed - name: install lvm2 package: name: lvm2 state: present + when: + - not is_atomic - name: create physical volume command: pvcreate /dev/sdb From 31a0438cb22a0397cbbf81d6b6521936ead2ca0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 3 Oct 2018 19:52:42 +0200 Subject: [PATCH 066/105] ceph_volume: refactor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit does a couple of things: * Avoid code duplication * Clarify the code * add more unit tests * add myself to the author of the module Signed-off-by: Sébastien Han --- library/ceph_volume.py | 607 +++++++++--------- library/test_ceph_volume.py | 234 +++++++ roles/ceph-osd/tasks/scenarios/lvm.yml | 3 +- .../7/bs-lvm-osds-container/group_vars/all | 6 +- .../centos/7/bs-lvm-osds/group_vars/all | 1 - .../7/lvm-batch-container/group_vars/all | 7 +- .../7/lvm-osds-container/group_vars/all | 5 + tox.ini | 6 + 8 files changed, 566 insertions(+), 303 deletions(-) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index cca05cab6..416867e6b 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -123,9 +123,14 @@ options: 
assigned or not depending on how the playbook runs. required: false default: None + list: + description: + - List potential Ceph LVM metadata on a device + required: false author: - Andrew Schoen (@andrewschoen) + - Sebastien Han ''' EXAMPLES = ''' @@ -144,7 +149,7 @@ EXAMPLES = ''' action: create -- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa E501 +- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa e501 ceph_volume: objectstore: bluestore data: data-lv @@ -158,211 +163,165 @@ EXAMPLES = ''' from ansible.module_utils.basic import AnsibleModule # noqa 4502 -def container_exec(binary, container_image): +def fatal(message, module): ''' - Build the CLI to run a command inside a container + Report a fatal error and exit ''' - command_exec = ["docker", "run", "--rm", "--privileged", "--net=host", - "-v", "/dev:/dev", "-v", "/etc/ceph:/etc/ceph:z", - "-v", "/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket", - "-v", "/var/lib/ceph/:/var/lib/ceph/:z", - os.path.join("--entrypoint=" + binary), + if module: + module.fail_json(msg=message, changed=False, rc=1) + else: + raise(Exception(message)) + + +def container_exec(binary, container_image): + ''' + Build the docker CLI to run a command inside a container + ''' + + command_exec = ['docker', 'run', '--rm', '--privileged', '--net=host', + '-v', '/run/lock/lvm:/run/lock/lvm:z', + '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + os.path.join('--entrypoint=' + binary), container_image] return command_exec +def build_ceph_volume_cmd(action, container_image, cluster=None): + ''' + Build the ceph-volume command + ''' + + if container_image: + binary = 'ceph-volume' + cmd = container_exec( + binary, container_image) + else: + binary = ['ceph-volume'] + cmd = binary + + if cluster: + cmd.extend(['--cluster', cluster]) + + cmd.append('lvm') + cmd.append(action) + + return cmd + + +def exec_command(module, cmd): + ''' + Execute command + ''' + + rc, out, err = module.run_command(cmd) + return rc, cmd, out, err + + +def is_containerized(): + ''' + Check if we are running on a containerized cluster + ''' + + if 'CEPH_CONTAINER_IMAGE' in os.environ: + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + else: + container_image = None + + return container_image + + def get_data(data, data_vg): if data_vg: - data = "{0}/{1}".format(data_vg, data) + data = '{0}/{1}'.format(data_vg, data) return data def get_journal(journal, journal_vg): if journal_vg: - journal = "{0}/{1}".format(journal_vg, journal) + journal = '{0}/{1}'.format(journal_vg, journal) return journal def get_db(db, db_vg): if db_vg: - db = "{0}/{1}".format(db_vg, db) + db = '{0}/{1}'.format(db_vg, db) return db def get_wal(wal, wal_vg): if wal_vg: - wal = "{0}/{1}".format(wal_vg, wal) + wal = '{0}/{1}'.format(wal_vg, wal) return wal -def _list(module): - cmd = [ - 'ceph-volume', - 'lvm', - 'list', - '--format=json', - ] +def batch(module, container_image): + ''' + Batch prepare OSD devices + ''' - result = dict( - changed=False, - cmd=cmd, - stdout='', - stderr='', - rc='', - start='', - end='', - delta='', - ) - - if module.check_mode: - return result - - startd = datetime.datetime.now() - - rc, out, err = module.run_command(cmd, encoding=None) - - endd = datetime.datetime.now() - delta = endd - startd - - result = dict( - cmd=cmd, - stdout=out.rstrip(b"\r\n"), - stderr=err.rstrip(b"\r\n"), - 
rc=rc, - start=str(startd), - end=str(endd), - delta=str(delta), - changed=True, - ) - - if rc != 0: - module.fail_json(msg='non-zero return code', **result) - - module.exit_json(**result) - - -def batch(module): + # get module variables cluster = module.params['cluster'] objectstore = module.params['objectstore'] - batch_devices = module.params['batch_devices'] + batch_devices = module.params.get('batch_devices', None) crush_device_class = module.params.get('crush_device_class', None) - dmcrypt = module.params['dmcrypt'] - osds_per_device = module.params['osds_per_device'] - journal_size = module.params['journal_size'] - block_db_size = module.params['block_db_size'] - report = module.params['report'] - subcommand = 'batch' + journal_size = module.params.get('journal_size', None) + block_db_size = module.params.get('block_db_size', None) + dmcrypt = module.params.get('dmcrypt', None) + osds_per_device = module.params.get('osds_per_device', None) + + if not osds_per_device: + fatal('osds_per_device must be provided if action is "batch"', module) + + if osds_per_device < 1: + fatal('osds_per_device must be greater than 0 if action is "batch"', module) # noqa E501 if not batch_devices: - module.fail_json( - msg='batch_devices must be provided if action is "batch"', changed=False, rc=1) # noqa 4502 + fatal('batch_devices must be provided if action is "batch"', module) - if "CEPH_CONTAINER_IMAGE" in os.environ: - container_image = os.getenv("CEPH_CONTAINER_IMAGE") - else: - container_image = None - - cmd = ceph_volume_cmd(subcommand, container_image, cluster) - cmd.extend(["--%s" % objectstore]) - cmd.extend("--yes") - cmd.extend("--no-systemd ") + # Build the CLI + action = 'batch' + cmd = build_ceph_volume_cmd(action, container_image, cluster) + cmd.extend(['--%s' % objectstore]) + cmd.append('--yes') if crush_device_class: - cmd.extend(["--crush-device-class", crush_device_class]) + cmd.extend(['--crush-device-class', crush_device_class]) if dmcrypt: - cmd.append("--dmcrypt") + cmd.append('--dmcrypt') if osds_per_device > 1: - cmd.extend(["--osds-per-device", osds_per_device]) + cmd.extend(['--osds-per-device', osds_per_device]) - if objectstore == "filestore": - cmd.extend(["--journal-size", journal_size]) + if objectstore == 'filestore': + cmd.extend(['--journal-size', journal_size]) - if objectstore == "bluestore" and block_db_size != "-1": - cmd.extend(["--block-db-size", block_db_size]) - - report_flags = [ - "--report", - "--format=json", - ] + if objectstore == 'bluestore' and block_db_size != '-1': + cmd.extend(['--block-db-size', block_db_size]) cmd.extend(batch_devices) - result = dict( - changed=False, - cmd=cmd, - stdout='', - stderr='', - rc='', - start='', - end='', - delta='', - ) - - if module.check_mode: - return result - - startd = datetime.datetime.now() - - report_cmd = copy.copy(cmd) - report_cmd.extend(report_flags) - - rc, out, err = module.run_command(report_cmd, encoding=None) - try: - report_result = json.loads(out) - except ValueError: - result = dict( - cmd=report_cmd, - stdout=out.rstrip(b"\r\n"), - stderr=err.rstrip(b"\r\n"), - rc=rc, - changed=True, - ) - module.fail_json(msg='non-zero return code', **result) - - if not report: - rc, out, err = module.run_command(cmd, encoding=None) - else: - cmd = report_cmd - - endd = datetime.datetime.now() - delta = endd - startd - - changed = True - if not report: - changed = report_result['changed'] - - result = dict( - cmd=cmd, - stdout=out.rstrip(b"\r\n"), - stderr=err.rstrip(b"\r\n"), - rc=rc, - start=str(startd), - 
end=str(endd), - delta=str(delta), - changed=changed, - ) - - if rc != 0: - module.fail_json(msg='non-zero return code', **result) - - module.exit_json(**result) + return cmd def ceph_volume_cmd(subcommand, container_image, cluster=None): + ''' + Build ceph-volume initial command + ''' if container_image: - binary = "ceph-volume" + binary = 'ceph-volume' cmd = container_exec( binary, container_image) else: - binary = ["ceph-volume"] + binary = ['ceph-volume'] cmd = binary if cluster: - cmd.extend(["--cluster", cluster]) + cmd.extend(['--cluster', cluster]) cmd.append('lvm') cmd.append(subcommand) @@ -370,19 +329,17 @@ def ceph_volume_cmd(subcommand, container_image, cluster=None): return cmd -def activate_osd(module, container_image=None): - subcommand = "activate" - cmd = ceph_volume_cmd(subcommand) - cmd.append("--all") +def prepare_or_create_osd(module, action, container_image): + ''' + Prepare or create OSD devices + ''' - return True - - -def prepare_osd(module): + # get module variables cluster = module.params['cluster'] objectstore = module.params['objectstore'] data = module.params['data'] data_vg = module.params.get('data_vg', None) + data = get_data(data, data_vg) journal = module.params.get('journal', None) journal_vg = module.params.get('journal_vg', None) db = module.params.get('db', None) @@ -390,105 +347,79 @@ def prepare_osd(module): wal = module.params.get('wal', None) wal_vg = module.params.get('wal_vg', None) crush_device_class = module.params.get('crush_device_class', None) - dmcrypt = module.params['dmcrypt'] - subcommand = "prepare" + dmcrypt = module.params.get('dmcrypt', None) - if "CEPH_CONTAINER_IMAGE" in os.environ: - container_image = os.getenv("CEPH_CONTAINER_IMAGE") - else: - container_image = None - - cmd = ceph_volume_cmd(subcommand, container_image, cluster) - cmd.extend(["--%s" % objectstore]) - cmd.append("--data") - - data = get_data(data, data_vg) + # Build the CLI + cmd = build_ceph_volume_cmd(action, container_image, cluster) + cmd.extend(['--%s' % objectstore]) + cmd.append('--data') cmd.append(data) if journal: journal = get_journal(journal, journal_vg) - cmd.extend(["--journal", journal]) + cmd.extend(['--journal', journal]) if db: db = get_db(db, db_vg) - cmd.extend(["--block.db", db]) + cmd.extend(['--block.db', db]) if wal: wal = get_wal(wal, wal_vg) - cmd.extend(["--block.wal", wal]) + cmd.extend(['--block.wal', wal]) if crush_device_class: - cmd.extend(["--crush-device-class", crush_device_class]) + cmd.extend(['--crush-device-class', crush_device_class]) if dmcrypt: - cmd.append("--dmcrypt") + cmd.append('--dmcrypt') - result = dict( - changed=False, - cmd=cmd, - stdout='', - stderr='', - rc='', - start='', - end='', - delta='', - ) - - if module.check_mode: - return result - - # check to see if osd already exists - # FIXME: this does not work when data is a raw device - # support for 'lvm list' and raw devices - # was added with https://github.com/ceph/ceph/pull/20620 but - # has not made it to a luminous release as of 12.2.4 - ceph_volume_list_cmd_args = ["lvm", "list", data] - if container_image: - binary = "ceph-volume" - ceph_volume_list_cmd = container_exec( - binary, container_image) + ceph_volume_list_cmd_args - else: - binary = ["ceph-volume"] - ceph_volume_list_cmd = binary + ceph_volume_list_cmd_args - - rc, out, err = module.run_command(ceph_volume_list_cmd, encoding=None) - if rc == 0: - result["stdout"] = "skipped, since {0} is already used for an osd".format( # noqa E501 - data) - result['rc'] = 0 - module.exit_json(**result) - 
- startd = datetime.datetime.now() - - rc, out, err = module.run_command(cmd, encoding=None) - - endd = datetime.datetime.now() - delta = endd - startd - - result = dict( - cmd=cmd, - stdout=out.rstrip(b"\r\n"), - stderr=err.rstrip(b"\r\n"), - rc=rc, - start=str(startd), - end=str(endd), - delta=str(delta), - changed=True, - ) - - if rc != 0: - module.fail_json(msg='non-zero return code', **result) - - module.exit_json(**result) + return cmd -def zap_devices(module): - """ +def list_osd(module, container_image): + ''' + List will detect wether or not a device has Ceph LVM Metadata + ''' + + # get module variables + cluster = module.params['cluster'] + data = module.params.get('data', None) + data_vg = module.params.get('data_vg', None) + data = get_data(data, data_vg) + + # Build the CLI + action = 'list' + cmd = build_ceph_volume_cmd(action, container_image, cluster) + if data: + cmd.append(data) + cmd.append('--format=json') + + return cmd + + +def activate_osd(): + ''' + Activate all the OSDs on a machine + ''' + + # build the CLI + action = 'activate' + container_image = None + cmd = build_ceph_volume_cmd(action, container_image) + cmd.append('--all') + + return cmd + + +def zap_devices(module, container_image): + ''' Will run 'ceph-volume lvm zap' on all devices, lvs and partitions used to create the OSD. The --destroy flag is always passed so that if an OSD was originally created with a raw device or partition for 'data' then any lvs that were created by ceph-volume are removed. - """ + ''' + + # get module variables data = module.params['data'] data_vg = module.params.get('data_vg', None) journal = module.params.get('journal', None) @@ -497,65 +428,27 @@ def zap_devices(module): db_vg = module.params.get('db_vg', None) wal = module.params.get('wal', None) wal_vg = module.params.get('wal_vg', None) - - base_zap_cmd = [ - 'ceph-volume', - 'lvm', - 'zap', - # for simplicity always --destroy. It will be needed - # for raw devices and will noop for lvs. 
- '--destroy', - ] - - commands = [] - data = get_data(data, data_vg) - commands.append(base_zap_cmd + [data]) + # build the CLI + action = 'zap' + cmd = build_ceph_volume_cmd(action, container_image) + cmd.append('--destroy') + cmd.append(data) if journal: journal = get_journal(journal, journal_vg) - commands.append(base_zap_cmd + [journal]) + cmd.extend([journal]) if db: db = get_db(db, db_vg) - commands.append(base_zap_cmd + [db]) + cmd.extend([db]) if wal: wal = get_wal(wal, wal_vg) - commands.append(base_zap_cmd + [wal]) + cmd.extend([wal]) - result = dict( - changed=True, - rc=0, - ) - command_results = [] - for cmd in commands: - startd = datetime.datetime.now() - - rc, out, err = module.run_command(cmd, encoding=None) - - endd = datetime.datetime.now() - delta = endd - startd - - cmd_result = dict( - cmd=cmd, - stdout_lines=out.split("\n"), - stderr_lines=err.split("\n"), - rc=rc, - start=str(startd), - end=str(endd), - delta=str(delta), - ) - - if rc != 0: - module.fail_json(msg='non-zero return code', **cmd_result) - - command_results.append(cmd_result) - - result["commands"] = command_results - - module.exit_json(**result) + return cmd def run_module(): @@ -577,8 +470,8 @@ def run_module(): dmcrypt=dict(type='bool', required=False, default=False), batch_devices=dict(type='list', required=False, default=[]), osds_per_device=dict(type='int', required=False, default=1), - journal_size=dict(type='str', required=False, default="5120"), - block_db_size=dict(type='str', required=False, default="-1"), + journal_size=dict(type='str', required=False, default='5120'), + block_db_size=dict(type='str', required=False, default='-1'), report=dict(type='bool', required=False, default=False), containerized=dict(type='str', required=False, default=False), ) @@ -588,24 +481,140 @@ def run_module(): supports_check_mode=True ) + result = dict( + changed=False, + stdout='', + stderr='', + rc='', + start='', + end='', + delta='', + ) + + if module.check_mode: + return result + + # start execution + startd = datetime.datetime.now() + + # get the desired action action = module.params['action'] - if action == "create": - prepare_osd(module) - activate_osd(module) - elif action == "prepare": - prepare_osd(module) - elif action == "activate": - activate_osd(module) - elif action == "zap": - zap_devices(module) - elif action == "batch": - batch(module) - elif action == "list": - _list(module) + # will return either the image name or None + container_image = is_containerized() - module.fail_json( - msg='State must either be "present" or "absent".', changed=False, rc=1) + # Assume the task's status will be 'changed' + changed = True + + if action == 'create' or action == 'prepare': + # First test if the device has Ceph LVM Metadata + rc, cmd, out, err = exec_command( + module, list_osd(module, container_image)) + + # list_osd returns a dict, if the dict is empty this means + # we can not check the return code since it's not consistent + # with the plain output + # see: http://tracker.ceph.com/issues/36329 + # FIXME: it's probably less confusing to check for rc + + # convert out to json, ansible return a string... 
+ out_dict = json.loads(out) + if out_dict: + data = module.params['data'] + result['stdout'] = 'skipped, since {0} is already used for an osd'.format( # noqa E501 + data) + result['rc'] = 0 + module.exit_json(**result) + + # Prepare or create the OSD + rc, cmd, out, err = exec_command( + module, prepare_or_create_osd(module, action, container_image)) + + elif action == 'activate': + if container_image: + fatal( + "This is not how container's activation happens, nothing to activate", module) # noqa E501 + + # Activate the OSD + rc, cmd, out, err = exec_command( + module, activate_osd()) + + elif action == 'zap': + # Zap the OSD + rc, cmd, out, err = exec_command( + module, zap_devices(module, container_image)) + + elif action == 'list': + # List Ceph LVM Metadata on a device + rc, cmd, out, err = exec_command( + module, list_osd(module, container_image)) + + elif action == 'batch': + # Batch prepare AND activate OSDs + if container_image: + fatal( + 'Batch operation is currently not supported on containerized deployment (https://tracker.ceph.com/issues/36363)', module) # noqa E501 + + report = module.params.get('report', None) + + # Add --report flag for the idempotency test + report_flags = [ + '--report', + '--format=json', + ] + + cmd = batch(module, container_image) + batch_report_cmd = copy.copy(cmd) + batch_report_cmd.extend(report_flags) + + # Run batch --report to see what's going to happen + # Do not run the batch command if there is nothing to do + rc, cmd, out, err = exec_command( + module, batch_report_cmd) + try: + report_result = json.loads(out) + except ValueError: + result = dict( + cmd=cmd, + stdout=out.rstrip(b"\r\n"), + stderr=err.rstrip(b"\r\n"), + rc=rc, + changed=changed, + ) + module.fail_json(msg='non-zero return code', **result) + + if not report: + # if not asking for a report, let's just run the batch command + changed = report_result['changed'] + if changed: + # Batch prepare the OSD + rc, cmd, out, err = exec_command( + module, batch(module, container_image)) + else: + cmd = batch_report_cmd + + else: + module.fail_json( + msg='State must either be "create" or "prepare" or "activate" or "list" or "zap" or "batch".', changed=False, rc=1) # noqa E501 + + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip(b'\r\n'), + stderr=err.rstrip(b'\r\n'), + changed=changed, + ) + + if rc != 0: + module.fail_json(msg='non-zero return code', **result) + + module.exit_json(**result) def main(): diff --git a/library/test_ceph_volume.py b/library/test_ceph_volume.py index 46feff0d5..2f090a6ac 100644 --- a/library/test_ceph_volume.py +++ b/library/test_ceph_volume.py @@ -1,4 +1,5 @@ from . 
import ceph_volume +from ansible.compat.tests.mock import MagicMock class TestCephVolumeModule(object): @@ -34,3 +35,236 @@ class TestCephVolumeModule(object): def test_wal_with_vg(self): result = ceph_volume.get_wal("wal-lv", "wal-vg") assert result == "wal-vg/wal-lv" + + def test_container_exec(sefl): + fake_binary = "ceph-volume" + fake_container_image = "docker.io/ceph/daemon:latest-luminous" + expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501 + '-v', '/run/lock/lvm:/run/lock/lvm:z', + '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 + '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '--entrypoint=ceph-volume', + 'docker.io/ceph/daemon:latest-luminous'] + result = ceph_volume.container_exec(fake_binary, fake_container_image) + assert result == expected_command_list + + def test_zap_osd_container(self): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda'} + fake_container_image = "docker.io/ceph/daemon:latest-luminous" + expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501 + '-v', '/run/lock/lvm:/run/lock/lvm:z', + '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 + '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '--entrypoint=ceph-volume', + 'docker.io/ceph/daemon:latest-luminous', + 'lvm', + 'zap', + '--destroy', + '/dev/sda'] + result = ceph_volume.zap_devices(fake_module, fake_container_image) + assert result == expected_command_list + + def test_zap_osd(self): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda'} + fake_container_image = None + expected_command_list = ['ceph-volume', + 'lvm', + 'zap', + '--destroy', + '/dev/sda'] + result = ceph_volume.zap_devices(fake_module, fake_container_image) + assert result == expected_command_list + + def test_activate_osd(self): + expected_command_list = ['ceph-volume', + 'lvm', + 'activate', + '--all'] + result = ceph_volume.activate_osd() + assert result == expected_command_list + + def test_list_osd(self): + fake_module = MagicMock() + fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'} + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'list', + '/dev/sda', + '--format=json', + ] + result = ceph_volume.list_osd(fake_module, fake_container_image) + assert result == expected_command_list + + def test_list_osd_container(self): + fake_module = MagicMock() + fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'} + fake_container_image = "docker.io/ceph/daemon:latest-luminous" + expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501 + '-v', '/run/lock/lvm:/run/lock/lvm:z', + '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 + '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '--entrypoint=ceph-volume', + 'docker.io/ceph/daemon:latest-luminous', + '--cluster', + 'ceph', + 'lvm', + 'list', + '/dev/sda', + '--format=json', + ] + result = ceph_volume.list_osd(fake_module, fake_container_image) + assert result == expected_command_list + + def test_create_osd_container(self): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': 'filestore', + 'cluster': 'ceph', } + + fake_action = "create" + fake_container_image = "docker.io/ceph/daemon:latest-luminous" + 
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501 + '-v', '/run/lock/lvm:/run/lock/lvm:z', + '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 + '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '--entrypoint=ceph-volume', + 'docker.io/ceph/daemon:latest-luminous', + '--cluster', + 'ceph', + 'lvm', + 'create', + '--filestore', + '--data', + '/dev/sda'] + result = ceph_volume.prepare_or_create_osd( + fake_module, fake_action, fake_container_image) + assert result == expected_command_list + + def test_create_osd(self): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': 'filestore', + 'cluster': 'ceph', } + + fake_container_image = None + fake_action = "create" + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'create', + '--filestore', + '--data', + '/dev/sda'] + result = ceph_volume.prepare_or_create_osd( + fake_module, fake_action, fake_container_image) + assert result == expected_command_list + + def test_prepare_osd_container(self): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': 'filestore', + 'cluster': 'ceph', } + + fake_action = "prepare" + fake_container_image = "docker.io/ceph/daemon:latest-luminous" + expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501 + '-v', '/run/lock/lvm:/run/lock/lvm:z', + '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 + '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '--entrypoint=ceph-volume', + 'docker.io/ceph/daemon:latest-luminous', + '--cluster', + 'ceph', + 'lvm', + 'prepare', + '--filestore', + '--data', + '/dev/sda'] + result = ceph_volume.prepare_or_create_osd( + fake_module, fake_action, fake_container_image) + assert result == expected_command_list + + def test_prepare_osd(self): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': 'filestore', + 'cluster': 'ceph', } + + fake_container_image = None + fake_action = "prepare" + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'prepare', + '--filestore', + '--data', + '/dev/sda'] + result = ceph_volume.prepare_or_create_osd( + fake_module, fake_action, fake_container_image) + assert result == expected_command_list + + def test_batch_osd_container(self): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': 'filestore', + 'journal_size': '100', + 'cluster': 'ceph', + 'batch_devices': ["/dev/sda", "/dev/sdb"]} + + fake_container_image = "docker.io/ceph/daemon:latest-luminous" + expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501 + '-v', '/run/lock/lvm:/run/lock/lvm:z', + '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 + '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '--entrypoint=ceph-volume', + 'docker.io/ceph/daemon:latest-luminous', + '--cluster', + 'ceph', + 'lvm', + 'batch', + '--filestore', + '--yes', + '--journal-size', + '100', + '/dev/sda', + '/dev/sdb'] + result = ceph_volume.batch( + fake_module, fake_container_image) + assert result == expected_command_list + + def test_batch_osd(self): + fake_module = MagicMock() + fake_module.params = {'data': '/dev/sda', + 'objectstore': 'filestore', + 'journal_size': '100', + 'cluster': 'ceph', + 
'batch_devices': ["/dev/sda", "/dev/sdb"]} + + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'batch', + '--filestore', + '--yes', + '--journal-size', + '100', + '/dev/sda', + '/dev/sdb'] + result = ceph_volume.batch( + fake_module, fake_container_image) + assert result == expected_command_list diff --git a/roles/ceph-osd/tasks/scenarios/lvm.yml b/roles/ceph-osd/tasks/scenarios/lvm.yml index bff1cf4cb..3e759ce53 100644 --- a/roles/ceph-osd/tasks/scenarios/lvm.yml +++ b/roles/ceph-osd/tasks/scenarios/lvm.yml @@ -17,4 +17,5 @@ environment: CEPH_VOLUME_DEBUG: 1 CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}" - with_items: "{{ lvm_volumes }}" \ No newline at end of file + with_items: "{{ lvm_volumes }}" + tags: prepare_osd \ No newline at end of file diff --git a/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all b/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all index 1de1598e6..6f4e785cc 100644 --- a/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all +++ b/tests/functional/centos/7/bs-lvm-osds-container/group_vars/all @@ -1,9 +1,12 @@ --- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + ceph_origin: repository ceph_repository: community containerized_deployment: True -cluster: test public_network: "192.168.39.0/24" cluster_network: "192.168.40.0/24" monitor_interface: eth1 @@ -23,3 +26,4 @@ os_tuning_params: ceph_conf_overrides: global: osd_pool_default_size: 1 +ceph_osd_docker_run_script_path: /var/tmp \ No newline at end of file diff --git a/tests/functional/centos/7/bs-lvm-osds/group_vars/all b/tests/functional/centos/7/bs-lvm-osds/group_vars/all index f30393671..78e47df51 100644 --- a/tests/functional/centos/7/bs-lvm-osds/group_vars/all +++ b/tests/functional/centos/7/bs-lvm-osds/group_vars/all @@ -2,7 +2,6 @@ ceph_origin: repository ceph_repository: community -cluster: test public_network: "192.168.39.0/24" cluster_network: "192.168.40.0/24" monitor_interface: eth1 diff --git a/tests/functional/centos/7/lvm-batch-container/group_vars/all b/tests/functional/centos/7/lvm-batch-container/group_vars/all index 0555ffa38..6ed0becf0 100644 --- a/tests/functional/centos/7/lvm-batch-container/group_vars/all +++ b/tests/functional/centos/7/lvm-batch-container/group_vars/all @@ -1,5 +1,9 @@ --- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + containerized_deployment: True ceph_origin: repository ceph_repository: community @@ -14,10 +18,11 @@ crush_device_class: test osd_scenario: lvm copy_admin_key: true devices: - - /dev/sdb + - /dev/sdb - /dev/sdc os_tuning_params: - { name: fs.file-max, value: 26234859 } ceph_conf_overrides: global: osd_pool_default_size: 1 +ceph_osd_docker_run_script_path: /var/tmp \ No newline at end of file diff --git a/tests/functional/centos/7/lvm-osds-container/group_vars/all b/tests/functional/centos/7/lvm-osds-container/group_vars/all index 0a29dff68..9b826bd7f 100644 --- a/tests/functional/centos/7/lvm-osds-container/group_vars/all +++ b/tests/functional/centos/7/lvm-osds-container/group_vars/all @@ -1,5 +1,9 @@ --- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + ceph_origin: repository ceph_repository: community cluster: ceph @@ -26,3 +30,4 @@ os_tuning_params: ceph_conf_overrides: global: osd_pool_default_size: 1 
+ceph_osd_docker_run_script_path: /var/tmp \ No newline at end of file diff --git a/tox.ini b/tox.ini index ae1baf68b..0d1f2ddb9 100644 --- a/tox.ini +++ b/tox.ini @@ -204,6 +204,9 @@ setenv= lvm_osds_container: PLAYBOOK = site-docker.yml.sample bluestore_lvm_osds: CEPH_STABLE_RELEASE = luminous bluestore_lvm_osds_container: CEPH_STABLE_RELEASE = luminous + bluestore_lvm_osds_container: PLAYBOOK = site-docker.yml.sample + lvm_batch_container: PLAYBOOK = site-docker.yml.sample + lvm_batch_container: CEPH_STABLE_RELEASE = luminous update_cluster: ROLLING_UPDATE = True update_docker_cluster: ROLLING_UPDATE = True deps= -r{toxinidir}/tests/requirements.txt @@ -253,6 +256,9 @@ commands= lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml lvm_osds_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml bluestore_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml + bluestore_lvm_osds_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml + lvm_batch: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml + lvm_batch_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml purge_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup" From 9fccffa1cac2e2b527ad35e7398db6f20b79b835 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 26 Sep 2018 14:24:26 +0200 Subject: [PATCH 067/105] switch: allow switching big clusters (more than 99 OSDs) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current regex had a limitation of 99 OSDs; this limit has now been removed so that all OSDs are collected regardless of how many there are. Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1630430 Signed-off-by: Sébastien Han --- ...tch-from-non-containerized-to-containerized-ceph-daemons.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index 296edd446..4d9a56530 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -199,7 +199,7 @@ pre_tasks: - name: collect running osds and ceph-disk unit(s) shell: | - systemctl list-units | grep "loaded active" | grep -Eo 'ceph-osd@[0-9]{1,2}.service|ceph-disk@dev-[a-z]{3,4}[0-9]{1}.service' + systemctl list-units | grep "loaded active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-disk@dev-[a-z]{3,4}[0-9]{1}.service' register: running_osds changed_when: false failed_when: false From fa38b86cf83d68e756feb6d016a8c209478c9c5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 12 Oct 2018 18:58:41 +0200 Subject: [PATCH 068/105] test: fix docker test for lvm MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The CI is still running ceph-disk tests upstream.
So until https://github.com/ceph/ceph-ansible/pull/3187 is merged nothing will pass anymore. Signed-off-by: Sébastien Han --- tests/functional/tests/osd/test_osds.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/functional/tests/osd/test_osds.py b/tests/functional/tests/osd/test_osds.py index feafc219b..851c7d7f5 100644 --- a/tests/functional/tests/osd/test_osds.py +++ b/tests/functional/tests/osd/test_osds.py @@ -71,8 +71,15 @@ class TestOSDs(object): @pytest.mark.docker def test_all_docker_osds_are_up_and_in(self, node, host): - cmd = "sudo docker exec ceph-osd-0 ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( - hostname=node["vars"]["inventory_hostname"], + osd_scenario = node["vars"].get('osd_scenario', False) + if osd_scenario in ['lvm', 'lvm-batch']: + osd_id = "0" + else: + hostname = node["vars"]["inventory_hostname"] + osd_id = os.path.join(hostname+"-sda") + + cmd = "sudo docker exec ceph-osd-{osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( + osd_id=osd_id, cluster=node["cluster_name"] ) output = json.loads(host.check_output(cmd)) From 40b7747af7b3d139b3017b53f78ab52fd1082a92 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Wed, 10 Oct 2018 15:24:22 -0400 Subject: [PATCH 069/105] remove jewel support As of now, we should no longer support Jewel in ceph-ansible. The latest ceph-ansible release supporting Jewel is `stable-3.1`. Signed-off-by: Guillaume Abrioux --- infrastructure-playbooks/rolling_update.yml | 30 ++--------- roles/ceph-defaults/tasks/facts.yml | 2 +- roles/ceph-fetch-keys/tasks/main.yml | 7 +-- .../templates/restart_rbd_mirror_daemon.sh.j2 | 5 +- roles/ceph-infra/tasks/configure_firewall.yml | 1 - .../ceph-mds/tasks/create_mds_filesystems.yml | 2 - roles/ceph-mgr/tasks/main.yml | 10 +--- roles/ceph-mon/tasks/ceph_keys.yml | 12 ----- roles/ceph-mon/tasks/crush_rules.yml | 4 +- roles/ceph-mon/tasks/deploy_monitors.yml | 14 +---- roles/ceph-mon/tasks/docker/copy_configs.yml | 12 +---- roles/ceph-mon/tasks/docker/main.yml | 1 - .../tasks/ceph_disk_cli_options_facts.yml | 22 -------- roles/ceph-osd/tasks/openstack_config.yml | 17 +------ roles/ceph-rbd-mirror/tasks/common.yml | 20 +------- .../tasks/docker/copy_configs.yml | 8 +-- roles/ceph-rbd-mirror/tasks/pre_requisite.yml | 16 +----- .../tasks/start_rbd_mirror.yml | 17 +------ roles/ceph-rgw/tasks/main.yml | 1 - site-docker.yml.sample | 12 +---- site.yml.sample | 12 ----- tests/conftest.py | 16 ------ .../tests/rbd-mirror/test_rbd_mirror.py | 51 +++---------------- tests/functional/tests/rgw/test_rgw.py | 1 - tox.ini | 8 +-- 25 files changed, 26 insertions(+), 275 deletions(-) diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index f81fbd61d..0dfdd0a52 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -19,7 +19,6 @@ become: false vars: - mgr_group_name: mgrs - - jewel_minor_update: False vars_prompt: - name: ireallymeanit @@ -41,7 +40,6 @@ fail: msg: "Please add a mgr host to your inventory." 
when: - - not jewel_minor_update - groups.get(mgr_group_name, []) | length == 0 @@ -243,7 +241,6 @@ - not containerized_deployment - cephx - groups.get(mgr_group_name, []) | length > 0 - - ceph_release_num[ceph_release] >= ceph_release_num.luminous delegate_to: "{{ groups[mon_group_name][0] }}" with_items: "{{ groups.get(mgr_group_name, []) }}" @@ -262,7 +259,6 @@ - cephx - groups.get(mgr_group_name, []) | length > 0 - inventory_hostname == groups[mon_group_name]|last - - ceph_release_num[ceph_release] >= ceph_release_num.luminous delegate_to: "{{ groups[mon_group_name][0] }}" with_items: "{{ groups.get(mgr_group_name, []) }}" @@ -294,9 +290,7 @@ - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config - - { role: ceph-mgr, - when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or - (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" } + - ceph-mgr post_tasks: - name: start ceph mgr @@ -323,7 +317,6 @@ health_osd_check_retries: 40 health_osd_check_delay: 30 upgrade_ceph_packages: True - jewel_minor_update: False hosts: - "{{ osd_group_name|default('osds') }}" @@ -397,15 +390,11 @@ command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions" register: ceph_versions delegate_to: "{{ groups[mon_group_name][0] }}" - when: - - not jewel_minor_update - name: set_fact ceph_versions_osd set_fact: ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}" delegate_to: "{{ groups[mon_group_name][0] }}" - when: - - not jewel_minor_update # length == 1 means there is a single osds versions entry # thus all the osds are running the same version @@ -415,7 +404,6 @@ when: - (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1 - ceph_versions_osd | string is search("ceph version 10") - - not jewel_minor_update - name: get num_pgs - non container command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json" @@ -437,8 +425,6 @@ - name: unset osd flags - vars: - - jewel_minor_update: False hosts: - "{{ mon_group_name|default('mons') }}" @@ -466,15 +452,11 @@ command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions" register: ceph_versions delegate_to: "{{ groups[mon_group_name][0] }}" - when: - - not jewel_minor_update - name: set_fact ceph_versions_osd set_fact: ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}" delegate_to: "{{ groups[mon_group_name][0] }}" - when: - - not jewel_minor_update # length == 1 means there is a single osds versions entry # thus all the osds are running the same version @@ -484,8 +466,6 @@ when: - (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1 - ceph_versions_osd | string is search("ceph version 12") - - not jewel_minor_update - - name: upgrade ceph mdss cluster @@ -666,9 +646,7 @@ - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config - - { role: ceph-nfs, - when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or - (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" } + - ceph-nfs post_tasks: - name: start nfs gateway @@ -722,9 +700,7 @@ - { role: ceph-common, when: not containerized_deployment } - { role: ceph-docker-common, when: containerized_deployment } - ceph-config - - { role: ceph-iscsi-gw, - when: "(ceph_release_num[ceph_release] >= 
ceph_release_num.luminous) or - (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" } + - ceph-iscsi-gw post_tasks: - name: start rbd-target-gw diff --git a/roles/ceph-defaults/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml index ab899d648..8c1ddf652 100644 --- a/roles/ceph-defaults/tasks/facts.yml +++ b/roles/ceph-defaults/tasks/facts.yml @@ -238,4 +238,4 @@ - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, []) - ceph_current_status['servicemap'] is defined - ceph_current_status['servicemap']['services'] is defined - - ceph_current_status['servicemap']['services']['rgw'] is defined # that's the way to cover ceph_release_num[ceph_release] >= ceph_release_num['luminous'] + - ceph_current_status['servicemap']['services']['rgw'] is defined diff --git a/roles/ceph-fetch-keys/tasks/main.yml b/roles/ceph-fetch-keys/tasks/main.yml index 4990deb38..61f2f3a58 100644 --- a/roles/ceph-fetch-keys/tasks/main.yml +++ b/roles/ceph-fetch-keys/tasks/main.yml @@ -14,11 +14,6 @@ with_items: - "{{ ceph_keys.stdout_lines }}" -- name: set_fact bootstrap_rbd_keyring - set_fact: - bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring" - when: ceph_release_num[ceph_release] >= ceph_release_num.luminous - - name: copy keys to the ansible server fetch: src: "{{ item }}" @@ -30,4 +25,4 @@ - "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring" - "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring" - "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring" - - "{{ bootstrap_rbd_keyring | default([]) }}" + - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring" diff --git a/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 b/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 index 73a87086b..44d019bfe 100644 --- a/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 +++ b/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 @@ -6,13 +6,10 @@ RBD_MIRROR_NAME="{{ ansible_hostname }}" {% if containerized_deployment %} DOCKER_EXEC="docker exec ceph-rbd-mirror-{{ ansible_hostname }}" {% endif %} -{% if ceph_release_num[ceph_release] < ceph_release_num['luminous'] %} -SOCKET=/var/run/ceph/{{ cluster }}-client.admin.asok -{% else %} + # Backward compatibility $DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok $DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok -{% endif %} # First, restart the daemon systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME} diff --git a/roles/ceph-infra/tasks/configure_firewall.yml b/roles/ceph-infra/tasks/configure_firewall.yml index c0e41d823..53329af34 100644 --- a/roles/ceph-infra/tasks/configure_firewall.yml +++ b/roles/ceph-infra/tasks/configure_firewall.yml @@ -48,7 +48,6 @@ state: enabled notify: restart firewalld when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - mgr_group_name is defined - mgr_group_name in group_names - firewalld_pkg_query.rc == 0 diff --git a/roles/ceph-mds/tasks/create_mds_filesystems.yml b/roles/ceph-mds/tasks/create_mds_filesystems.yml index 79bc81281..8418a5cc7 100644 --- a/roles/ceph-mds/tasks/create_mds_filesystems.yml +++ b/roles/ceph-mds/tasks/create_mds_filesystems.yml @@ -29,7 +29,6 @@ delegate_to: "{{ groups[mon_group_name][0] }}" when: - 
check_existing_cephfs.rc != 0 - - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] - name: allow multimds command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it" @@ -43,5 +42,4 @@ changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" when: - - ceph_release_num[ceph_release] >= ceph_release_num.jewel - mds_max_mds > 1 diff --git a/roles/ceph-mgr/tasks/main.yml b/roles/ceph-mgr/tasks/main.yml index 410fa1d9a..9b5f1a41a 100644 --- a/roles/ceph-mgr/tasks/main.yml +++ b/roles/ceph-mgr/tasks/main.yml @@ -20,20 +20,14 @@ command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls" register: _ceph_mgr_modules delegate_to: "{{ groups[mon_group_name][0] }}" - when: - - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] - name: set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict) set_fact: _ceph_mgr_modules: "{{ _ceph_mgr_modules.get('stdout', '{}') | from_json }}" - when: - - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] - name: set _disabled_ceph_mgr_modules fact set_fact: _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}" - when: - - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] - name: disable ceph mgr enabled modules command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}" @@ -44,12 +38,10 @@ when: - item not in ceph_mgr_modules - not _ceph_mgr_modules.get('skipped') - - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] - name: add modules to ceph-mgr command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}" with_items: "{{ ceph_mgr_modules }}" delegate_to: "{{ groups[mon_group_name][0] }}" when: - - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == []) - - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] + - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == []) \ No newline at end of file diff --git a/roles/ceph-mon/tasks/ceph_keys.yml b/roles/ceph-mon/tasks/ceph_keys.yml index 23b12c6a8..1d998a0a6 100644 --- a/roles/ceph-mon/tasks/ceph_keys.yml +++ b/roles/ceph-mon/tasks/ceph_keys.yml @@ -5,15 +5,6 @@ check_mode: no when: - cephx - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - -- name: collect admin and bootstrap keys - command: ceph-create-keys --cluster {{ cluster }} -i {{ monitor_name }} - changed_when: false - check_mode: no - when: - - cephx - - ceph_release_num[ceph_release] < ceph_release_num.luminous # NOTE (leseb): wait for mon discovery and quorum resolution # the admin key is not instantaneously created so we have to wait a bit @@ -81,7 +72,6 @@ - cephx - groups.get(mgr_group_name, []) | length > 0 - inventory_hostname == groups[mon_group_name]|last - - ceph_release_num[ceph_release] > ceph_release_num.jewel with_items: "{{ groups.get(mgr_group_name, []) }}" # once this gets backported github.com/ceph/ceph/pull/20983 @@ -108,8 +98,6 @@ - name: set_fact bootstrap_rbd_keyring set_fact: bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring" - when: - - ceph_release_num[ceph_release] 
>= ceph_release_num.luminous - name: copy keys to the ansible server fetch: diff --git a/roles/ceph-mon/tasks/crush_rules.yml b/roles/ceph-mon/tasks/crush_rules.yml index 96b449dda..2da7e8254 100644 --- a/roles/ceph-mon/tasks/crush_rules.yml +++ b/roles/ceph-mon/tasks/crush_rules.yml @@ -38,9 +38,9 @@ - inventory_hostname == groups.get(mon_group_name) | last - not item.get('skipped', false) -- name: set_fact osd_pool_default_crush_rule to osd_pool_default_crush_replicated_ruleset if release < luminous else osd_pool_default_crush_rule +- name: set_fact osd_pool_default_crush_rule set_fact: - osd_pool_default_crush_rule: "{{ 'osd_pool_default_crush_replicated_ruleset' if ceph_release_num[ceph_release] < ceph_release_num.luminous else 'osd_pool_default_crush_rule' }}" + osd_pool_default_crush_rule: "osd_pool_default_crush_rule" - name: insert new default crush rule into daemon to prevent restart command: "{{ hostvars[item]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set {{ osd_pool_default_crush_rule }} {{ info_ceph_default_crush_rule_yaml.rule_id }}" diff --git a/roles/ceph-mon/tasks/deploy_monitors.yml b/roles/ceph-mon/tasks/deploy_monitors.yml index e3de5a6fe..d792db1b4 100644 --- a/roles/ceph-mon/tasks/deploy_monitors.yml +++ b/roles/ceph-mon/tasks/deploy_monitors.yml @@ -48,7 +48,7 @@ mode: "0755" recurse: true -- name: set_fact client_admin_ceph_authtool_cap >= ceph_release_num.luminous +- name: set_fact client_admin_ceph_authtool_cap set_fact: client_admin_ceph_authtool_cap: mon: allow * @@ -56,18 +56,6 @@ mds: allow mgr: allow * when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - - cephx - - admin_secret != 'admin_secret' - -- name: set_fact client_admin_ceph_authtool_cap < ceph_release_num.luminous - set_fact: - client_admin_ceph_authtool_cap: - mon: allow * - osd: allow * - mds: allow - when: - - ceph_release_num[ceph_release] < ceph_release_num.luminous - cephx - admin_secret != 'admin_secret' diff --git a/roles/ceph-mon/tasks/docker/copy_configs.yml b/roles/ceph-mon/tasks/docker/copy_configs.yml index b7407a2b3..bcf0d3294 100644 --- a/roles/ceph-mon/tasks/docker/copy_configs.yml +++ b/roles/ceph-mon/tasks/docker/copy_configs.yml @@ -7,17 +7,7 @@ - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring - -- name: register rbd bootstrap key - set_fact: - bootstrap_rbd_keyring: - - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring" - when: ceph_release_num[ceph_release] >= ceph_release_num.luminous - -- name: merge rbd bootstrap key to config and keys paths - set_fact: - ceph_config_keys: "{{ ceph_config_keys + bootstrap_rbd_keyring }}" - when: ceph_release_num[ceph_release] >= ceph_release_num.luminous + - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring - name: stat for ceph config and keys local_action: diff --git a/roles/ceph-mon/tasks/docker/main.yml b/roles/ceph-mon/tasks/docker/main.yml index 5703761c7..032d49bf9 100644 --- a/roles/ceph-mon/tasks/docker/main.yml +++ b/roles/ceph-mon/tasks/docker/main.yml @@ -119,4 +119,3 @@ when: - not rolling_update - inventory_hostname == groups[mon_group_name]|last - - ceph_release_num[ceph_release] >= ceph_release_num.luminous diff --git a/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml b/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml index 11f4ede5e..d29a034eb 100644 --- a/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml +++ 
b/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml @@ -5,7 +5,6 @@ when: - osd_objectstore == 'bluestore' - not dmcrypt - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - not containerized_deployment - name: set_fact ceph_disk_cli_options 'ceph_disk_cli_options' @@ -14,16 +13,6 @@ when: - osd_objectstore == 'filestore' - not dmcrypt - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - - not containerized_deployment - -- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }}' - set_fact: - ceph_disk_cli_options: "--cluster {{ cluster }}" - when: - - osd_objectstore == 'filestore' - - not dmcrypt - - ceph_release_num[ceph_release] < ceph_release_num.luminous - not containerized_deployment - name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --bluestore --dmcrypt' @@ -32,7 +21,6 @@ when: - osd_objectstore == 'bluestore' - dmcrypt - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - not containerized_deployment - name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --filestore --dmcrypt' @@ -41,16 +29,6 @@ when: - osd_objectstore == 'filestore' - dmcrypt - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - - not containerized_deployment - -- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --dmcrypt' - set_fact: - ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt" - when: - - osd_objectstore == 'filestore' - - dmcrypt - - ceph_release_num[ceph_release] < ceph_release_num.luminous - not containerized_deployment - name: set_fact docker_env_args '-e KV_TYPE={{ kv_type }} -e KV_IP={{ kv_endpoint }} -e KV_PORT={{ kv_port }}' diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml index a74cdb39e..80fb571ad 100644 --- a/roles/ceph-osd/tasks/openstack_config.yml +++ b/roles/ceph-osd/tasks/openstack_config.yml @@ -18,27 +18,13 @@ delegate_to: "{{ groups[mon_group_name][0] }}" failed_when: false -- name: set_fact rule_name before luminous - set_fact: - rule_name: "replicated_ruleset" - when: - - ceph_release_num[ceph_release] < ceph_release_num['luminous'] - - not rolling_update - -- name: set_fact rule_name from luminous - set_fact: - rule_name: "replicated_rule" - when: - - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] - or (ceph_release_num[ceph_release] < ceph_release_num['luminous'] and rolling_update) - - name: create openstack pool(s) command: > {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.0.name }} {{ item.0.pg_num }} {{ item.0.pgp_num | default(item.0.pg_num) }} - {{ rule_name if item.0.rule_name | default(rule_name) == '' else item.0.rule_name | default(rule_name) }} + {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }} {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }} {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %} {{ item.0.erasure_profile }} @@ -58,7 +44,6 @@ changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" when: - - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] - item.application is defined - name: create openstack cephx key(s) diff --git a/roles/ceph-rbd-mirror/tasks/common.yml b/roles/ceph-rbd-mirror/tasks/common.yml index fa1912302..3750e2d06 100644 --- 
a/roles/ceph-rbd-mirror/tasks/common.yml +++ b/roles/ceph-rbd-mirror/tasks/common.yml @@ -1,21 +1,4 @@ --- -- name: set_fact copy_admin_key - true when ceph_release_num[ceph_release] < ceph_release_num.luminous - set_fact: - copy_admin_key: True - when: - - ceph_release_num[ceph_release] < ceph_release_num.luminous - -- name: copy ceph admin keyring when ceph_release_num[ceph_release] < ceph_release_num.luminous - copy: - src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring" - dest: "/etc/ceph/" - owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}" - group: "{{ ceph_uid if containerized_deployment else 'ceph' }}" - mode: "{{ ceph_keyring_permissions }}" - when: - - cephx - - copy_admin_key - - name: copy rbd-mirror bootstrap key copy: src: "{{ fetch_directory }}/{{ fsid }}/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring" @@ -24,5 +7,4 @@ group: "{{ ceph_uid if containerized_deployment else 'ceph' }}" mode: "{{ ceph_keyring_permissions }}" when: - - cephx - - ceph_release_num[ceph_release] >= ceph_release_num.luminous + - cephx \ No newline at end of file diff --git a/roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml b/roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml index 2133cbe4a..fe3c777ca 100644 --- a/roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml +++ b/roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml @@ -1,15 +1,9 @@ --- -- name: set_fact bootstrap_rbd_keyring - set_fact: - bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring" - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - - name: set_fact ceph_config_keys set_fact: ceph_config_keys: - /etc/ceph/{{ cluster }}.client.admin.keyring - - "{{ bootstrap_rbd_keyring | default('') }}" + - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring - name: stat for ceph config and keys local_action: diff --git a/roles/ceph-rbd-mirror/tasks/pre_requisite.yml b/roles/ceph-rbd-mirror/tasks/pre_requisite.yml index ddefb2a50..29f917b89 100644 --- a/roles/ceph-rbd-mirror/tasks/pre_requisite.yml +++ b/roles/ceph-rbd-mirror/tasks/pre_requisite.yml @@ -8,17 +8,6 @@ tags: - package-install -- name: copy ceph admin key - copy: - src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring" - dest: "/etc/ceph/{{ cluster }}.client.admin.keyring" - owner: "{{ ceph_uid }}" - group: "{{ ceph_uid }}" - mode: "0600" - when: - - cephx - - ceph_release_num[ceph_release] < ceph_release_num.luminous - - name: create rbd-mirror keyring command: ceph --cluster {{ cluster }} --name client.bootstrap-rbd --keyring /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring auth get-or-create client.rbd-mirror.{{ ansible_hostname }} mon 'profile rbd' osd 'profile rbd' -o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring args: @@ -26,7 +15,6 @@ changed_when: false when: - cephx - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - name: set rbd-mirror key permissions file: @@ -35,6 +23,4 @@ group: "ceph" mode: "{{ ceph_keyring_permissions }}" when: - - cephx - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - + - cephx \ No newline at end of file diff --git a/roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml b/roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml index 15def3bdb..0269bf57e 100644 --- a/roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml +++ b/roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml @@ -17,23 +17,12 @@ - ceph_rbd_mirror_systemd_overrides is defined - ansible_service_mgr == 'systemd' -- name: start and 
add that the rbd mirror service to the init sequence - service: - name: "ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}" - state: started - enabled: yes - changed_when: false - when: - - ceph_release_num[ceph_release] < ceph_release_num.luminous - - name: stop and remove the generic rbd-mirror service instance service: name: "ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}" state: stopped enabled: no changed_when: false - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous # This task is a workaround for rbd-mirror not starting after reboot # The upstream fix is: https://github.com/ceph/ceph/pull/17969 @@ -45,13 +34,11 @@ enabled: yes changed_when: false when: - - ceph_release_num[ceph_release] <= ceph_release_num.luminous + - ceph_release_num[ceph_release] == ceph_release_num.luminous - name: start and add the rbd-mirror service instance service: name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}" state: started enabled: yes - changed_when: false - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous + changed_when: false \ No newline at end of file diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml index 9d86b1c56..88935e0f7 100644 --- a/roles/ceph-rgw/tasks/main.yml +++ b/roles/ceph-rgw/tasks/main.yml @@ -19,7 +19,6 @@ when: - rgw_zone != "" - rgw_multisite - - ceph_release_num[ceph_release] >= ceph_release_num.jewel - name: include_tasks docker/main.yml include_tasks: docker/main.yml diff --git a/site-docker.yml.sample b/site-docker.yml.sample index ebf6927cc..b12385a06 100644 --- a/site-docker.yml.sample +++ b/site-docker.yml.sample @@ -122,11 +122,7 @@ - role: ceph-docker-common - role: ceph-config tags: ['ceph_update_config'] - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - role: ceph-mgr - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous post_tasks: - name: set ceph manager install 'Complete' run_once: true @@ -238,11 +234,7 @@ - role: ceph-docker-common - role: ceph-config tags: ['ceph_update_config'] - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - role: ceph-nfs - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous post_tasks: - name: set ceph nfs install 'Complete' run_once: true @@ -359,8 +351,8 @@ - { role: ceph-defaults, tags: ['ceph_update_config'] } - role: ceph-handler - ceph-docker-common - - { role: ceph-config, tags: ['ceph_update_config'], when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" } - - { role: ceph-iscsi-gw, when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" } + - { role: ceph-config, tags: ['ceph_update_config'] } + - ceph-iscsi-gw post_tasks: - name: set ceph iscsi gw install 'Complete' run_once: true diff --git a/site.yml.sample b/site.yml.sample index 769aac56f..5ecba702c 100644 --- a/site.yml.sample +++ b/site.yml.sample @@ -124,11 +124,7 @@ - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - role: ceph-mgr - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous post_tasks: - name: set ceph manager install 'Complete' run_once: true @@ -268,11 +264,7 @@ - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - role: ceph-nfs - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous post_tasks: - name: set ceph nfs install 'Complete' run_once: true @@ -390,11 +382,7 
@@ - role: ceph-common - role: ceph-config tags: ['ceph_update_config'] - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous - role: ceph-iscsi-gw - when: - - ceph_release_num[ceph_release] >= ceph_release_num.luminous post_tasks: - name: set ceph iscsi gw install 'Complete' run_once: true diff --git a/tests/conftest.py b/tests/conftest.py index 324887f69..03d329321 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -57,22 +57,6 @@ def node(host, request): pytest.skip( "Not a valid test for non-containerized deployments or atomic hosts") # noqa E501 - if "mgrs" in group_names and ceph_stable_release == "jewel": - pytest.skip("mgr nodes can not be tested with ceph release jewel") - - if "nfss" in group_names and ceph_stable_release == "jewel": - pytest.skip("nfs nodes can not be tested with ceph release jewel") - - if group_names == ["iscsigws"] and ceph_stable_release == "jewel": - pytest.skip("iscsigws nodes can not be tested with ceph release jewel") # noqa E501 - - if request.node.get_closest_marker("from_luminous") and ceph_release_num[ceph_stable_release] < ceph_release_num['luminous']: # noqa E501 - pytest.skip( - "This test is only valid for releases starting from Luminous and above") # noqa E501 - - if request.node.get_closest_marker("before_luminous") and ceph_release_num[ceph_stable_release] >= ceph_release_num['luminous']: # noqa E501 - pytest.skip("This test is only valid for release before Luminous") - journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated" if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test: # noqa E501 pytest.skip("Scenario is not using journal collocation") diff --git a/tests/functional/tests/rbd-mirror/test_rbd_mirror.py b/tests/functional/tests/rbd-mirror/test_rbd_mirror.py index 578b11beb..7bdd15494 100644 --- a/tests/functional/tests/rbd-mirror/test_rbd_mirror.py +++ b/tests/functional/tests/rbd-mirror/test_rbd_mirror.py @@ -8,63 +8,28 @@ class TestRbdMirrors(object): def test_rbd_mirror_is_installed(self, node, host): assert host.package("rbd-mirror").is_installed - @pytest.mark.no_docker - @pytest.mark.before_luminous - def test_rbd_mirror_service_is_running_before_luminous(self, node, host): - service_name = "ceph-rbd-mirror@admin" - assert host.service(service_name).is_running - @pytest.mark.docker - @pytest.mark.before_luminous - def test_rbd_mirror_service_is_running_docker_before_luminous(self, node, host): + def test_rbd_mirror_service_is_running_docker(self, node, host): service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format( hostname=node["vars"]["inventory_hostname"] ) assert host.service(service_name).is_running - @pytest.mark.docker - @pytest.mark.from_luminous - def test_rbd_mirror_service_is_running_docker_from_luminous(self, node, host): + def test_rbd_mirror_service_is_running(self, node, host): service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format( hostname=node["vars"]["inventory_hostname"] ) assert host.service(service_name).is_running - @pytest.mark.from_luminous - def test_rbd_mirror_service_is_running_from_luminous(self, node, host): - service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format( - hostname=node["vars"]["inventory_hostname"] - ) - assert host.service(service_name).is_running - - @pytest.mark.no_docker - @pytest.mark.before_luminous - def test_rbd_mirror_service_is_enabled_before_luminous(self, node, host): - service_name = "ceph-rbd-mirror@admin" - assert host.service(service_name).is_enabled - - 
@pytest.mark.docker - @pytest.mark.before_luminous - def test_rbd_mirror_service_is_enabled_docker_before_luminous(self, node, host): + def test_rbd_mirror_service_is_enabled(self, node, host): service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format( hostname=node["vars"]["inventory_hostname"] ) assert host.service(service_name).is_enabled - @pytest.mark.from_luminous - def test_rbd_mirror_service_is_enabled_from_luminous(self, node, host): - service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format( - hostname=node["vars"]["inventory_hostname"] - ) - assert host.service(service_name).is_enabled - - @pytest.mark.from_luminous def test_rbd_mirror_is_up(self, node, host): - ceph_release_num=node['ceph_release_num'] - ceph_stable_release=node['ceph_stable_release'] hostname=node["vars"]["inventory_hostname"] cluster=node["cluster_name"] - rolling_update=node["rolling_update"] daemons = [] if node['docker']: docker_exec_cmd = 'docker exec ceph-rbd-mirror-{hostname}'.format(hostname=hostname) @@ -80,10 +45,6 @@ class TestRbdMirrors(object): output = host.check_output(cmd) status = json.loads(output) daemon_ids = [i for i in status["servicemap"]["services"]["rbd-mirror"]["daemons"].keys() if i != "summary"] - if ceph_release_num[ceph_stable_release] > ceph_release_num['luminous'] or (ceph_release_num[ceph_stable_release] == ceph_release_num['luminous'] and rolling_update=='True'): - for daemon_id in daemon_ids: - daemons.append(status["servicemap"]["services"]["rbd-mirror"]["daemons"][daemon_id]["metadata"]["hostname"]) - result = hostname in daemons - else: - result = hostname in daemon_ids - assert result \ No newline at end of file + for daemon_id in daemon_ids: + daemons.append(status["servicemap"]["services"]["rbd-mirror"]["daemons"][daemon_id]["metadata"]["hostname"]) + assert hostname in daemons \ No newline at end of file diff --git a/tests/functional/tests/rgw/test_rgw.py b/tests/functional/tests/rgw/test_rgw.py index 69bd001f2..c940d260b 100644 --- a/tests/functional/tests/rgw/test_rgw.py +++ b/tests/functional/tests/rgw/test_rgw.py @@ -22,7 +22,6 @@ class TestRGWs(object): ) assert host.service(service_name).is_enabled - @pytest.mark.from_luminous def test_rgw_is_up(self, node, host): hostname=node["vars"]["inventory_hostname"] cluster=node["cluster_name"] diff --git a/tox.ini b/tox.ini index 0d1f2ddb9..5d8b0ffdc 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,5 @@ [tox] -envlist = {dev,jewel,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation} - {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,bluestore_lvm_osds_container,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch,lvm_osds_container,lvm_batch_container} +envlist = 
{dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation,filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch} infra_lv_create skipsdist = True @@ -183,11 +182,6 @@ setenv= shrink_osd: COPY_ADMIN_KEY = True rhcs: CEPH_STABLE_RELEASE = luminous - jewel: CEPH_STABLE_RELEASE = jewel - jewel: CEPH_DOCKER_IMAGE_TAG = latest-jewel - jewel: UPDATE_CEPH_STABLE_RELEASE = luminous - jewel: UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-luminous - jewel: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-jewel luminous: CEPH_STABLE_RELEASE = luminous luminous: CEPH_DOCKER_IMAGE_TAG = latest-luminous luminous: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-luminous From 60bc1e38db0e797ad6553584927f86486ae09c19 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Sat, 13 Oct 2018 10:42:18 +0200 Subject: [PATCH 070/105] handler: fix osd containers handler `ceph_osd_container_stat` might not be set on other osd node. We must ensure we are on the last node before trying to evaluate `ceph_osd_container_stat`. Signed-off-by: Guillaume Abrioux --- roles/ceph-handler/handlers/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ceph-handler/handlers/main.yml b/roles/ceph-handler/handlers/main.yml index bc6732eb0..95132e6cb 100644 --- a/roles/ceph-handler/handlers/main.yml +++ b/roles/ceph-handler/handlers/main.yml @@ -105,8 +105,8 @@ - osd_group_name in group_names - containerized_deployment - not rolling_update - - ceph_osd_container_stat.get('rc') == 0 - inventory_hostname == groups.get(osd_group_name) | last + - ceph_osd_container_stat.get('rc') == 0 - ceph_osd_container_stat.get('stdout_lines', [])|length != 0 - handler_health_osd_check - hostvars[item]['_osd_handler_called'] | default(False) From dc020058a334e8168e13740b2600241eba63470c Mon Sep 17 00:00:00 2001 From: "binhong.hua" Date: Wed, 10 Oct 2018 23:24:30 +0800 Subject: [PATCH 071/105] vagrantfile: remove disk path of OSD nodes osd node's disks will remain on vagrant host,when run "vagrant destroy", because we use time as a part of disk path, and time on delete not equal time on create. we already use random_hostname in Libvirt backend,it will create disk use the hostname as a part of diskname. for example: vagrant_osd2_1539159988_065f15e3e1fa6ceb0770-hda.qcow2. 
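Hosts that were provisioned with the old naming scheme may still carry orphaned images after a "vagrant destroy". Assuming those images ended up in the default libvirt storage pool (the pool name is an assumption and can differ per host), something like the following lists and removes them:

    # list volumes matching the removed disk-<vm>-<disk>-<timestamp>.disk pattern
    virsh vol-list default | awk '/disk-[0-9]+-[0-9]+-[0-9]+\.disk/ {print $1}'
    # delete one of them (the volume name below is only an example)
    virsh vol-delete --pool default disk-0-1-1539159988.disk
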
Signed-off-by: binhong.hua --- Vagrantfile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 314556c6f..99fd9bdee 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -2,7 +2,6 @@ # vi: set ft=ruby : require 'yaml' -require 'time' VAGRANTFILE_API_VERSION = '2' config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml')) @@ -33,7 +32,6 @@ DEBUG = settings['debug'] ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode') DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false) -DISK_UUID = Time.now.utc.to_i ansible_provision = proc do |ansible| @@ -516,7 +514,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| # always make /dev/sd{a/b/c} so that CI can ensure that # virtualbox and libvirt will have the same devices to use for OSDs (0..2).each do |d| - lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '50G', :bus => "ide" + lv.storage :file, :device => "hd#{driverletters[d]}", :size => '50G', :bus => "ide" end lv.memory = MEMORY lv.random_hostname = true From 25bde39539f8a393d4fc076e90cce6b53df31519 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 15 Oct 2018 14:30:33 +0200 Subject: [PATCH 072/105] Update Mergify configuration to v2 Signed-off-by: Julien Danjou --- .mergify.yml | 103 ++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 86 insertions(+), 17 deletions(-) diff --git a/.mergify.yml b/.mergify.yml index c5e86447e..99d7991ec 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -1,17 +1,86 @@ -rules: - default: - protection: - required_status_checks: - strict: true - contexts: - - "Testing: ceph-ansible PR Pipeline" - required_pull_request_reviews: - required_approving_review_count: 1 - merge_strategy: - method: rebase - automated_backport_labels: - backport-stable-3.0: stable-3.0 - backport-stable-3.1: stable-3.1 - disabling_label: DNM - disabling_files: - - .mergify.yml +pull_request_rules: + - name: automatic merge + conditions: + - label!=DNM + - '#approved-reviews-by>=1' + - 'status-success=Testing: ceph-ansible PR Pipeline' + actions: + merge: + method: rebase + rebase_fallback: merge + strict: smart + dismiss_reviews: {} + delete_head_branch: {} + - name: automatic merge on skip ci + conditions: + - label!=DNM + - title=~\[skip ci\] + - '#approved-reviews-by>=1' + actions: + merge: + method: rebase + rebase_fallback: merge + strict: smart + dismiss_reviews: {} + delete_head_branch: {} + - name: automerge backport 3.0 + conditions: + - author=mergify[bot] + - base=stable-3.0 + - label!=DNM + - 'status-success=Testing: ceph-ansible PR Pipeline' + actions: + merge: + method: rebase + rebase_fallback: merge + strict: smart + dismiss_reviews: {} + delete_head_branch: {} + - name: automerge backport 3.1 + conditions: + - author=mergify[bot] + - base=stable-3.1 + - label!=DNM + - 'status-success=Testing: ceph-ansible PR Pipeline' + actions: + merge: + method: rebase + rebase_fallback: merge + strict: smart + dismiss_reviews: {} + delete_head_branch: {} + - name: automerge backport 3.2 + conditions: + - author=mergify[bot] + - base=stable-3.2 + - label!=DNM + - 'status-success=Testing: ceph-ansible PR Pipeline' + actions: + merge: + method: rebase + rebase_fallback: merge + strict: smart + dismiss_reviews: {} + delete_head_branch: {} +# Backports + - actions: + backport: + branches: + - stable-3.0 + conditions: + - label=backport-stable-3.0 + name: backport stable-3.0 + - actions: + backport: + branches: + 
- stable-3.1 + conditions: + - label=backport-stable-3.1 + name: backport stable-3.1 + - actions: + backport: + branches: + - stable-3.2 + conditions: + - label=backport-stable-3.2 + name: backport stable-3.2 From 07140f8063ec078aa6eee6c95b36e3bb839122e0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 15 Oct 2018 15:17:26 +0200 Subject: [PATCH 073/105] Mergify: fix regexp operator --- .mergify.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mergify.yml b/.mergify.yml index 99d7991ec..43ddec9f5 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -14,7 +14,7 @@ pull_request_rules: - name: automatic merge on skip ci conditions: - label!=DNM - - title=~\[skip ci\] + - title~=\[skip ci\] - '#approved-reviews-by>=1' actions: merge: From 55334baa0cf2ee8cf73776b08b5a4ca2e6a5c542 Mon Sep 17 00:00:00 2001 From: Nan Li Date: Fri, 12 Oct 2018 11:26:04 +0800 Subject: [PATCH 074/105] docker-ce is used in aarch64 instead of docker engine Signed-off-by: Nan Li --- .../tasks/pre_requisites/debian_prerequisites.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ceph-docker-common/tasks/pre_requisites/debian_prerequisites.yml b/roles/ceph-docker-common/tasks/pre_requisites/debian_prerequisites.yml index 8be7c2cec..e777f52de 100644 --- a/roles/ceph-docker-common/tasks/pre_requisites/debian_prerequisites.yml +++ b/roles/ceph-docker-common/tasks/pre_requisites/debian_prerequisites.yml @@ -48,7 +48,7 @@ - name: install docker on debian package: - name: docker-engine + name: "{{ 'docker-ce' if ansible_architecture == 'aarch64' else 'docker-engine' }}" state: present update_cache: yes From fc6f1ae0998849e3207f7516dd0fdc4eb03aa23d Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Mon, 15 Oct 2018 23:54:47 +0200 Subject: [PATCH 075/105] doc: update default osd_objectstore value since dc3319c3c4e2fb58cb1b5e6c60f165ed28260dc8 this should be reflected in the doc. Signed-off-by: Guillaume Abrioux --- docs/source/osds/scenarios.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/osds/scenarios.rst b/docs/source/osds/scenarios.rst index c6f06a574..87d17fae4 100644 --- a/docs/source/osds/scenarios.rst +++ b/docs/source/osds/scenarios.rst @@ -38,7 +38,7 @@ Other (optional) supported settings: - ``osd_objectstore``: Set the Ceph *objectstore* for the OSD. Available options are ``filestore`` or ``bluestore``. You can only select ``bluestore`` with - the Ceph release is luminous or greater. Defaults to ``filestore`` if unset. + the Ceph release is luminous or greater. Defaults to ``bluestore`` if unset. - ``dmcrypt``: Enable Ceph's encryption on OSDs using ``dmcrypt``. Defaults to ``false`` if unset. From b953965399231393c56a1d4bdcf8a21ccafbf9c4 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Mon, 15 Oct 2018 15:32:17 +0200 Subject: [PATCH 076/105] handler: remove some leftover in restart_*_daemon.sh.j2 Remove some legacy in those restart script. 
Signed-off-by: Guillaume Abrioux --- roles/ceph-handler/templates/restart_mds_daemon.sh.j2 | 1 - roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 | 1 - roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 | 1 - roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 | 1 - 4 files changed, 4 deletions(-) diff --git a/roles/ceph-handler/templates/restart_mds_daemon.sh.j2 b/roles/ceph-handler/templates/restart_mds_daemon.sh.j2 index f265546f9..db817e6d8 100644 --- a/roles/ceph-handler/templates/restart_mds_daemon.sh.j2 +++ b/roles/ceph-handler/templates/restart_mds_daemon.sh.j2 @@ -14,7 +14,6 @@ $DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok # First, restart the daemon systemctl restart ceph-mds@${MDS_NAME} -COUNT=10 # Wait and ensure the socket exists after restarting the daemds while [ $RETRIES -ne 0 ]; do $DOCKER_EXEC test -S $SOCKET && exit 0 diff --git a/roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 b/roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 index 2b06a04af..d9dc72801 100644 --- a/roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 +++ b/roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 @@ -15,7 +15,6 @@ systemctl reset-failed ceph-mgr@${MGR_NAME} # First, restart the daemon systemctl restart ceph-mgr@${MGR_NAME} -COUNT=10 # Wait and ensure the socket exists after restarting the daemds while [ $RETRIES -ne 0 ]; do $DOCKER_EXEC test -S $SOCKET && exit 0 diff --git a/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 b/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 index 5828e1ac6..628b05fe4 100644 --- a/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 +++ b/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 @@ -11,7 +11,6 @@ DOCKER_EXEC="docker exec ceph-nfs-{{ ansible_hostname }}" # First, restart the daemon {% if containerized_deployment -%} systemctl restart $NFS_NAME -COUNT=10 # Wait and ensure the pid exists after restarting the daemon while [ $RETRIES -ne 0 ]; do $DOCKER_EXEC test -f $PID && exit 0 diff --git a/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 b/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 index 44d019bfe..52113b660 100644 --- a/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 +++ b/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 @@ -14,7 +14,6 @@ $DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_ho # First, restart the daemon systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME} -COUNT=10 # Wait and ensure the socket exists after restarting the daemon while [ $RETRIES -ne 0 ]; do $DOCKER_EXEC test -S $SOCKET && exit 0 From 5fa2b6993bf9f42e0f09c22b3d845dbe92f4d03f Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Mon, 15 Oct 2018 23:42:16 +0200 Subject: [PATCH 077/105] contrib: add a bash script to snapshort libvirt vms This script is still 'work in progress' but could be used to make snapshot of Libvirt VMs. This can save some times when deploying again and again. 
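A rough usage sketch (the "osd" pattern below is only an example; any substring of the libvirt domain names works, since the script matches it against "sudo virsh list --all" and therefore needs to run as a user with sudo rights):

    # snapshot every VM whose libvirt name matches "osd"
    bash contrib/snapshot_vms.sh --snapshot osd
    # revert those VMs to their most recent snapshot
    bash contrib/snapshot_vms.sh --revert osd
    # drop all snapshots for those VMs
    bash contrib/snapshot_vms.sh --delete osd
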
Signed-off-by: Guillaume Abrioux --- contrib/snapshot_vms.sh | 73 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 contrib/snapshot_vms.sh diff --git a/contrib/snapshot_vms.sh b/contrib/snapshot_vms.sh new file mode 100644 index 000000000..d5d2b7217 --- /dev/null +++ b/contrib/snapshot_vms.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +create_snapshots() { + local pattern=$1 + for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do + sudo virsh shutdown "${vm}" + wait_for_shutoff "${vm}" + sudo virsh snapshot-create "${vm}" + sudo virsh start "${vm}" + done +} + +delete_snapshots() { + local pattern=$1 + for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do + for snapshot in $(sudo virsh snapshot-list "${vm}" --name); do + echo "deleting snapshot ${snapshot} (vm: ${vm})" + sudo virsh snapshot-delete "${vm}" "${snapshot}" + done + done +} + +revert_snapshots() { + local pattern=$1 + for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do + echo "restoring last snapshot for ${vm}" + sudo virsh snapshot-revert "${vm}" --current + sudo virsh start "${vm}" + done +} + +wait_for_shutoff() { + local vm=$1 + local retries=60 + local delay=2 + + until test "${retries}" -eq 0 + do + echo "waiting for ${vm} to be shut off... #${retries}" + sleep "${delay}" + let "retries=$retries-1" + local current_state=$(sudo virsh domstate "${vm}") + test "${current_state}" == "shut off" && return + done + echo couldnt shutoff "${vm}" + exit 1 +} + +while :; do + case $1 in + -d|--delete) + delete_snapshots "$2" + exit + ;; + -i|--interactive) + INTERACTIVE=TRUE + ;; + -s|--snapshot) + create_snapshots "$2" + ;; + -r|--revert) + revert_snapshots "$2" + ;; + --) + shift + break + ;; + *) + break + esac + + shift +done From ac37a0d0cd4df8d8e2ee25a256561c9094d9a074 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Thu, 11 Oct 2018 12:26:04 +0200 Subject: [PATCH 078/105] ceph-defaults: set ceph_stable_openstack_release_uca to queens Liberty is no longer available in the UCA. The last available release there is currently Queens. Signed-off-by: Christian Berendt --- docs/source/installation/methods.rst | 2 +- group_vars/all.yml.sample | 2 +- group_vars/rhcs.yml.sample | 2 +- roles/ceph-defaults/defaults/main.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/installation/methods.rst b/docs/source/installation/methods.rst index bb54005d5..682b6d0a0 100644 --- a/docs/source/installation/methods.rst +++ b/docs/source/installation/methods.rst @@ -41,7 +41,7 @@ UCA repository If ``ceph_repository`` is set to ``uca``, packages you will be by default installed from http://ubuntu-cloud.archive.canonical.com/ubuntu, this can be changed by tweaking ``ceph_stable_repo_uca``. You can also decide which OpenStack version the Ceph packages should come from by tweaking ``ceph_stable_openstack_release_uca``. -For example, ``ceph_stable_openstack_release_uca: liberty``. +For example, ``ceph_stable_openstack_release_uca: queens``. 
Dev repository ~~~~~~~~~~~~~~ diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index e6f95f4fa..cb3cbc72d 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -213,7 +213,7 @@ dummy: # # #ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu" -#ceph_stable_openstack_release_uca: liberty +#ceph_stable_openstack_release_uca: queens #ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}" # REPOSITORY: openSUSE OBS diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index cabb77279..64dda8845 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -213,7 +213,7 @@ ceph_rhcs_version: 3 # # #ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu" -#ceph_stable_openstack_release_uca: liberty +#ceph_stable_openstack_release_uca: queens #ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}" # REPOSITORY: openSUSE OBS diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 511b2294b..307328aa5 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -205,7 +205,7 @@ ceph_rhcs_cdn_debian_repo_version: "/3-release/" # for GA, later for updates use # # #ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu" -#ceph_stable_openstack_release_uca: liberty +#ceph_stable_openstack_release_uca: queens #ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}" # REPOSITORY: openSUSE OBS From f0b2d82695b61eda22a8ed791269264980b2d805 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 16 Oct 2018 15:09:48 +0200 Subject: [PATCH 079/105] infra: fix wrong condition on firewalld start task a non skipped task won't have the `skipped` attribute, so `start firewalld` task will complain about that. Indeed, `skipped` and `rc` attributes won't exist since the first task `check firewalld installation on redhat or suse` won't be skipped in case of non-containerized deployment. 
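The behaviour is easy to reproduce outside of a full deployment. As a quick sanity check only (assuming a machine with Ansible installed, not part of the playbook itself), evaluating the new expression against a registered result that has no "rc" key, i.e. a skipped package task, falls back to the default instead of tripping over a missing attribute:

    ansible localhost -m debug \
      -a "msg={{ (firewalld_pkg_query.get('rc', 1) == 0) or is_atomic }}" \
      -e '{"firewalld_pkg_query": {"changed": false}, "is_atomic": false}'

This renders to False instead of raising the "'dict object' has no attribute 'skipped'" error that the original "not firewalld_pkg_query.skipped" condition hits.
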
Fixes: #3236 Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1541840 Signed-off-by: Guillaume Abrioux --- roles/ceph-infra/tasks/configure_firewall.yml | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/roles/ceph-infra/tasks/configure_firewall.yml b/roles/ceph-infra/tasks/configure_firewall.yml index 53329af34..d0075979c 100644 --- a/roles/ceph-infra/tasks/configure_firewall.yml +++ b/roles/ceph-infra/tasks/configure_firewall.yml @@ -18,8 +18,7 @@ state: started enabled: yes when: - - not firewalld_pkg_query.skipped - - firewalld_pkg_query.rc == 0 + - firewalld_pkg_query.get('rc', 1) == 0 or is_atomic - name: open monitor ports @@ -34,7 +33,7 @@ when: - mon_group_name is defined - mon_group_name in group_names - - firewalld_pkg_query.rc == 0 + - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic) tags: - firewall @@ -50,7 +49,7 @@ when: - mgr_group_name is defined - mgr_group_name in group_names - - firewalld_pkg_query.rc == 0 + - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic) tags: - firewall @@ -69,7 +68,7 @@ when: - osd_group_name is defined - osd_group_name in group_names - - firewalld_pkg_query.rc == 0 + - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic) tags: - firewall @@ -85,7 +84,7 @@ when: - rgw_group_name is defined - rgw_group_name in group_names - - firewalld_pkg_query.rc == 0 + - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic) tags: - firewall @@ -101,7 +100,7 @@ when: - mds_group_name is defined - mds_group_name in group_names - - firewalld_pkg_query.rc == 0 + - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic) tags: - firewall @@ -117,7 +116,7 @@ when: - nfs_group_name is defined - nfs_group_name in group_names - - firewalld_pkg_query.rc == 0 + - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic) tags: - firewall @@ -133,7 +132,7 @@ when: - nfs_group_name is defined - nfs_group_name in group_names - - firewalld_pkg_query.rc == 0 + - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic) tags: - firewall @@ -149,7 +148,7 @@ when: - restapi_group_name is defined - restapi_group_name in group_names - - firewalld_pkg_query.rc == 0 + - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic) tags: - firewall @@ -165,7 +164,7 @@ when: - rbdmirror_group_name is defined - rbdmirror_group_name in group_names - - firewalld_pkg_query.rc == 0 + - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic) tags: - firewall @@ -181,7 +180,7 @@ when: - iscsi_group_name is defined - iscsi_group_name in group_names - - firewalld_pkg_query.rc == 0 + - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic) tags: - firewall From 3e0fa3bc180f10d72868de74b09bbf6355359485 Mon Sep 17 00:00:00 2001 From: Andy McCrae Date: Fri, 5 Oct 2018 14:36:36 +0100 Subject: [PATCH 080/105] Add ability to use a different client container Currently a throw-away container is built to run ceph client commands to setup users, pools & auth keys. This utilises the same base ceph container which has all the ceph services inside it. This PR allows the use of a separate container if the deployer wishes - but defaults to use the same full ceph container. This can be used for different architectures or distributions, which may support the the Ceph client, but not Ceph server, and allows the deployer to build and specify a separate client container if need be. 
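As an illustration only (the registry, image name and tag below are placeholders, not real images), an aarch64 deployer could keep the stock daemon image and point the client-side throw-away container elsewhere, either in group_vars or straight on the command line:

    ansible-playbook site-docker.yml.sample \
      -e ceph_client_docker_registry=registry.example.com \
      -e ceph_client_docker_image=ceph-client-aarch64 \
      -e ceph_client_docker_image_tag=latest

If the three ceph_client_docker_* variables are left untouched they simply fall back to the existing ceph_docker_* values, so nothing changes for current deployments.
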
Signed-off-by: Andy McCrae --- group_vars/all.yml.sample | 4 ++++ group_vars/rhcs.yml.sample | 4 ++++ roles/ceph-client/tasks/create_users_keys.yml | 2 +- roles/ceph-defaults/defaults/main.yml | 4 ++++ roles/ceph-docker-common/tasks/main.yml | 2 +- 5 files changed, 14 insertions(+), 2 deletions(-) diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index cb3cbc72d..d80ac21c2 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -516,6 +516,10 @@ dummy: #ceph_docker_image: "ceph/daemon" #ceph_docker_image_tag: latest #ceph_docker_registry: docker.io +## Client only docker image - defaults to {{ ceph_docker_image }} +#ceph_client_docker_image: "{{ ceph_docker_image }}" +#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}" +#ceph_client_docker_registry: "{{ ceph_docker_registry }}" #ceph_docker_enable_centos_extra_repo: false #ceph_docker_on_openstack: false #containerized_deployment: False diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index 64dda8845..68b69ed40 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -516,6 +516,10 @@ ceph_rhcs_version: 3 ceph_docker_image: "rhceph-3-rhel7" ceph_docker_image_tag: "latest" ceph_docker_registry: "registry.access.redhat.com/rhceph/" +## Client only docker image - defaults to {{ ceph_docker_image }} +#ceph_client_docker_image: "{{ ceph_docker_image }}" +#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}" +#ceph_client_docker_registry: "{{ ceph_docker_registry }}" #ceph_docker_enable_centos_extra_repo: false #ceph_docker_on_openstack: false #containerized_deployment: False diff --git a/roles/ceph-client/tasks/create_users_keys.yml b/roles/ceph-client/tasks/create_users_keys.yml index 95ac72577..72f11d0e7 100644 --- a/roles/ceph-client/tasks/create_users_keys.yml +++ b/roles/ceph-client/tasks/create_users_keys.yml @@ -33,7 +33,7 @@ -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \ --name ceph-create-keys \ --entrypoint=sleep \ - {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ + {{ ceph_client_docker_registry}}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }} \ 300 changed_when: false when: diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 307328aa5..7277a0171 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -508,6 +508,10 @@ docker: false ceph_docker_image: "ceph/daemon" ceph_docker_image_tag: latest ceph_docker_registry: docker.io +## Client only docker image - defaults to {{ ceph_docker_image }} +ceph_client_docker_image: "{{ ceph_docker_image }}" +ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}" +ceph_client_docker_registry: "{{ ceph_docker_registry }}" ceph_docker_enable_centos_extra_repo: false ceph_docker_on_openstack: false containerized_deployment: False diff --git a/roles/ceph-docker-common/tasks/main.yml b/roles/ceph-docker-common/tasks/main.yml index d0110c8a7..e04450409 100644 --- a/roles/ceph-docker-common/tasks/main.yml +++ b/roles/ceph-docker-common/tasks/main.yml @@ -82,7 +82,7 @@ - fetch_container_image - name: get ceph version - command: docker run --rm --entrypoint /usr/bin/ceph {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --version + command: docker run --rm --entrypoint /usr/bin/ceph {{ ceph_client_docker_registry}}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }} --version changed_when: false check_mode: no 
register: ceph_version From 680574ed4c86018387619cc108302759738f963b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 27 Sep 2018 16:29:22 +0200 Subject: [PATCH 081/105] ceph-fetch-keys: refact MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commits simplies the usage of the ceph-fetch-keys role. The role now has a nicer way to find various ceph keys and fetch them on the ansible server. Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1624962 Signed-off-by: Sébastien Han --- roles/ceph-fetch-keys/tasks/main.yml | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/roles/ceph-fetch-keys/tasks/main.yml b/roles/ceph-fetch-keys/tasks/main.yml index 61f2f3a58..5957093c0 100644 --- a/roles/ceph-fetch-keys/tasks/main.yml +++ b/roles/ceph-fetch-keys/tasks/main.yml @@ -1,24 +1,16 @@ --- -- name: find ceph keys +- name: lookup keys in /etc/ceph shell: ls -1 /etc/ceph/*.keyring changed_when: false register: ceph_keys - check_mode: no -- name: set keys permissions - file: - path: "{{ item }}" - mode: "{{ ceph_keyring_permissions }}" - owner: root - group: root - with_items: - - "{{ ceph_keys.stdout_lines }}" -- name: copy keys to the ansible server +- name: "copy ceph user and bootstrap keys to the ansible server in {{ fetch_directory }}/{{ fsid }}/" fetch: src: "{{ item }}" dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}" flat: yes + fail_on_missing: false run_once: true with_items: - "{{ ceph_keys.stdout_lines }}" @@ -26,3 +18,4 @@ - "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring" - "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring" - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring" + - "/var/lib/ceph/bootstrap-mgr/{{ cluster }}.keyring" \ No newline at end of file From fbd878c8d5d20535c7f4eee685e84f70e1faf617 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 27 Sep 2018 16:31:22 +0200 Subject: [PATCH 082/105] infra: rename osd-configure to add-osd and improve it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The playbook has various improvements: * run ceph-validate role before doing anything * run ceph-fetch-keys only on the first monitor of the inventory list * set noup flag so PGs get distributed once all the new OSDs have been added to the cluster and unset it when they are up and running Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1624962 Signed-off-by: Sébastien Han --- infrastructure-playbooks/add-osd.yml | 94 ++++++++++++++++++++++ infrastructure-playbooks/osd-configure.yml | 20 ----- roles/ceph-osd/tasks/main.yml | 3 + 3 files changed, 97 insertions(+), 20 deletions(-) create mode 100644 infrastructure-playbooks/add-osd.yml delete mode 100644 infrastructure-playbooks/osd-configure.yml diff --git a/infrastructure-playbooks/add-osd.yml b/infrastructure-playbooks/add-osd.yml new file mode 100644 index 000000000..4a99cd380 --- /dev/null +++ b/infrastructure-playbooks/add-osd.yml @@ -0,0 +1,94 @@ +--- +# This playbook is used to add a new OSD to +# an existing cluster without the need for running +# the ceph-docker-common or ceph-common and ceph-mon role again against all +# of the existing monitors. +# +# It can run from any machine. Even if the fetch directory is not present +# it will be created. +# +# Ensure that all monitors are present in the mons +# group in your inventory so that the ceph configuration file +# is created correctly for the new OSD(s). 
+# +# It is expected to edit your inventory file to only point to the OSD hosts +# you want to play the playbook on. So you need to comment already deployed OSD +# and let uncommented the new OSDs. +# +- hosts: + - mons + - osds + + gather_facts: False + + vars: + delegate_facts_host: True + + pre_tasks: + - name: gather facts + setup: + when: + - not delegate_facts_host | bool + + - name: gather and delegate facts + setup: + delegate_to: "{{ item }}" + delegate_facts: True + with_items: + - "{{ groups['mons'] }}" + - "{{ groups['osds'] }}" + run_once: True + when: + - delegate_facts_host | bool + + roles: + - ceph-defaults + - ceph-validate + +- hosts: "{{ groups['mons'][0] }}" + gather_facts: False + become: True + + roles: + - role: ceph-defaults + - role: ceph-fetch-keys + + post_tasks: + - name: set_fact docker_exec_cmd if containerized_deployment + set_fact: + docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}" + when: containerized_deployment + +- hosts: osds + gather_facts: False + become: True + + pre_tasks: + # this task is needed so we can skip the openstack_config.yml include in roles/ceph-osd + - name: set_fact add_osd + set_fact: + add_osd: True + + - name: set noup flag + command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set noup" + delegate_to: "{{ groups['mons'][0] }}" + run_once: True + changed_when: False + + roles: + - role: ceph-defaults + - role: ceph-handler + - role: ceph-infra + - role: ceph-docker-common + when: containerized_deployment | bool + - role: ceph-common + when: not containerized_deployment | bool + - role: ceph-config + - role: ceph-osd + + post_tasks: + - name: unset noup flag + command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup" + delegate_to: "{{ groups['mons'][0] }}" + run_once: True + changed_when: False \ No newline at end of file diff --git a/infrastructure-playbooks/osd-configure.yml b/infrastructure-playbooks/osd-configure.yml deleted file mode 100644 index b35e12142..000000000 --- a/infrastructure-playbooks/osd-configure.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -# This playbook is used to add a new OSD to -# an existing cluster without the need for running -# the ceph-common or ceph-mon role again against all -# of the existing monitors. -# -# Ensure that all monitors are present in the mons -# group in your inventory so that the ceph.conf is -# created correctly for the new OSD. 
-- hosts: mons - become: True - roles: - - ceph-defaults - - ceph-fetch-keys - -- hosts: osds - become: True - roles: - - ceph-defaults - - ceph-osd diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index 3b88fe19d..9e098c06f 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -80,6 +80,7 @@ openstack_keys_tmp: "{{ openstack_keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap|default(''), 'mds': item.mds_cap|default(''), 'mgr': item.mgr_cap|default('') } , 'mode': item.mode } ] }}" with_items: "{{ openstack_keys }}" when: + - not add_osd|default(False) - openstack_config - item.get('mon_cap', None) # it's enough to assume we are running an old-fashionned syntax simply by checking the presence of mon_cap since every key needs this cap @@ -87,11 +88,13 @@ set_fact: openstack_keys: "{{ openstack_keys_tmp }}" when: + - not add_osd|default(False) - openstack_keys_tmp is defined # Create the pools listed in openstack_pools - name: include openstack_config.yml include_tasks: openstack_config.yml when: + - not add_osd|default(False) - openstack_config - inventory_hostname == groups[osd_group_name] | last From 3632b2600598f94663bed3c25a7ddb4a7558e8e2 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 16 Oct 2018 17:05:10 +0200 Subject: [PATCH 083/105] tests: add tests for day-2-operation playbook Adding testing scenarios for day-2-operation playbook. Steps: - deploys a cluster, - run testinfra, - test idempotency, - add a new osd node, - run testinfra Signed-off-by: Guillaume Abrioux --- .../centos/7/add-osds-container/Vagrantfile | 1 + .../7/add-osds-container/ceph-override.json | 1 + .../7/add-osds-container/group_vars/all | 21 ++++++ .../centos/7/add-osds-container/hosts | 8 ++ .../centos/7/add-osds-container/hosts-2 | 9 +++ .../add-osds-container/vagrant_variables.yml | 73 +++++++++++++++++++ .../functional/centos/7/add-osds/Vagrantfile | 1 + .../centos/7/add-osds/ceph-override.json | 1 + .../centos/7/add-osds/group_vars/all | 21 ++++++ tests/functional/centos/7/add-osds/hosts | 8 ++ tests/functional/centos/7/add-osds/hosts-2 | 9 +++ .../centos/7/add-osds/vagrant_variables.yml | 73 +++++++++++++++++++ tox.ini | 27 ++++++- 13 files changed, 252 insertions(+), 1 deletion(-) create mode 120000 tests/functional/centos/7/add-osds-container/Vagrantfile create mode 120000 tests/functional/centos/7/add-osds-container/ceph-override.json create mode 100644 tests/functional/centos/7/add-osds-container/group_vars/all create mode 100644 tests/functional/centos/7/add-osds-container/hosts create mode 100644 tests/functional/centos/7/add-osds-container/hosts-2 create mode 100644 tests/functional/centos/7/add-osds-container/vagrant_variables.yml create mode 120000 tests/functional/centos/7/add-osds/Vagrantfile create mode 120000 tests/functional/centos/7/add-osds/ceph-override.json create mode 100644 tests/functional/centos/7/add-osds/group_vars/all create mode 100644 tests/functional/centos/7/add-osds/hosts create mode 100644 tests/functional/centos/7/add-osds/hosts-2 create mode 100644 tests/functional/centos/7/add-osds/vagrant_variables.yml diff --git a/tests/functional/centos/7/add-osds-container/Vagrantfile b/tests/functional/centos/7/add-osds-container/Vagrantfile new file mode 120000 index 000000000..dfd7436c9 --- /dev/null +++ b/tests/functional/centos/7/add-osds-container/Vagrantfile @@ -0,0 +1 @@ +../../../../../Vagrantfile \ No newline at end of file diff --git 
a/tests/functional/centos/7/add-osds-container/ceph-override.json b/tests/functional/centos/7/add-osds-container/ceph-override.json new file mode 120000 index 000000000..775cb006a --- /dev/null +++ b/tests/functional/centos/7/add-osds-container/ceph-override.json @@ -0,0 +1 @@ +../cluster/ceph-override.json \ No newline at end of file diff --git a/tests/functional/centos/7/add-osds-container/group_vars/all b/tests/functional/centos/7/add-osds-container/group_vars/all new file mode 100644 index 000000000..c5e1575ea --- /dev/null +++ b/tests/functional/centos/7/add-osds-container/group_vars/all @@ -0,0 +1,21 @@ +--- +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.55.0/24" +cluster_network: "192.168.56.0/24" +monitor_interface: eth1 +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +osd_scenario: lvm +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: /dev/sda + - data: /dev/sdb +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_size: 1 diff --git a/tests/functional/centos/7/add-osds-container/hosts b/tests/functional/centos/7/add-osds-container/hosts new file mode 100644 index 000000000..d6c89012a --- /dev/null +++ b/tests/functional/centos/7/add-osds-container/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 diff --git a/tests/functional/centos/7/add-osds-container/hosts-2 b/tests/functional/centos/7/add-osds-container/hosts-2 new file mode 100644 index 000000000..288617eda --- /dev/null +++ b/tests/functional/centos/7/add-osds-container/hosts-2 @@ -0,0 +1,9 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 +osd1 diff --git a/tests/functional/centos/7/add-osds-container/vagrant_variables.yml b/tests/functional/centos/7/add-osds-container/vagrant_variables.yml new file mode 100644 index 000000000..1e72ca2fb --- /dev/null +++ b/tests/functional/centos/7/add-osds-container/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# Deploy RESTAPI on each of the Monitors +restapi: true + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.53 +cluster_subnet: 192.168.56 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/centos/7/add-osds/Vagrantfile b/tests/functional/centos/7/add-osds/Vagrantfile new file mode 120000 index 000000000..dfd7436c9 --- /dev/null +++ b/tests/functional/centos/7/add-osds/Vagrantfile @@ -0,0 +1 @@ +../../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/centos/7/add-osds/ceph-override.json b/tests/functional/centos/7/add-osds/ceph-override.json new file mode 120000 index 000000000..775cb006a --- /dev/null +++ b/tests/functional/centos/7/add-osds/ceph-override.json @@ -0,0 +1 @@ +../cluster/ceph-override.json \ No newline at end of file diff --git a/tests/functional/centos/7/add-osds/group_vars/all b/tests/functional/centos/7/add-osds/group_vars/all new file mode 100644 index 000000000..6896bd97e --- /dev/null +++ b/tests/functional/centos/7/add-osds/group_vars/all @@ -0,0 +1,21 @@ +--- +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.53.0/24" +cluster_network: "192.168.54.0/24" +monitor_interface: eth1 +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +osd_scenario: lvm +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: /dev/sda + - data: /dev/sdb +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_size: 1 diff --git a/tests/functional/centos/7/add-osds/hosts b/tests/functional/centos/7/add-osds/hosts new file mode 100644 index 000000000..d6c89012a --- /dev/null +++ b/tests/functional/centos/7/add-osds/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 diff --git a/tests/functional/centos/7/add-osds/hosts-2 b/tests/functional/centos/7/add-osds/hosts-2 new file mode 100644 index 000000000..288617eda 
--- /dev/null +++ b/tests/functional/centos/7/add-osds/hosts-2 @@ -0,0 +1,9 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 +osd1 diff --git a/tests/functional/centos/7/add-osds/vagrant_variables.yml b/tests/functional/centos/7/add-osds/vagrant_variables.yml new file mode 100644 index 000000000..270589585 --- /dev/null +++ b/tests/functional/centos/7/add-osds/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# Deploy RESTAPI on each of the Monitors +restapi: true + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.53 +cluster_subnet: 192.168.54 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tox.ini b/tox.ini index 5d8b0ffdc..6121e6175 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = {dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation,filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch} +envlist = {dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation,filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch,add_osds,add_osds_container} infra_lv_create skipsdist = True @@ -144,6 +144,22 @@ commands= testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers {toxinidir}/tests/functional/tests +[add-osds] +commands= + ansible-playbook -vv -i {changedir}/hosts-2 --limit osd1 {toxinidir}/tests/functional/setup.yml + cp {toxinidir}/infrastructure-playbooks/add-osd.yml {toxinidir}/add-osd.yml + ansible-playbook -vv -i {changedir}/hosts-2 --limit osd1 {toxinidir}/add-osd.yml --extra-vars "\ + ireallymeanit=yes \ + fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \ + ceph_stable_release={env:CEPH_STABLE_RELEASE:mimic} \ + ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \ + ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \ + ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + " + testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests + [testenv] whitelist_externals = vagrant @@ -151,6 +167,7 @@ whitelist_externals = pip cp sleep + rm passenv=* sitepackages=True setenv= @@ -172,6 +189,8 @@ setenv= purge_bluestore_osds_container: PURGE_PLAYBOOK = purge-docker-cluster.yml purge_filestore_osds_container: PLAYBOOK = site-docker.yml.sample purge_filestore_osds_container: PURGE_PLAYBOOK = purge-docker-cluster.yml + add_osds: PLAYBOOK = site.yml.sample + add_osds_container: PLAYBOOK = site-docker.yml.sample filestore_osds_container: PLAYBOOK = site-docker.yml.sample bluestore_osds_container: PLAYBOOK = 
site-docker.yml.sample @@ -203,6 +222,8 @@ setenv= lvm_batch_container: CEPH_STABLE_RELEASE = luminous update_cluster: ROLLING_UPDATE = True update_docker_cluster: ROLLING_UPDATE = True + add_osds: CEPH_STABLE_RELEASE = luminous + add_osds_container: CEPH_STABLE_RELEASE = luminous deps= -r{toxinidir}/tests/requirements.txt changedir= # tests a 1 mon, 1 osd, 1 mds and 1 rgw xenial cluster using non-collocated OSD scenario @@ -239,6 +260,8 @@ changedir= bluestore_lvm_osds_container: {toxinidir}/tests/functional/centos/7/bs-lvm-osds-container purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds ooo_collocation: {toxinidir}/tests/functional/centos/7/ooo-collocation + add_osds: {toxinidir}/tests/functional/centos/7/add-osds + add_osds_container: {toxinidir}/tests/functional/centos/7/add-osds-container commands= rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup" @@ -312,5 +335,7 @@ commands= shrink_mon_container: {[shrink-mon]commands} shrink_osd: {[shrink-osd]commands} shrink_osd_container: {[shrink-osd]commands} + add_osds: {[add-osds]commands} + add_osds_container: {[add-osds]commands} vagrant destroy --force From d6e79044efbbfad9ba1b809276baf03284780392 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 24 May 2018 10:47:29 -0700 Subject: [PATCH 084/105] infra: add a gather-ceph-logs.yml playbook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a gather-ceph-logs.yml which will log onto all the machines from your inventory and will gather ceph logs. This is not intended to work on containerized environments since the logs are stored in journald. Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1582280 Signed-off-by: Sébastien Han --- infrastructure-playbooks/gather-ceph-logs.yml | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 infrastructure-playbooks/gather-ceph-logs.yml diff --git a/infrastructure-playbooks/gather-ceph-logs.yml b/infrastructure-playbooks/gather-ceph-logs.yml new file mode 100644 index 000000000..830c2ab83 --- /dev/null +++ b/infrastructure-playbooks/gather-ceph-logs.yml @@ -0,0 +1,42 @@ +- hosts: + - mons + - agents + - osds + - mdss + - rgws + - nfss + - restapis + - rbdmirrors + - clients + - mgrs + - iscsi-gws + - iscsigws + + gather_facts: false + become: yes + + tasks: + - name: create a temp directory + local_action: + module: tempfile + state: directory + prefix: ceph_ansible + run_once: true + register: localtempfile + + - name: set_fact lookup_ceph_config - lookup keys, conf and logs + shell: ls -1 {{ item }} + register: ceph_collect + changed_when: false + with_items: + - /etc/ceph/* + - /var/log/ceph/* + + - name: collect ceph logs, config and keys in "{{ localtempfile.path }}" on the machine running ansible + fetch: + src: "{{ item }}" + dest: "{{ localtempfile.path }}" + fail_on_missing: no + flat: no + with_items: + - "{{ ceph_collect.stdout_lines }}" \ No newline at end of file From b8418ebd179e92d6666d53f0478098cdda0ff7ee Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Wed, 17 Oct 2018 13:57:09 +0200 Subject: [PATCH 085/105] add-osds: followup on 3632b26 Three fixes: - fix a typo in vagrant_variables that cause a networking issue for containerized scenario. - add containerized_deployment: true - remove a useless block of code: the fact docker_exec_cmd is set in ceph-defaults which is played right after. 
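For reference, the alignment this typo fix restores in the containerized add-osds scenario looks roughly like this (a sketch pieced together from the fixture values above, not an authoritative excerpt):

    # tests/functional/centos/7/add-osds-container/group_vars/all
    public_network: "192.168.55.0/24"
    cluster_network: "192.168.56.0/24"
    # tests/functional/centos/7/add-osds-container/vagrant_variables.yml
    public_subnet: 192.168.55    # was 192.168.53, which did not match public_network
    cluster_subnet: 192.168.56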
Signed-off-by: Guillaume Abrioux --- infrastructure-playbooks/add-osd.yml | 14 -------------- .../centos/7/add-osds-container/group_vars/all | 2 ++ .../7/add-osds-container/vagrant_variables.yml | 2 +- 3 files changed, 3 insertions(+), 15 deletions(-) diff --git a/infrastructure-playbooks/add-osd.yml b/infrastructure-playbooks/add-osd.yml index 4a99cd380..edf15ebc5 100644 --- a/infrastructure-playbooks/add-osd.yml +++ b/infrastructure-playbooks/add-osd.yml @@ -45,20 +45,6 @@ - ceph-defaults - ceph-validate -- hosts: "{{ groups['mons'][0] }}" - gather_facts: False - become: True - - roles: - - role: ceph-defaults - - role: ceph-fetch-keys - - post_tasks: - - name: set_fact docker_exec_cmd if containerized_deployment - set_fact: - docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}" - when: containerized_deployment - - hosts: osds gather_facts: False become: True diff --git a/tests/functional/centos/7/add-osds-container/group_vars/all b/tests/functional/centos/7/add-osds-container/group_vars/all index c5e1575ea..9b1a6c3a5 100644 --- a/tests/functional/centos/7/add-osds-container/group_vars/all +++ b/tests/functional/centos/7/add-osds-container/group_vars/all @@ -1,6 +1,8 @@ --- +docker: True ceph_origin: repository ceph_repository: community +containerized_deployment: true cluster: ceph public_network: "192.168.55.0/24" cluster_network: "192.168.56.0/24" diff --git a/tests/functional/centos/7/add-osds-container/vagrant_variables.yml b/tests/functional/centos/7/add-osds-container/vagrant_variables.yml index 1e72ca2fb..67b122496 100644 --- a/tests/functional/centos/7/add-osds-container/vagrant_variables.yml +++ b/tests/functional/centos/7/add-osds-container/vagrant_variables.yml @@ -22,7 +22,7 @@ restapi: true ceph_install_source: stable # SUBNETS TO USE FOR THE VMS -public_subnet: 192.168.53 +public_subnet: 192.168.55 cluster_subnet: 192.168.56 # MEMORY From b8ad35ceb99cdbd1644c79dd689b818f095ba8b8 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 16 Oct 2018 16:25:12 +0200 Subject: [PATCH 086/105] tests: test `test_all_docker_osds_are_up_and_in()` from mon nodes Let's get the osd tree from mons instead on osds. This way we don't have to predict an OSD container name. 
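In practice the check reduces to counting the entries reported as "up" in the monitor's OSD tree, along these lines (a sketch: the hostname and cluster name are taken from the test fixtures above, and the JSON output is abbreviated):

    sudo docker exec ceph-mon-mon0 ceph --cluster=ceph --connect-timeout 5 osd tree -f json
    # -> {"nodes": [..., {"type": "osd", "status": "up", ...}, ...]}
    # the test counts the nodes whose type is "osd" and whose status is "up"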
Signed-off-by: Guillaume Abrioux --- .../tests/mon/test_osds_from_mons.py | 28 ++++++++++++++ tests/functional/tests/osd/test_osds.py | 37 ------------------- 2 files changed, 28 insertions(+), 37 deletions(-) create mode 100644 tests/functional/tests/mon/test_osds_from_mons.py diff --git a/tests/functional/tests/mon/test_osds_from_mons.py b/tests/functional/tests/mon/test_osds_from_mons.py new file mode 100644 index 000000000..f8eeab74b --- /dev/null +++ b/tests/functional/tests/mon/test_osds_from_mons.py @@ -0,0 +1,28 @@ +import pytest +import json + + +class TestOsdsFromMons(object): + def _get_nb_osd_up(self, osd_tree): + nb_up = 0 + for n in osd_tree['nodes']: + if n['type'] == 'osd' and n['status'] == 'up': + nb_up += 1 + return nb_up + + @pytest.mark.no_docker + def test_all_osds_are_up_and_in(self, node, host): + cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 osd tree -f json".format(cluster=node["cluster_name"]) + output = json.loads(host.check_output(cmd)) + nb_osd_up = self._get_nb_osd_up(output) + assert int(node["num_osds"]) == int(nb_osd_up) + + @pytest.mark.docker + def test_all_docker_osds_are_up_and_in(self, node, host): + cmd = "sudo docker exec ceph-mon-{inventory_hostname} ceph --cluster={cluster} --connect-timeout 5 osd tree -f json".format( + cluster=node["cluster_name"], + inventory_hostname=node['vars']['inventory_hostname'] + ) + output = json.loads(host.check_output(cmd)) + nb_osd_up = self._get_nb_osd_up(output) + assert node["num_osds"] == nb_osd_up diff --git a/tests/functional/tests/osd/test_osds.py b/tests/functional/tests/osd/test_osds.py index 851c7d7f5..20fbeacb4 100644 --- a/tests/functional/tests/osd/test_osds.py +++ b/tests/functional/tests/osd/test_osds.py @@ -47,40 +47,3 @@ class TestOSDs(object): @pytest.mark.lvm_scenario def test_ceph_volume_systemd_is_installed(self, node, host): host.exists('ceph-volume-systemd') - - def _get_osd_id_from_host(self, node, osd_tree): - children = [] - for n in osd_tree['nodes']: - if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host': - children = n['children'] - return children - - def _get_nb_up_osds_from_ids(self, node, osd_tree): - nb_up = 0 - ids = self._get_osd_id_from_host(node, osd_tree) - for n in osd_tree['nodes']: - if n['id'] in ids and n['status'] == 'up': - nb_up += 1 - return nb_up - - @pytest.mark.no_docker - def test_all_osds_are_up_and_in(self, node, host): - cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(cluster=node["cluster_name"]) - output = json.loads(host.check_output(cmd)) - assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output) - - @pytest.mark.docker - def test_all_docker_osds_are_up_and_in(self, node, host): - osd_scenario = node["vars"].get('osd_scenario', False) - if osd_scenario in ['lvm', 'lvm-batch']: - osd_id = "0" - else: - hostname = node["vars"]["inventory_hostname"] - osd_id = os.path.join(hostname+"-sda") - - cmd = "sudo docker exec ceph-osd-{osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( - osd_id=osd_id, - cluster=node["cluster_name"] - ) - output = json.loads(host.check_output(cmd)) - assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output) From e77c36ad1737268397f909b5809aae625688f2a9 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Thu, 18 Oct 2018 13:41:49 +0200 Subject: [PATCH 
087/105] infra: move restart fw handler in ceph-infra role Move the handler to restart firewall in ceph-infra role. Closes: #3243 Signed-off-by: Guillaume Abrioux --- roles/{ceph-common => ceph-infra}/handlers/main.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename roles/{ceph-common => ceph-infra}/handlers/main.yml (100%) diff --git a/roles/ceph-common/handlers/main.yml b/roles/ceph-infra/handlers/main.yml similarity index 100% rename from roles/ceph-common/handlers/main.yml rename to roles/ceph-infra/handlers/main.yml From cb35cac92662de35d2dafe8c103996d750d76778 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Thu, 18 Oct 2018 13:45:14 +0200 Subject: [PATCH 088/105] tests: set configure_firewall: true in centos7|docker_cluster This way the CI will cover this part of the code. Signed-off-by: Guillaume Abrioux --- tests/functional/centos/7/cluster/group_vars/all | 1 + tests/functional/centos/7/docker/group_vars/all | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/functional/centos/7/cluster/group_vars/all b/tests/functional/centos/7/cluster/group_vars/all index 46934a5b0..5a72a120a 100644 --- a/tests/functional/centos/7/cluster/group_vars/all +++ b/tests/functional/centos/7/cluster/group_vars/all @@ -4,6 +4,7 @@ ceph_repository: community cluster: test public_network: "192.168.1.0/24" cluster_network: "192.168.2.0/24" +configure_firewall: true radosgw_interface: eth1 ceph_conf_overrides: global: diff --git a/tests/functional/centos/7/docker/group_vars/all b/tests/functional/centos/7/docker/group_vars/all index c3b5a92d0..c53a831ab 100644 --- a/tests/functional/centos/7/docker/group_vars/all +++ b/tests/functional/centos/7/docker/group_vars/all @@ -7,6 +7,7 @@ containerized_deployment: True cluster: test monitor_interface: eth1 radosgw_interface: eth1 +configure_firewall: true ceph_mon_docker_subnet: "{{ public_network }}" ceph_docker_on_openstack: False public_network: "192.168.17.0/24" From 1f9090884eae9d94f9952eecc528bce700a06cbc Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Thu, 18 Oct 2018 15:43:36 +0200 Subject: [PATCH 089/105] Revert "tests: test `test_all_docker_osds_are_up_and_in()` from mon nodes" This approach doesn't work with all scenarios because it's comparing a local OSD number expected to a global OSD number found in the whole cluster. This reverts commit b8ad35ceb99cdbd1644c79dd689b818f095ba8b8. 
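As an illustration of the mismatch (an assumed reading of the add-osds fixtures above, where each OSD host defines two lvm_volumes): once osd1 is added, the per-node expectation num_osds is 2, while `ceph osd tree` queried on the monitor reports all 4 up OSDs in the cluster, so the comparison 2 == 4 fails even though the cluster is healthy.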
Signed-off-by: Guillaume Abrioux --- .../tests/mon/test_osds_from_mons.py | 28 -------------- tests/functional/tests/osd/test_osds.py | 37 +++++++++++++++++++ 2 files changed, 37 insertions(+), 28 deletions(-) delete mode 100644 tests/functional/tests/mon/test_osds_from_mons.py diff --git a/tests/functional/tests/mon/test_osds_from_mons.py b/tests/functional/tests/mon/test_osds_from_mons.py deleted file mode 100644 index f8eeab74b..000000000 --- a/tests/functional/tests/mon/test_osds_from_mons.py +++ /dev/null @@ -1,28 +0,0 @@ -import pytest -import json - - -class TestOsdsFromMons(object): - def _get_nb_osd_up(self, osd_tree): - nb_up = 0 - for n in osd_tree['nodes']: - if n['type'] == 'osd' and n['status'] == 'up': - nb_up += 1 - return nb_up - - @pytest.mark.no_docker - def test_all_osds_are_up_and_in(self, node, host): - cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 osd tree -f json".format(cluster=node["cluster_name"]) - output = json.loads(host.check_output(cmd)) - nb_osd_up = self._get_nb_osd_up(output) - assert int(node["num_osds"]) == int(nb_osd_up) - - @pytest.mark.docker - def test_all_docker_osds_are_up_and_in(self, node, host): - cmd = "sudo docker exec ceph-mon-{inventory_hostname} ceph --cluster={cluster} --connect-timeout 5 osd tree -f json".format( - cluster=node["cluster_name"], - inventory_hostname=node['vars']['inventory_hostname'] - ) - output = json.loads(host.check_output(cmd)) - nb_osd_up = self._get_nb_osd_up(output) - assert node["num_osds"] == nb_osd_up diff --git a/tests/functional/tests/osd/test_osds.py b/tests/functional/tests/osd/test_osds.py index 20fbeacb4..851c7d7f5 100644 --- a/tests/functional/tests/osd/test_osds.py +++ b/tests/functional/tests/osd/test_osds.py @@ -47,3 +47,40 @@ class TestOSDs(object): @pytest.mark.lvm_scenario def test_ceph_volume_systemd_is_installed(self, node, host): host.exists('ceph-volume-systemd') + + def _get_osd_id_from_host(self, node, osd_tree): + children = [] + for n in osd_tree['nodes']: + if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host': + children = n['children'] + return children + + def _get_nb_up_osds_from_ids(self, node, osd_tree): + nb_up = 0 + ids = self._get_osd_id_from_host(node, osd_tree) + for n in osd_tree['nodes']: + if n['id'] in ids and n['status'] == 'up': + nb_up += 1 + return nb_up + + @pytest.mark.no_docker + def test_all_osds_are_up_and_in(self, node, host): + cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(cluster=node["cluster_name"]) + output = json.loads(host.check_output(cmd)) + assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output) + + @pytest.mark.docker + def test_all_docker_osds_are_up_and_in(self, node, host): + osd_scenario = node["vars"].get('osd_scenario', False) + if osd_scenario in ['lvm', 'lvm-batch']: + osd_id = "0" + else: + hostname = node["vars"]["inventory_hostname"] + osd_id = os.path.join(hostname+"-sda") + + cmd = "sudo docker exec ceph-osd-{osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( + osd_id=osd_id, + cluster=node["cluster_name"] + ) + output = json.loads(host.check_output(cmd)) + assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output) From 8fa437b7bd53a4a1057fc82229c6b0b47ed5d3d1 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Thu, 18 Oct 2018 22:29:02 +0200 Subject: [PATCH 
090/105] iscsi: fix networking issue on containerized env The iscsi-gw containers can't reach monitors without `--net=host` Signed-off-by: Guillaume Abrioux --- roles/ceph-iscsi-gw/templates/rbd-target-api.service.j2 | 1 + roles/ceph-iscsi-gw/templates/rbd-target-gw.service.j2 | 1 + roles/ceph-iscsi-gw/templates/tcmu-runner.service.j2 | 1 + 3 files changed, 3 insertions(+) diff --git a/roles/ceph-iscsi-gw/templates/rbd-target-api.service.j2 b/roles/ceph-iscsi-gw/templates/rbd-target-api.service.j2 index 69781693d..2cc19231f 100644 --- a/roles/ceph-iscsi-gw/templates/rbd-target-api.service.j2 +++ b/roles/ceph-iscsi-gw/templates/rbd-target-api.service.j2 @@ -16,6 +16,7 @@ ExecStart=/usr/bin/docker run --rm \ -v /etc/localtime:/etc/localtime:ro \ --privileged \ --cap-add=ALL \ + --net=host \ -v /dev:/dev \ -v /lib/modules:/lib/modules \ -v /etc/ceph:/etc/ceph \ diff --git a/roles/ceph-iscsi-gw/templates/rbd-target-gw.service.j2 b/roles/ceph-iscsi-gw/templates/rbd-target-gw.service.j2 index 3aa18d1c1..672988498 100644 --- a/roles/ceph-iscsi-gw/templates/rbd-target-gw.service.j2 +++ b/roles/ceph-iscsi-gw/templates/rbd-target-gw.service.j2 @@ -16,6 +16,7 @@ ExecStart=/usr/bin/docker run --rm \ -v /etc/localtime:/etc/localtime:ro \ --privileged \ --cap-add=ALL \ + --net=host \ -v /dev:/dev \ -v /lib/modules:/lib/modules \ -v /etc/ceph:/etc/ceph \ diff --git a/roles/ceph-iscsi-gw/templates/tcmu-runner.service.j2 b/roles/ceph-iscsi-gw/templates/tcmu-runner.service.j2 index 8159125f5..bbaf58ee7 100644 --- a/roles/ceph-iscsi-gw/templates/tcmu-runner.service.j2 +++ b/roles/ceph-iscsi-gw/templates/tcmu-runner.service.j2 @@ -15,6 +15,7 @@ ExecStart=/usr/bin/docker run --rm \ {% endif -%} -v /etc/localtime:/etc/localtime:ro \ --privileged \ + --net=host \ --cap-add=ALL \ -v /dev:/dev \ -v /lib/modules:/lib/modules \ From a439eb574d33681bf0e9761fb649c5dbddd2c662 Mon Sep 17 00:00:00 2001 From: Andrew Schoen Date: Tue, 16 Oct 2018 10:20:54 -0500 Subject: [PATCH 091/105] validate: check the version of python-notario If the version of python-notario is < 0.0.13 an error message is given like "TypeError: validate() got an unexpected keyword argument 'defined_keys'", which is not helpful in figuring out you've got an incorrect version of python-notario. This check will avoid that situation by telling the user that they need to upgrade python-notario before they hit that error. Signed-off-by: Andrew Schoen --- plugins/actions/validate.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/plugins/actions/validate.py b/plugins/actions/validate.py index 12667404e..5cedd83e7 100644 --- a/plugins/actions/validate.py +++ b/plugins/actions/validate.py @@ -1,5 +1,6 @@ from ansible.plugins.action import ActionBase +from distutils.version import LooseVersion try: from __main__ import display @@ -14,6 +15,11 @@ except ImportError: display.error(msg) raise SystemExit(msg) +if LooseVersion(notario.__version__) < LooseVersion("0.0.13"): + msg = "The python-notario libary has an incompatible version. 
Version >= 0.0.13 is needed, current version: %s" % notario.__version__ + display.error(msg) + raise SystemExit(msg) + from notario.exceptions import Invalid from notario.validators import types, chainable, iterables from notario.decorators import optional From 44d0da0dd497bfab040c1e64fb406e4c13150028 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Thu, 9 Aug 2018 11:32:53 +0200 Subject: [PATCH 092/105] rolling_update: fix upgrade when using fqdn MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CLusters that were deployed using 'mon_use_fqdn' have a different unit name, so during the upgrade this must be used otherwise the upgrade will fail, looking for a unit that does not exist. Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1597516 Signed-off-by: Sébastien Han --- infrastructure-playbooks/rolling_update.yml | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index 0dfdd0a52..e06dd40d3 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -105,11 +105,21 @@ - containerized_deployment - mon_host_count | int == 1 - - name: stop ceph mon + - name: stop ceph mon - shortname systemd: name: ceph-mon@{{ ansible_hostname }} state: stopped enabled: yes + ignore_errors: True + when: + - not containerized_deployment + + - name: stop ceph mon - fqdn + systemd: + name: ceph-mon@{{ ansible_fqdn }} + state: stopped + enabled: yes + ignore_errors: True when: - not containerized_deployment @@ -124,7 +134,7 @@ post_tasks: - name: start ceph mon systemd: - name: ceph-mon@{{ ansible_hostname }} + name: ceph-mon@{{ monitor_name }} state: started enabled: yes when: @@ -132,7 +142,7 @@ - name: restart containerized ceph mon systemd: - name: ceph-mon@{{ ansible_hostname }} + name: ceph-mon@{{ monitor_name }} state: restarted enabled: yes daemon_reload: yes From 48cfc60722da0fc88b7230b20a131b9eb0f6d3d2 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Fri, 19 Oct 2018 13:16:23 +0200 Subject: [PATCH 093/105] defaults: set default `configure_firewall` to `True` Let's configure firewalld by default. Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1526400 Signed-off-by: Guillaume Abrioux --- group_vars/all.yml.sample | 2 +- group_vars/rhcs.yml.sample | 2 +- roles/ceph-defaults/defaults/main.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index d80ac21c2..738efeed5 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -59,7 +59,7 @@ dummy: # If configure_firewall is true, then ansible will try to configure the # appropriate firewalling rules so that Ceph daemons can communicate # with each others. -#configure_firewall: False +#configure_firewall: True # Open ports on corresponding nodes if firewall is installed on it #ceph_mon_firewall_zone: public diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index 68b69ed40..b9cac0d62 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -59,7 +59,7 @@ fetch_directory: ~/ceph-ansible-keys # If configure_firewall is true, then ansible will try to configure the # appropriate firewalling rules so that Ceph daemons can communicate # with each others. 
-#configure_firewall: False +#configure_firewall: True # Open ports on corresponding nodes if firewall is installed on it #ceph_mon_firewall_zone: public diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 7277a0171..917b86877 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -51,7 +51,7 @@ mgr_group_name: mgrs # If configure_firewall is true, then ansible will try to configure the # appropriate firewalling rules so that Ceph daemons can communicate # with each others. -configure_firewall: False +configure_firewall: True # Open ports on corresponding nodes if firewall is installed on it ceph_mon_firewall_zone: public From c47aa2e83b81b4678d58f427c800faa77e9dd719 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Fri, 19 Oct 2018 13:19:59 +0200 Subject: [PATCH 094/105] tests: remove unnecessary variables definition since we set `configure_firewall: true` in `ceph-defaults/defaults/main.yml` there is no need to explicitly set it in `centos7_cluster` and `docker_cluster` testing scenarios. Signed-off-by: Guillaume Abrioux --- tests/functional/centos/7/cluster/group_vars/all | 1 - tests/functional/centos/7/docker/group_vars/all | 1 - 2 files changed, 2 deletions(-) diff --git a/tests/functional/centos/7/cluster/group_vars/all b/tests/functional/centos/7/cluster/group_vars/all index 5a72a120a..46934a5b0 100644 --- a/tests/functional/centos/7/cluster/group_vars/all +++ b/tests/functional/centos/7/cluster/group_vars/all @@ -4,7 +4,6 @@ ceph_repository: community cluster: test public_network: "192.168.1.0/24" cluster_network: "192.168.2.0/24" -configure_firewall: true radosgw_interface: eth1 ceph_conf_overrides: global: diff --git a/tests/functional/centos/7/docker/group_vars/all b/tests/functional/centos/7/docker/group_vars/all index c53a831ab..c3b5a92d0 100644 --- a/tests/functional/centos/7/docker/group_vars/all +++ b/tests/functional/centos/7/docker/group_vars/all @@ -7,7 +7,6 @@ containerized_deployment: True cluster: test monitor_interface: eth1 radosgw_interface: eth1 -configure_firewall: true ceph_mon_docker_subnet: "{{ public_network }}" ceph_docker_on_openstack: False public_network: "192.168.17.0/24" From ee2d52d33df2a311cdf0ff62abd353fccb3affbc Mon Sep 17 00:00:00 2001 From: Rishabh Dave Date: Mon, 1 Oct 2018 11:11:13 -0400 Subject: [PATCH 095/105] allow custom pool size Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1596339 Signed-off-by: Rishabh Dave --- group_vars/all.yml.sample | 5 ++ group_vars/rhcs.yml.sample | 5 ++ roles/ceph-client/defaults/main.yml | 2 + roles/ceph-client/tasks/create_users_keys.yml | 71 +++++++++------- roles/ceph-defaults/defaults/main.yml | 31 ++++++- roles/ceph-iscsi-gw/defaults/main.yml | 1 + roles/ceph-iscsi-gw/tasks/common.yml | 28 ++++--- .../ceph-mds/tasks/create_mds_filesystems.yml | 21 +++-- roles/ceph-osd/tasks/openstack_config.yml | 82 +++++++++++-------- roles/ceph-rgw/defaults/main.yml | 3 + roles/ceph-rgw/tasks/main.yml | 22 +++-- .../centos/7/cluster/ceph-override.json | 15 +++- .../centos/7/cluster/group_vars/all | 4 +- .../centos/7/cluster/group_vars/clients | 2 + .../functional/centos/7/docker/group_vars/all | 4 +- 15 files changed, 203 insertions(+), 93 deletions(-) diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index 738efeed5..f2fc4d54b 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -558,6 +558,7 @@ dummy: # erasure_profile: "" # expected_num_objects: "" # application: "rbd" +# size: "" 
#openstack_cinder_pool: # name: "volumes" # pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -567,6 +568,7 @@ dummy: # erasure_profile: "" # expected_num_objects: "" # application: "rbd" +# size: "" #openstack_nova_pool: # name: "vms" # pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -576,6 +578,7 @@ dummy: # erasure_profile: "" # expected_num_objects: "" # application: "rbd" +# size: "" #openstack_cinder_backup_pool: # name: "backups" # pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -585,6 +588,7 @@ dummy: # erasure_profile: "" # expected_num_objects: "" # application: "rbd" +# size: "" #openstack_gnocchi_pool: # name: "metrics" # pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -594,6 +598,7 @@ dummy: # erasure_profile: "" # expected_num_objects: "" # application: "rbd" +# size: "" #openstack_pools: # - "{{ openstack_glance_pool }}" diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index b9cac0d62..5cc7da715 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -558,6 +558,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/" # erasure_profile: "" # expected_num_objects: "" # application: "rbd" +# size: "" #openstack_cinder_pool: # name: "volumes" # pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -567,6 +568,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/" # erasure_profile: "" # expected_num_objects: "" # application: "rbd" +# size: "" #openstack_nova_pool: # name: "vms" # pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -576,6 +578,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/" # erasure_profile: "" # expected_num_objects: "" # application: "rbd" +# size: "" #openstack_cinder_backup_pool: # name: "backups" # pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -585,6 +588,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/" # erasure_profile: "" # expected_num_objects: "" # application: "rbd" +# size: "" #openstack_gnocchi_pool: # name: "metrics" # pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -594,6 +598,7 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/" # erasure_profile: "" # expected_num_objects: "" # application: "rbd" +# size: "" #openstack_pools: # - "{{ openstack_glance_pool }}" diff --git a/roles/ceph-client/defaults/main.yml b/roles/ceph-client/defaults/main.yml index ec477f299..80f9a5552 100644 --- a/roles/ceph-client/defaults/main.yml +++ b/roles/ceph-client/defaults/main.yml @@ -18,6 +18,7 @@ test: type: 1 erasure_profile: "" expected_num_objects: "" + size: "" test2: name: "test2" pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -26,6 +27,7 @@ test2: type: 1 erasure_profile: "" expected_num_objects: "" + size: "" pools: - "{{ test }}" - "{{ test2 }}" diff --git a/roles/ceph-client/tasks/create_users_keys.yml b/roles/ceph-client/tasks/create_users_keys.yml index 72f11d0e7..b98f4bb88 100644 --- a/roles/ceph-client/tasks/create_users_keys.yml +++ b/roles/ceph-client/tasks/create_users_keys.yml @@ -84,40 +84,51 @@ - keys | length > 0 - inventory_hostname == groups.get('_filtered_clients') | first -- name: list existing pool(s) - command: > - {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} - osd pool get {{ item.name }} size - with_items: "{{ pools }}" - 
register: created_pools - failed_when: false - delegate_to: "{{ delegated_node }}" +- name: pool related tasks when: - condition_copy_admin_key - inventory_hostname == groups.get('_filtered_clients', []) | first + block: + - name: list existing pool(s) + command: > + {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} + osd pool get {{ item.name }} size + with_items: "{{ pools }}" + register: created_pools + failed_when: false + delegate_to: "{{ delegated_node }}" -- name: create ceph pool(s) - command: > - {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} - osd pool create {{ item.0.name }} - {{ item.0.pg_num }} - {{ item.0.pgp_num }} - {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }} - {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }} - {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %} - {{ item.0.erasure_profile }} - {%- endif %} - {{ item.0.expected_num_objects | default('') }} - with_together: - - "{{ pools }}" - - "{{ created_pools.results }}" - changed_when: false - delegate_to: "{{ delegated_node }}" - when: - - pools | length > 0 - - condition_copy_admin_key - - inventory_hostname in groups.get('_filtered_clients') | first - - item.1.rc != 0 + - name: create ceph pool(s) + command: > + {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} + osd pool create {{ item.0.name }} + {{ item.0.pg_num }} + {{ item.0.pgp_num }} + {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }} + {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }} + {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %} + {{ item.0.erasure_profile }} + {%- endif %} + {{ item.0.expected_num_objects | default('') }} + with_together: + - "{{ pools }}" + - "{{ created_pools.results }}" + changed_when: false + delegate_to: "{{ delegated_node }}" + when: + - pools | length > 0 + - item.1.rc != 0 + + - name: customize pool size + command: > + {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} + osd pool set {{ item.name }} size {{ item.size | default('') }} + with_items: "{{ pools | unique }}" + delegate_to: "{{ delegate_node }}" + changed_when: false + when: + - pools | length > 0 + - item.size | default ("") != "" - name: get client cephx keys copy: diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 917b86877..2e20d61fb 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -346,8 +346,8 @@ cephfs_data: cephfs_data # name of the data pool for a given filesystem cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem cephfs_pools: - - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" } - - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" } + - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" } + - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" } ## OSD options # @@ -550,6 
+550,7 @@ openstack_glance_pool: erasure_profile: "" expected_num_objects: "" application: "rbd" + size: "" openstack_cinder_pool: name: "volumes" pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -559,6 +560,7 @@ openstack_cinder_pool: erasure_profile: "" expected_num_objects: "" application: "rbd" + size: "" openstack_nova_pool: name: "vms" pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -568,6 +570,7 @@ openstack_nova_pool: erasure_profile: "" expected_num_objects: "" application: "rbd" + size: "" openstack_cinder_backup_pool: name: "backups" pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -577,6 +580,7 @@ openstack_cinder_backup_pool: erasure_profile: "" expected_num_objects: "" application: "rbd" + size: "" openstack_gnocchi_pool: name: "metrics" pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -586,6 +590,27 @@ openstack_gnocchi_pool: erasure_profile: "" expected_num_objects: "" application: "rbd" + size: "" +openstack_cephfs_data_pool: + name: "manila_data" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "replicated_rule" + type: 1 + erasure_profile: "" + expected_num_objects: "" + application: "rbd" + size: "" +openstack_cephfs_metadata_pool: + name: "manila_metadata" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "replicated_rule" + type: 1 + erasure_profile: "" + expected_num_objects: "" + application: "rbd" + size: "" openstack_pools: - "{{ openstack_glance_pool }}" @@ -593,6 +618,8 @@ openstack_pools: - "{{ openstack_nova_pool }}" - "{{ openstack_cinder_backup_pool }}" - "{{ openstack_gnocchi_pool }}" + - "{{ openstack_cephfs_data_pool }}" + - "{{ openstack_cephfs_metadata_pool }}" # The value for 'key' can be a pre-generated key, diff --git a/roles/ceph-iscsi-gw/defaults/main.yml b/roles/ceph-iscsi-gw/defaults/main.yml index 94309e74a..5f8b8a703 100644 --- a/roles/ceph-iscsi-gw/defaults/main.yml +++ b/roles/ceph-iscsi-gw/defaults/main.yml @@ -56,6 +56,7 @@ client_connections: {} # Whether or not to generate secure certificate to iSCSI gateway nodes generate_crt: False +rbd_pool_size: "" ################## # RBD-TARGET-API # diff --git a/roles/ceph-iscsi-gw/tasks/common.yml b/roles/ceph-iscsi-gw/tasks/common.yml index 8e9383c9d..9f54ebde2 100644 --- a/roles/ceph-iscsi-gw/tasks/common.yml +++ b/roles/ceph-iscsi-gw/tasks/common.yml @@ -32,15 +32,23 @@ register: rbd_pool_exists delegate_to: "{{ groups[mon_group_name][0] }}" -- name: get default value for osd_pool_default_pg_num - command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num" - changed_when: false - register: osd_pool_default_pg_num - delegate_to: "{{ groups[mon_group_name][0] }}" +- name: rbd pool related tasks when: "'rbd' not in (rbd_pool_exists.stdout | from_json)" + block: + - name: get default value for osd_pool_default_pg_num + command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num" + changed_when: false + register: osd_pool_default_pg_num + delegate_to: "{{ 
groups[mon_group_name][0] }}" -- name: create a rbd pool if it doesn't exist - command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}" - changed_when: false - delegate_to: "{{ groups[mon_group_name][0] }}" - when: "'rbd' not in (rbd_pool_exists.stdout | from_json)" + - name: create a rbd pool if it doesn't exist + command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: customize pool size + command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default('') }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + when: + - rbd_pool_size | default ("") != "" diff --git a/roles/ceph-mds/tasks/create_mds_filesystems.yml b/roles/ceph-mds/tasks/create_mds_filesystems.yml index 8418a5cc7..787fbe2bd 100644 --- a/roles/ceph-mds/tasks/create_mds_filesystems.yml +++ b/roles/ceph-mds/tasks/create_mds_filesystems.yml @@ -1,10 +1,19 @@ --- -- name: create filesystem pools - command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}" - changed_when: false - delegate_to: "{{ groups[mon_group_name][0] }}" - with_items: - - "{{ cephfs_pools }}" +- name: filesystem pools related tasks + block: + - name: create filesystem pools + command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + with_items: + - "{{ cephfs_pools }}" + + - name: customize pool size + command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default('') }}" + with_items: "{{ cephfs_pools | unique }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + when: item.size | default ("") != "" - name: check if ceph filesystem already exists command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}" diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml index 80fb571ad..052345aef 100644 --- a/roles/ceph-osd/tasks/openstack_config.yml +++ b/roles/ceph-osd/tasks/openstack_config.yml @@ -9,42 +9,53 @@ delegate_to: "{{ groups[mon_group_name][0] }}" until: wait_for_all_osds_up.rc == 0 -- name: list existing pool(s) - command: > - {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} - osd pool get {{ item.name }} size - with_items: "{{ openstack_pools | unique }}" - register: created_pools - delegate_to: "{{ groups[mon_group_name][0] }}" - failed_when: false +- name: pool related tasks + block: + - name: list existing pool(s) + command: > + {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} + osd pool get {{ item.name }} size + with_items: "{{ openstack_pools | unique }}" + register: created_pools + delegate_to: "{{ groups[mon_group_name][0] }}" + failed_when: false -- name: create openstack pool(s) - command: > - {{ 
hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} - osd pool create {{ item.0.name }} - {{ item.0.pg_num }} - {{ item.0.pgp_num | default(item.0.pg_num) }} - {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }} - {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }} - {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %} - {{ item.0.erasure_profile }} - {%- endif %} - {{ item.0.expected_num_objects | default('') }} - with_together: - - "{{ openstack_pools | unique }}" - - "{{ created_pools.results }}" - changed_when: false - delegate_to: "{{ groups[mon_group_name][0] }}" - when: - - item.1.get('rc', 0) != 0 + - name: create openstack pool(s) + command: > + {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} + osd pool create {{ item.0.name }} + {{ item.0.pg_num }} + {{ item.0.pgp_num | default(item.0.pg_num) }} + {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }} + {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }} + {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %} + {{ item.0.erasure_profile }} + {%- endif %} + {{ item.0.expected_num_objects | default('') }} + with_together: + - "{{ openstack_pools | unique }}" + - "{{ created_pools.results }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + when: + - item.1.get('rc', 0) != 0 -- name: assign application to pool(s) - command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}" - with_items: "{{ openstack_pools | unique }}" - changed_when: false - delegate_to: "{{ groups[mon_group_name][0] }}" - when: - - item.application is defined + - name: customize pool size + command: > + {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} + osd pool set {{ item.name }} size {{ item.size | default('') }} + with_items: "{{ openstack_pools | unique }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + when: item.size | default ("") != "" + + - name: assign application to pool(s) + command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}" + with_items: "{{ openstack_pools | unique }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + when: + - item.application is defined - name: create openstack cephx key(s) ceph_key: @@ -81,4 +92,5 @@ when: - cephx - openstack_config - - item.0 != groups[mon_group_name] \ No newline at end of file + - item.0 != groups[mon_group_name] + diff --git a/roles/ceph-rgw/defaults/main.yml b/roles/ceph-rgw/defaults/main.yml index b3ff65643..1e108be44 100644 --- a/roles/ceph-rgw/defaults/main.yml +++ b/roles/ceph-rgw/defaults/main.yml @@ -37,10 +37,13 @@ rgw_pull_proto: "http" #rgw_create_pools: # defaults.rgw.buckets.data: # pg_num: 16 +# size: "" # defaults.rgw.buckets.index: # pg_num: 32 +# size: "" # foo: # pg_num: 4 +# size: "" 
########## diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml index 88935e0f7..9a1a65ae5 100644 --- a/roles/ceph-rgw/tasks/main.yml +++ b/roles/ceph-rgw/tasks/main.yml @@ -24,11 +24,21 @@ include_tasks: docker/main.yml when: containerized_deployment -- name: create rgw pools if rgw_create_pools is defined - command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}" - changed_when: false - with_dict: "{{ rgw_create_pools }}" - delegate_to: "{{ groups[mon_group_name][0] }}" - run_once: true +- name: rgw pool realted tasks when: - rgw_create_pools is defined + block: + - name: create rgw pools if rgw_create_pools is defined + command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}" + changed_when: false + with_dict: "{{ rgw_create_pools }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + + - name: customize pool size + command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.size | default('') }}" + with_dict: "{{ rgw_create_pools }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + run_once: true + when: item.size | default ("") != "" diff --git a/tests/functional/centos/7/cluster/ceph-override.json b/tests/functional/centos/7/cluster/ceph-override.json index 1a9600a14..f2ec97114 100644 --- a/tests/functional/centos/7/cluster/ceph-override.json +++ b/tests/functional/centos/7/cluster/ceph-override.json @@ -1,9 +1,20 @@ { "ceph_conf_overrides": { "global": { - "osd_pool_default_pg_num": 12, - "osd_pool_default_size": 1 + "osd_pool_default_pg_num": 12 } }, + "cephfs_pools": [ + { + "name": "cephfs_metadata", + "pgs": 8, + "size": 2 + }, + { + "name": "cephfs_data", + "pgs": 8, + "size": 2 + } + ], "ceph_mon_docker_memory_limit": "2g" } diff --git a/tests/functional/centos/7/cluster/group_vars/all b/tests/functional/centos/7/cluster/group_vars/all index 46934a5b0..5f9c83052 100644 --- a/tests/functional/centos/7/cluster/group_vars/all +++ b/tests/functional/centos/7/cluster/group_vars/all @@ -27,6 +27,7 @@ openstack_glance_pool: type: 1 erasure_profile: "" expected_num_objects: "" + size: "" openstack_cinder_pool: name: "volumes" pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -35,6 +36,7 @@ openstack_cinder_pool: type: 1 erasure_profile: "" expected_num_objects: "" + size: "" openstack_pools: - "{{ openstack_glance_pool }}" - - "{{ openstack_cinder_pool }}" \ No newline at end of file + - "{{ openstack_cinder_pool }}" diff --git a/tests/functional/centos/7/cluster/group_vars/clients b/tests/functional/centos/7/cluster/group_vars/clients index 21e5c5691..55180053b 100644 --- a/tests/functional/centos/7/cluster/group_vars/clients +++ b/tests/functional/centos/7/cluster/group_vars/clients @@ -9,6 +9,7 @@ test: type: 1 erasure_profile: "" expected_num_objects: "" + size: "" test2: name: "test2" pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -17,6 +18,7 @@ test2: type: 1 erasure_profile: "" expected_num_objects: "" + size: "" pools: - "{{ test }}" - "{{ test2 }}" diff --git a/tests/functional/centos/7/docker/group_vars/all b/tests/functional/centos/7/docker/group_vars/all index c3b5a92d0..57e385766 100644 --- a/tests/functional/centos/7/docker/group_vars/all +++ b/tests/functional/centos/7/docker/group_vars/all @@ -28,6 +28,7 @@ 
openstack_glance_pool: type: 1 erasure_profile: "" expected_num_objects: "" + size: "" openstack_cinder_pool: name: "volumes" pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -36,6 +37,7 @@ openstack_cinder_pool: type: 1 erasure_profile: "" expected_num_objects: "" + size: "" openstack_pools: - "{{ openstack_glance_pool }}" - - "{{ openstack_cinder_pool }}" \ No newline at end of file + - "{{ openstack_cinder_pool }}" From 4d698ce831f1e1173b8f707506a97c168eb4f6e9 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Tue, 23 Oct 2018 09:49:50 +0200 Subject: [PATCH 096/105] ceph-infra: reload firewall after rules are added we ensure that firewalld is installed and running before adding any rule. This has no sense anymore not to reload firewalld once the rule are added. Signed-off-by: Guillaume Abrioux --- roles/ceph-infra/tasks/configure_firewall.yml | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/roles/ceph-infra/tasks/configure_firewall.yml b/roles/ceph-infra/tasks/configure_firewall.yml index d0075979c..9fbbc2938 100644 --- a/roles/ceph-infra/tasks/configure_firewall.yml +++ b/roles/ceph-infra/tasks/configure_firewall.yml @@ -27,7 +27,7 @@ zone: "{{ ceph_mon_firewall_zone }}" source: "{{ public_network }}" permanent: true - immediate: false # if true then fails in case firewalld is stopped + immediate: true state: enabled notify: restart firewalld when: @@ -43,7 +43,7 @@ zone: "{{ ceph_mgr_firewall_zone }}" source: "{{ public_network }}" permanent: true - immediate: false # if true then fails in case firewalld is stopped + immediate: true state: enabled notify: restart firewalld when: @@ -59,7 +59,7 @@ zone: "{{ ceph_osd_firewall_zone }}" source: "{{ item }}" permanent: true - immediate: false # if true then fails in case firewalld is stopped + immediate: true state: enabled with_items: - "{{ public_network }}" @@ -78,7 +78,7 @@ zone: "{{ ceph_rgw_firewall_zone }}" source: "{{ public_network }}" permanent: true - immediate: false # if true then fails in case firewalld is stopped + immediate: true state: enabled notify: restart firewalld when: @@ -94,7 +94,7 @@ zone: "{{ ceph_mds_firewall_zone }}" source: "{{ public_network }}" permanent: true - immediate: false # if true then fails in case firewalld is stopped + immediate: true state: enabled notify: restart firewalld when: @@ -110,7 +110,7 @@ zone: "{{ ceph_nfs_firewall_zone }}" source: "{{ public_network }}" permanent: true - immediate: false # if true then fails in case firewalld is stopped + immediate: true state: enabled notify: restart firewalld when: @@ -126,7 +126,7 @@ zone: "{{ ceph_nfs_firewall_zone }}" source: "{{ public_network }}" permanent: true - immediate: false # if true then fails in case firewalld is stopped + immediate: true state: enabled notify: restart firewalld when: @@ -142,7 +142,7 @@ zone: "{{ ceph_restapi_firewall_zone }}" source: "{{ public_network }}" permanent: true - immediate: false # if true then fails in case firewalld is stopped + immediate: true state: enabled notify: restart firewalld when: @@ -158,7 +158,7 @@ zone: "{{ ceph_rbdmirror_firewall_zone }}" source: "{{ public_network }}" permanent: true - immediate: false # if true then fails in case firewalld is stopped + immediate: true state: enabled notify: restart firewalld when: @@ -174,7 +174,7 @@ zone: "{{ ceph_iscsi_firewall_zone }}" source: "{{ public_network }}" permanent: true - immediate: false # if true then fails in case firewalld is stopped + immediate: true state: 
enabled notify: restart firewalld when: From ff4dc83b87c31269106d7c0c7e475e7a08e96b06 Mon Sep 17 00:00:00 2001 From: Rishabh Dave Date: Wed, 10 Oct 2018 02:17:40 +0530 Subject: [PATCH 097/105] ceph-validate: avoid "list index out of range" error Be sure that error.path has more than one members before using them. Signed-off-by: Rishabh Dave --- plugins/actions/validate.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/actions/validate.py b/plugins/actions/validate.py index 5cedd83e7..e6e40f9fe 100644 --- a/plugins/actions/validate.py +++ b/plugins/actions/validate.py @@ -102,7 +102,12 @@ class ActionModule(ActionBase): reason = "[{}] Reason: {}".format(host, error.reason) try: if "schema is missing" not in error.message: - given = "[{}] Given value for {}: {}".format(host, error.path[0], error.path[1]) + for i in range(0, len(error.path)): + if i == 0: + given = "[{}] Given value for {}".format( + host, error.path[0]) + else: + given = given + ": {}".format(error.path[i]) display.error(given) else: given = "" From 090486003265830ce97cbe5ae243cecc7cbbc1a7 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 12 Sep 2018 15:37:44 -0500 Subject: [PATCH 098/105] igw: stop daemons on purge all calls When purging the entire igw config (lio and rbd) stop disable the api and gw daemons. Fixes Red Hat BZ https://bugzilla.redhat.com/show_bug.cgi?id=1621255 Signed-off-by: Mike Christie --- infrastructure-playbooks/purge-iscsi-gateways.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/infrastructure-playbooks/purge-iscsi-gateways.yml b/infrastructure-playbooks/purge-iscsi-gateways.yml index 369901430..8fdf2d8e1 100644 --- a/infrastructure-playbooks/purge-iscsi-gateways.yml +++ b/infrastructure-playbooks/purge-iscsi-gateways.yml @@ -35,5 +35,20 @@ igw_purge: mode="disks" when: igw_purge_type == 'all' + - name: stop and disable rbd-target-api daemon + service: + name: rbd-target-api + state: stopped + enabled: no + when: igw_purge_type == 'all' + + - name: stop and disable rbd-target-gw daemon + service: + name: rbd-target-gw + state: stopped + enabled: no + when: igw_purge_type == 'all' + - name: restart rbd-target-gw daemons service: name=rbd-target-gw state=restarted + when: igw_purge_type == 'lio' From a0cceb3e44f17f417f1c7d86c51f915dbaf0bd2f Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Thu, 25 Oct 2018 14:42:54 +0200 Subject: [PATCH 099/105] tox: fix a typo the line setting `ANSIBLE_CONFIG` obviously contains a typo introduced by 1e283bf69be8b9efbc1a7a873d91212ad57c7351 `ANSIBLE_CONFIG` has to point to a path only (path to an ansible.cfg) Signed-off-by: Guillaume Abrioux --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 6121e6175..5fd68f161 100644 --- a/tox.ini +++ b/tox.ini @@ -172,7 +172,7 @@ passenv=* sitepackages=True setenv= ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config - ANSIBLE_CONFIG = -F {toxinidir}/ansible.cfg + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback ANSIBLE_CALLBACK_WHITELIST = profile_tasks From cd3d6409fe9226d1d2acad89456196b349408d20 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Fri, 26 Oct 2018 09:46:29 +0200 Subject: [PATCH 100/105] resync group_vars/*.sample files ee2d52d33df2a311cdf0ff62abd353fccb3affbc missed this sync between ceph-defaults/defaults/main.yml and group_vars/all.yml.sampl Signed-off-by: Guillaume Abrioux --- 
group_vars/all.yml.sample | 26 ++++++++++++++++++++++++-- group_vars/clients.yml.sample | 2 ++ group_vars/iscsigws.yml.sample | 1 + group_vars/rgws.yml.sample | 3 +++ group_vars/rhcs.yml.sample | 26 ++++++++++++++++++++++++-- 5 files changed, 54 insertions(+), 4 deletions(-) diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index f2fc4d54b..7b1572ae3 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -354,8 +354,8 @@ dummy: #cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem #cephfs_pools: -# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" } -# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" } +# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" } +# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" } ## OSD options # @@ -599,6 +599,26 @@ dummy: # expected_num_objects: "" # application: "rbd" # size: "" +#openstack_cephfs_data_pool: +# name: "manila_data" +# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# application: "rbd" +# size: "" +#openstack_cephfs_metadata_pool: +# name: "manila_metadata" +# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# application: "rbd" +# size: "" #openstack_pools: # - "{{ openstack_glance_pool }}" @@ -606,6 +626,8 @@ dummy: # - "{{ openstack_nova_pool }}" # - "{{ openstack_cinder_backup_pool }}" # - "{{ openstack_gnocchi_pool }}" +# - "{{ openstack_cephfs_data_pool }}" +# - "{{ openstack_cephfs_metadata_pool }}" # The value for 'key' can be a pre-generated key, diff --git a/group_vars/clients.yml.sample b/group_vars/clients.yml.sample index 01d54404d..5bae33868 100644 --- a/group_vars/clients.yml.sample +++ b/group_vars/clients.yml.sample @@ -26,6 +26,7 @@ dummy: # type: 1 # erasure_profile: "" # expected_num_objects: "" +# size: "" #test2: # name: "test2" # pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" @@ -34,6 +35,7 @@ dummy: # type: 1 # erasure_profile: "" # expected_num_objects: "" +# size: "" #pools: # - "{{ test }}" # - "{{ test2 }}" diff --git a/group_vars/iscsigws.yml.sample b/group_vars/iscsigws.yml.sample index 442432473..fbed716f1 100644 --- a/group_vars/iscsigws.yml.sample +++ b/group_vars/iscsigws.yml.sample @@ -64,6 +64,7 @@ dummy: # Whether or not to generate secure certificate to iSCSI gateway nodes #generate_crt: False +#rbd_pool_size: "" ################## # RBD-TARGET-API # diff --git a/group_vars/rgws.yml.sample b/group_vars/rgws.yml.sample index bfdf66b59..e8cc1c768 100644 --- a/group_vars/rgws.yml.sample +++ b/group_vars/rgws.yml.sample @@ -45,10 +45,13 @@ dummy: #rgw_create_pools: # defaults.rgw.buckets.data: # pg_num: 16 +# size: "" # defaults.rgw.buckets.index: # pg_num: 32 +# size: "" # foo: # pg_num: 4 +# size: "" ########## diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index 5cc7da715..7f81594ef 100644 --- a/group_vars/rhcs.yml.sample 
+++ b/group_vars/rhcs.yml.sample @@ -354,8 +354,8 @@ ceph_rhcs_version: 3 #cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem #cephfs_pools: -# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" } -# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" } +# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" } +# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" } ## OSD options # @@ -599,6 +599,26 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/" # expected_num_objects: "" # application: "rbd" # size: "" +#openstack_cephfs_data_pool: +# name: "manila_data" +# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# application: "rbd" +# size: "" +#openstack_cephfs_metadata_pool: +# name: "manila_metadata" +# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +# application: "rbd" +# size: "" #openstack_pools: # - "{{ openstack_glance_pool }}" @@ -606,6 +626,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/" # - "{{ openstack_nova_pool }}" # - "{{ openstack_cinder_backup_pool }}" # - "{{ openstack_gnocchi_pool }}" +# - "{{ openstack_cephfs_data_pool }}" +# - "{{ openstack_cephfs_metadata_pool }}" # The value for 'key' can be a pre-generated key, From c58100002b3e3d052a5eeb3891f6544705ec56ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 24 Oct 2018 16:53:12 +0200 Subject: [PATCH 101/105] ceph_volume: expose ceph-volume logs on the host MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will tremendously help debugging failures while performing any ceph-volume command in containers. 
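The change itself is a single extra bind-mount in the command prefix that container_exec() assembles before appending the ceph-volume arguments. A minimal, self-contained sketch of that pattern follows; the helper name and any flags beyond the volumes visible in the diff are illustrative assumptions rather than the module's exact command line, but it shows why adding the /var/log/ceph mount makes ceph-volume's log files land on the host:

    def build_container_prefix(binary, container_image):
        # Illustrative sketch: bind-mount the host's /var/log/ceph into the
        # container so anything ceph-volume writes there persists on the host.
        return [
            'docker', 'run', '--rm', '--privileged', '--net=host',
            '-v', '/dev:/dev',
            '-v', '/etc/ceph:/etc/ceph:z',
            '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',
            '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
            '-v', '/var/log/ceph/:/var/log/ceph/:z',   # the line this patch adds
            '--entrypoint=' + binary,
            container_image,
        ]


    if __name__ == '__main__':
        cmd = build_container_prefix('ceph-volume', 'docker.io/ceph/daemon:latest-luminous')
        print(' '.join(cmd + ['lvm', 'list', '--format=json']))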
Signed-off-by: Sébastien Han --- library/ceph_volume.py | 1 + library/test_ceph_volume.py | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index 416867e6b..92413c017 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -184,6 +184,7 @@ def container_exec(binary, container_image): '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', os.path.join('--entrypoint=' + binary), container_image] return command_exec diff --git a/library/test_ceph_volume.py b/library/test_ceph_volume.py index 2f090a6ac..d0cee813f 100644 --- a/library/test_ceph_volume.py +++ b/library/test_ceph_volume.py @@ -44,6 +44,7 @@ class TestCephVolumeModule(object): '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', '--entrypoint=ceph-volume', 'docker.io/ceph/daemon:latest-luminous'] result = ceph_volume.container_exec(fake_binary, fake_container_image) @@ -58,6 +59,7 @@ class TestCephVolumeModule(object): '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', '--entrypoint=ceph-volume', 'docker.io/ceph/daemon:latest-luminous', 'lvm', @@ -111,6 +113,7 @@ class TestCephVolumeModule(object): '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', '--entrypoint=ceph-volume', 'docker.io/ceph/daemon:latest-luminous', '--cluster', @@ -136,6 +139,7 @@ class TestCephVolumeModule(object): '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', '--entrypoint=ceph-volume', 'docker.io/ceph/daemon:latest-luminous', '--cluster', @@ -182,6 +186,7 @@ class TestCephVolumeModule(object): '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', '--entrypoint=ceph-volume', 'docker.io/ceph/daemon:latest-luminous', '--cluster', @@ -229,6 +234,7 @@ class TestCephVolumeModule(object): '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501 '-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501 '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', '--entrypoint=ceph-volume', 'docker.io/ceph/daemon:latest-luminous', '--cluster', From 91385e4ff6c030f36f97ca58e8dff8dd68d15453 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 24 Oct 2018 16:55:52 +0200 Subject: [PATCH 102/105] ceph_volume: better error handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When loading the json, if invalid, we should fail with a meaningful error. 
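The fix is the usual defensive json.loads(): catch ValueError (which also covers json.JSONDecodeError on Python 3) and report both the offending output and the command that produced it instead of letting the traceback surface. A standalone sketch of that pattern, with a simplified stand-in for the module's fatal() helper:

    import json
    import sys


    def fatal(message):
        # Simplified stand-in for the module's fatal() helper, which normally
        # calls module.fail_json(); here we just print and exit non-zero.
        print(message, file=sys.stderr)
        sys.exit(1)


    def parse_ceph_volume_output(out, cmd):
        # 'out' is whatever the command printed; it may be empty or not JSON at
        # all when ceph-volume hits an error, so never assume it parses.
        try:
            return json.loads(out)
        except ValueError:
            fatal("Could not decode json output: {} from the command {}".format(out, cmd))


    if __name__ == '__main__':
        print(parse_ceph_volume_output('{"0": []}', ['ceph-volume', 'lvm', 'list', '--format=json']))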
Signed-off-by: Sébastien Han --- library/ceph_volume.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/library/ceph_volume.py b/library/ceph_volume.py index 92413c017..b07c1d43b 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -518,8 +518,12 @@ def run_module(): # see: http://tracker.ceph.com/issues/36329 # FIXME: it's probably less confusing to check for rc - # convert out to json, ansible return a string... - out_dict = json.loads(out) + # convert out to json, ansible returns a string... + try: + out_dict = json.loads(out) + except ValueError: + fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa E501 + if out_dict: data = module.params['data'] result['stdout'] = 'skipped, since {0} is already used for an osd'.format( # noqa E501 From fc20973c2b9a89a15f8940cd100c34df4caca030 Mon Sep 17 00:00:00 2001 From: Jairo Llopis Date: Thu, 4 Oct 2018 07:48:03 +0200 Subject: [PATCH 103/105] Fix problem with ceph_key in python3 Pretty basic problem of iteritems removal. Signed-off-by: Jairo Llopis --- library/ceph_key.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/ceph_key.py b/library/ceph_key.py index c6803302f..e6d66bac1 100644 --- a/library/ceph_key.py +++ b/library/ceph_key.py @@ -205,7 +205,7 @@ def generate_caps(cmd, _type, caps): Generate CephX capabilities list ''' - for k, v in caps.iteritems(): + for k, v in caps.items(): # makes sure someone didn't pass an empty var, # we don't want to add an empty cap if len(k) == 0: From 5ab90b358c7e6571aaf76e4c091b7b03b78fdeea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 26 Oct 2018 15:27:33 +0200 Subject: [PATCH 104/105] nfs: do not create the nfs user if already present MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check if the user exists and skip its creation if true. 
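The logic added here is a plain check-then-create: probe with `radosgw-admin user info`, run `user create` only when the probe returns non-zero, then read the access/secret key from whichever command actually returned the user (the patch relies on radosgw-admin's JSON output exposing a "keys" list, as the set_fact tasks do). A rough Python sketch of that control flow, assuming Python 3.7+ and radosgw-admin on PATH; the helper name is invented for illustration:

    import json
    import subprocess


    def ensure_rgw_nfs_user(cluster, user, display_name='RGW NFS User'):
        # Existence probe, mirroring the new "check if user exists" task.
        info = subprocess.run(
            ['radosgw-admin', '--cluster', cluster, 'user', 'info', '--uid={}'.format(user)],
            capture_output=True, text=True)
        if info.returncode != 0:
            # Only create the user when the probe failed, mirroring the
            # `rgwuser_exists.get('rc', 1) != 0` condition.
            info = subprocess.run(
                ['radosgw-admin', '--cluster', cluster, 'user', 'create',
                 '--uid={}'.format(user), '--display-name={}'.format(display_name)],
                capture_output=True, text=True, check=True)
        keys = json.loads(info.stdout)['keys'][0]
        return keys['access_key'], keys['secret_key']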
Closes: https://github.com/ceph/ceph-ansible/issues/3254 Signed-off-by: Sébastien Han --- roles/ceph-nfs/tasks/create_rgw_nfs_user.yml | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml index 6a8eadcff..73f6e3690 100644 --- a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml +++ b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml @@ -5,7 +5,17 @@ when: - containerized_deployment -- name: create rgw nfs user +- name: check if "{{ ceph_nfs_rgw_user }}" exists + command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}" + run_once: true + register: rgwuser_exists + changed_when: false + failed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + when: + - nfs_obj_gw + +- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}" command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'" run_once: true register: rgwuser @@ -13,10 +23,11 @@ delegate_to: "{{ groups[mon_group_name][0] }}" when: - nfs_obj_gw + - rgwuser_exists.get('rc', 1) != 0 - name: set_fact ceph_nfs_rgw_access_key set_fact: - ceph_nfs_rgw_access_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['access_key'] }}" + ceph_nfs_rgw_access_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['access_key'] if rgwuser_exists.get('rc', 1) != 0 else (rgwuser_exists.stdout | from_json)['keys'][0]['access_key'] }}" delegate_to: "{{ groups[mon_group_name][0] }}" when: - nfs_obj_gw @@ -24,7 +35,7 @@ - name: set_fact ceph_nfs_rgw_secret_key set_fact: - ceph_nfs_rgw_secret_key: "{{(rgwuser.stdout | from_json)['keys'][0]['secret_key']}}" + ceph_nfs_rgw_secret_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['secret_key'] if rgwuser_exists.get('rc', 1) != 0 else (rgwuser_exists.stdout | from_json)['keys'][0]['secret_key'] }}" delegate_to: "{{ groups[mon_group_name][0] }}" when: - nfs_obj_gw From 219fa8f9191f412edbfd068840289753ba4a2c87 Mon Sep 17 00:00:00 2001 From: Ali Maredia Date: Mon, 29 Oct 2018 06:01:25 +0000 Subject: [PATCH 105/105] infrastructure playbooks: ensure nvme_device is defined in lv-create.yml Signed-off-by: Ali Maredia --- infrastructure-playbooks/lv-create.yml | 6 ++++++ .../vars/{lv_vars.yaml => lv_vars.yaml.sample} | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) rename infrastructure-playbooks/vars/{lv_vars.yaml => lv_vars.yaml.sample} (99%) diff --git a/infrastructure-playbooks/lv-create.yml b/infrastructure-playbooks/lv-create.yml index b1c87d525..2bd75984d 100644 --- a/infrastructure-playbooks/lv-create.yml +++ b/infrastructure-playbooks/lv-create.yml @@ -27,6 +27,12 @@ file: lv_vars.yaml failed_when: false + # ensure nvme_device is set + - name: fail if nvme_device is not undefined + fail: + msg: "nvme_device has not been set by the user" + when: nvme_device is undefined or nvme_device == 'dummy' + # need to check if lvm2 is installed - name: install lvm2 package: diff --git a/infrastructure-playbooks/vars/lv_vars.yaml b/infrastructure-playbooks/vars/lv_vars.yaml.sample similarity index 99% rename from infrastructure-playbooks/vars/lv_vars.yaml rename to infrastructure-playbooks/vars/lv_vars.yaml.sample index 314e0eafa..ba618a10a 100644 --- a/infrastructure-playbooks/vars/lv_vars.yaml +++ b/infrastructure-playbooks/vars/lv_vars.yaml.sample @@ -12,7 +12,7 @@ # This can be done by running `wipefs -a $device_name`. 
# Path of nvme device primed for LV creation for journals and data. Only one NVMe device is allowed at a time. Providing a list will not work in this case. -nvme_device: /dev/nvme0n1 +nvme_device: dummy # Path of hdd devices designated for LV creation. hdd_devices: