library: add ceph_osd_flag module
This adds a ceph_osd_flag ansible module to replace the command module usage
with the ceph osd set/unset commands.
Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit 5da593604a)
pull/6146/head
parent 1f1ca3ec8a
commit d4024eddbb
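Every playbook change below follows the same pattern: the raw ceph CLI call (optionally wrapped in a container exec prefix) is dropped and the flag is driven through the new module, with the container details passed in via environment variables. A minimal before/after sketch, using noout as an illustrative flag value; the tasks mirror the hunks that follow:

# before: shell out to the ceph CLI (optionally through a container wrapper)
- name: set osd flags
  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set noout"
  changed_when: false

# after: the ceph_osd_flag module; CEPH_CONTAINER_* tell it how to reach ceph
- name: set osd flags
  ceph_osd_flag:
    name: noout
    cluster: "{{ cluster }}"
  environment:
    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
    CEPH_CONTAINER_BINARY: "{{ container_binary }}"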
@@ -346,8 +346,12 @@
         tasks_from: container_binary.yml
 
     - name: set osd flags
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set {{ item }}"
-      changed_when: false
+      ceph_osd_flag:
+        name: "{{ item }}"
+        cluster: "{{ cluster }}"
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
       with_items:
         - noout
         - nodeep-scrub
@@ -447,14 +451,14 @@
         name: ceph-facts
         tasks_from: container_binary.yml
 
-    - name: set_fact container_exec_cmd_osd
-      set_fact:
-        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
-      when: containerized_deployment | bool
-
     - name: unset osd flags
-      command: "{{ container_exec_cmd_update_osd | default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
-      changed_when: false
+      ceph_osd_flag:
+        name: "{{ item }}"
+        cluster: "{{ cluster }}"
+        state: absent
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
       with_items:
         - noout
         - nodeep-scrub
@@ -212,8 +212,12 @@
         tasks_from: container_binary.yml
 
     - name: set osd flags
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set {{ item }}"
-      changed_when: false
+      ceph_osd_flag:
+        name: "{{ item }}"
+        cluster: "{{ cluster }}"
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
       with_items:
         - noout
         - nodeep-scrub
@@ -359,8 +363,13 @@
         tasks_from: container_binary.yml
 
     - name: set osd flags
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset {{ item }}"
-      changed_when: false
+      ceph_osd_flag:
+        name: "{{ item }}"
+        cluster: "{{ cluster }}"
+        state: absent
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
       with_items:
         - noout
         - nodeep-scrub
@@ -0,0 +1,205 @@
# Copyright 2020, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils.basic import AnsibleModule
import os
import datetime


ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: ceph_osd_flag
short_description: Manage Ceph OSD flag
version_added: "2.8"
description:
    - Manage Ceph OSD flag
options:
    name:
        description:
            - name of the ceph OSD flag.
        required: true
        choices: ['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub']
    cluster:
        description:
            - The ceph cluster name.
        required: false
        default: ceph
    state:
        description:
            - If 'present' is used, the module sets the OSD flag.
              If 'absent' is used, the module will unset the OSD flag.
        required: false
        choices: ['present', 'absent']
        default: present
author:
    - Dimitri Savineau <dsavinea@redhat.com>
'''

EXAMPLES = '''
- name: set noup OSD flag
  ceph_osd_flag:
    name: noup

- name: unset multiple OSD flags
  ceph_osd_flag:
    name: '{{ item }}'
    state: absent
  loop:
    - 'noup'
    - 'norebalance'
'''

RETURN = '''# '''


def exit_module(module, out, rc, cmd, err, startd, changed=False):
    endd = datetime.datetime.now()
    delta = endd - startd

    result = dict(
        cmd=cmd,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        rc=rc,
        stdout=out.rstrip("\r\n"),
        stderr=err.rstrip("\r\n"),
        changed=changed,
    )
    module.exit_json(**result)


def container_exec(binary, container_image):
    '''
    Build the docker CLI to run a command inside a container
    '''

    container_binary = os.getenv('CEPH_CONTAINER_BINARY')
    command_exec = [container_binary,
                    'run',
                    '--rm',
                    '--net=host',
                    '-v', '/etc/ceph:/etc/ceph:z',
                    '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                    '-v', '/var/log/ceph/:/var/log/ceph/:z',
                    '--entrypoint=' + binary, container_image]
    return command_exec


def is_containerized():
    '''
    Check if we are running on a containerized cluster
    '''

    if 'CEPH_CONTAINER_IMAGE' in os.environ:
        container_image = os.getenv('CEPH_CONTAINER_IMAGE')
    else:
        container_image = None

    return container_image


def pre_generate_ceph_cmd(container_image=None):
    '''
    Generate the ceph prefix command
    '''
    if container_image:
        cmd = container_exec('ceph', container_image)
    else:
        cmd = ['ceph']

    return cmd


def generate_ceph_cmd(sub_cmd, args, user_key=None, cluster='ceph', user='client.admin', container_image=None):
    '''
    Generate 'ceph' command line to execute
    '''

    if not user_key:
        user_key = '/etc/ceph/{}.{}.keyring'.format(cluster, user)

    cmd = pre_generate_ceph_cmd(container_image=container_image)

    base_cmd = [
        '-n',
        user,
        '-k',
        user_key,
        '--cluster',
        cluster
    ]
    base_cmd.extend(sub_cmd)
    cmd.extend(base_cmd + args)

    return cmd


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, choices=['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub']),
            cluster=dict(type='str', required=False, default='ceph'),
            state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
        ),
        supports_check_mode=True,
    )

    name = module.params.get('name')
    cluster = module.params.get('cluster')
    state = module.params.get('state')

    startd = datetime.datetime.now()

    container_image = is_containerized()

    if state == 'present':
        cmd = generate_ceph_cmd(['osd', 'set'], [name], cluster=cluster, container_image=container_image)
    else:
        cmd = generate_ceph_cmd(['osd', 'unset'], [name], cluster=cluster, container_image=container_image)

    if module.check_mode:
        exit_module(
            module=module,
            out='',
            rc=0,
            cmd=cmd,
            err='',
            startd=startd,
            changed=False
        )
    else:
        rc, out, err = module.run_command(cmd)
        exit_module(
            module=module,
            out=out,
            rc=rc,
            cmd=cmd,
            err=err,
            startd=startd,
            changed=True
        )


if __name__ == '__main__':
    main()
@@ -4,10 +4,15 @@
     _osd_handler_called: True
 
 - name: unset noup flag
-  command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd unset noup"
+  ceph_osd_flag:
+    name: noup
+    cluster: "{{ cluster }}"
+    state: absent
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
-  changed_when: False
 
 # This does not just restart OSDs but everything else too. Unfortunately
 # at this time the ansible role does not have an OSD id list to use
@@ -39,10 +39,14 @@
   include_tasks: common.yml
 
 - name: set noup flag
-  command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd set noup"
+  ceph_osd_flag:
+    name: noup
+    cluster: "{{ cluster }}"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: True
-  changed_when: False
   when:
     - not rolling_update | default(False) | bool
     - not switch_to_containers | default(False) | bool
@@ -67,9 +71,14 @@
   include_tasks: start_osds.yml
 
 - name: unset noup flag
-  command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd unset noup"
+  ceph_osd_flag:
+    name: noup
+    cluster: "{{ cluster }}"
+    state: absent
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  changed_when: False
   when:
     - not rolling_update | default(False) | bool
     - not switch_to_containers | default(False) | bool
@@ -0,0 +1,184 @@
from mock.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import os
import json
import pytest
import ceph_osd_flag

fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_flag = 'noup'
fake_user = 'client.admin'
fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)
invalid_flag = 'nofoo'


def set_module_args(args):
    if '_ansible_remote_tmp' not in args:
        args['_ansible_remote_tmp'] = '/tmp'
    if '_ansible_keep_remote_files' not in args:
        args['_ansible_keep_remote_files'] = False

    args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(args)


class AnsibleExitJson(Exception):
    pass


class AnsibleFailJson(Exception):
    pass


def exit_json(*args, **kwargs):
    raise AnsibleExitJson(kwargs)


def fail_json(*args, **kwargs):
    raise AnsibleFailJson(kwargs)


class TestCephOSDFlagModule(object):

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    def test_without_parameters(self, m_fail_json):
        set_module_args({})
        m_fail_json.side_effect = fail_json

        with pytest.raises(AnsibleFailJson) as result:
            ceph_osd_flag.main()

        result = result.value.args[0]
        assert result['msg'] == 'missing required arguments: name'

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    def test_with_invalid_flag(self, m_fail_json):
        set_module_args({
            'name': invalid_flag,
        })
        m_fail_json.side_effect = fail_json

        with pytest.raises(AnsibleFailJson) as result:
            ceph_osd_flag.main()

        result = result.value.args[0]
        assert result['msg'] == ('value of name must be one of: noup, nodown, '
                                 'noout, nobackfill, norebalance, norecover, '
                                 'noscrub, nodeep-scrub, got: {}'.format(invalid_flag))

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    def test_with_check_mode(self, m_exit_json):
        set_module_args({
            'name': fake_flag,
            '_ansible_check_mode': True
        })
        m_exit_json.side_effect = exit_json

        with pytest.raises(AnsibleExitJson) as result:
            ceph_osd_flag.main()

        result = result.value.args[0]
        assert not result['changed']
        assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag]
        assert result['rc'] == 0
        assert not result['stdout']
        assert not result['stderr']

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    def test_with_failure(self, m_run_command, m_exit_json):
        set_module_args({
            'name': fake_flag
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = 'Error EINVAL: invalid command'
        rc = 22
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_osd_flag.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag]
        assert result['rc'] == rc
        assert result['stderr'] == stderr

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    def test_set_flag(self, m_run_command, m_exit_json):
        set_module_args({
            'name': fake_flag,
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = '{} is set'.format(fake_flag)
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_osd_flag.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag]
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    def test_unset_flag(self, m_run_command, m_exit_json):
        set_module_args({
            'name': fake_flag,
            'state': 'absent'
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = '{} is unset'.format(fake_flag)
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_osd_flag.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'unset', fake_flag]
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout

    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    def test_with_container(self, m_run_command, m_exit_json):
        set_module_args({
            'name': fake_flag,
        })
        m_exit_json.side_effect = exit_json
        stdout = ''
        stderr = '{} is set'.format(fake_flag)
        rc = 0
        m_run_command.return_value = rc, stdout, stderr

        with pytest.raises(AnsibleExitJson) as result:
            ceph_osd_flag.main()

        result = result.value.args[0]
        assert result['changed']
        assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host',
                                 '-v', '/etc/ceph:/etc/ceph:z',
                                 '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                                 '-v', '/var/log/ceph/:/var/log/ceph/:z',
                                 '--entrypoint=ceph', fake_container_image,
                                 '-n', fake_user, '-k', fake_keyring,
                                 '--cluster', fake_cluster, 'osd', 'set', fake_flag]
        assert result['rc'] == rc
        assert result['stderr'] == stderr
        assert result['stdout'] == stdout