diff --git a/ansible.cfg b/ansible.cfg index c579a576b..2a844eddd 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -4,6 +4,7 @@ [defaults] ansible_managed = Please do not change this file directly since it is managed by Ansible and will be overwritten library = ./library +module_utils = ./module_utils action_plugins = plugins/actions callback_plugins = plugins/callback filter_plugins = plugins/filter diff --git a/library/ceph_dashboard_user.py b/library/ceph_dashboard_user.py index b2101ccc5..929c0d71d 100644 --- a/library/ceph_dashboard_user.py +++ b/library/ceph_dashboard_user.py @@ -16,9 +16,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import generate_ceph_cmd, \ + is_containerized, \ + exec_command, \ + exit_module +except ImportError: + from module_utils.ca_common import generate_ceph_cmd, is_containerized, exec_command, exit_module + import datetime import json -import os ANSIBLE_METADATA = { @@ -105,76 +112,6 @@ EXAMPLES = ''' RETURN = '''# ''' -def container_exec(binary, container_image): - ''' - Build the docker CLI to run a command inside a container - ''' - - container_binary = os.getenv('CEPH_CONTAINER_BINARY') - command_exec = [container_binary, - 'run', - '--rm', - '--net=host', - '-v', '/etc/ceph:/etc/ceph:z', - '-v', '/var/lib/ceph/:/var/lib/ceph/:z', - '-v', '/var/log/ceph/:/var/log/ceph/:z', - '--entrypoint=' + binary, container_image] - return command_exec - - -def is_containerized(): - ''' - Check if we are running on a containerized cluster - ''' - - if 'CEPH_CONTAINER_IMAGE' in os.environ: - container_image = os.getenv('CEPH_CONTAINER_IMAGE') - else: - container_image = None - - return container_image - - -def pre_generate_ceph_cmd(container_image=None): - ''' - Generate ceph prefix comaand - ''' - if container_image: - cmd = container_exec('ceph', container_image) - else: - cmd = ['ceph'] - - return cmd - - -def generate_ceph_cmd(cluster, args, container_image=None): - ''' - Generate 'ceph' command line to execute - ''' - - cmd = pre_generate_ceph_cmd(container_image=container_image) - - base_cmd = [ - '--cluster', - cluster, - 'dashboard' - ] - - cmd.extend(base_cmd + args) - - return cmd - - -def exec_commands(module, cmd): - ''' - Execute command(s) - ''' - - rc, out, err = module.run_command(cmd) - - return rc, cmd, out, err - - def create_user(module, container_image=None): ''' Create a new user @@ -186,7 +123,7 @@ def create_user(module, container_image=None): args = ['ac-user-create', name, password] - cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image) + cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['dashboard'], args=args, container_image=container_image) return cmd @@ -204,7 +141,7 @@ def set_roles(module, container_image=None): args.extend(roles) - cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image) + cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['dashboard'], args=args, container_image=container_image) return cmd @@ -220,7 +157,7 @@ def set_password(module, container_image=None): args = ['ac-user-set-password', name, password] - cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image) + cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['dashboard'], args=args, container_image=container_image) return cmd @@ -235,7 +172,7 @@ def get_user(module, container_image=None): args = ['ac-user-show', name, '--format=json'] - 
cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image) + cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['dashboard'], args=args, container_image=container_image) return cmd @@ -250,28 +187,11 @@ def remove_user(module, container_image=None): args = ['ac-user-delete', name] - cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image) + cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['dashboard'], args=args, container_image=container_image) return cmd -def exit_module(module, out, rc, cmd, err, startd, changed=False): - endd = datetime.datetime.now() - delta = endd - startd - - result = dict( - cmd=cmd, - start=str(startd), - end=str(endd), - delta=str(delta), - rc=rc, - stdout=out.rstrip("\r\n"), - stderr=err.rstrip("\r\n"), - changed=changed, - ) - module.exit_json(**result) - - def run_module(): module_args = dict( cluster=dict(type='str', required=False, default='ceph'), @@ -313,31 +233,31 @@ def run_module(): container_image = is_containerized() if state == "present": - rc, cmd, out, err = exec_commands(module, get_user(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image)) if rc == 0: user = json.loads(out) user['roles'].sort() roles.sort() if user['roles'] != roles: - rc, cmd, out, err = exec_commands(module, set_roles(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, set_roles(module, container_image=container_image)) changed = True - rc, cmd, out, err = exec_commands(module, set_password(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, set_password(module, container_image=container_image)) else: - rc, cmd, out, err = exec_commands(module, create_user(module, container_image=container_image)) - rc, cmd, out, err = exec_commands(module, set_roles(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, create_user(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, set_roles(module, container_image=container_image)) changed = True elif state == "absent": - rc, cmd, out, err = exec_commands(module, get_user(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image)) if rc == 0: - rc, cmd, out, err = exec_commands(module, remove_user(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, remove_user(module, container_image=container_image)) changed = True else: rc = 0 out = "Dashboard User {} doesn't exist".format(name) elif state == "info": - rc, cmd, out, err = exec_commands(module, get_user(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image)) exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) diff --git a/library/ceph_fs.py b/library/ceph_fs.py index d16210307..3d676583b 100644 --- a/library/ceph_fs.py +++ b/library/ceph_fs.py @@ -16,9 +16,19 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import is_containerized, \ + exec_command, \ + generate_ceph_cmd, \ + exit_module +except ImportError: + from module_utils.ca_common import is_containerized, \ + exec_command, \ + generate_ceph_cmd, \ + exit_module + import datetime import json -import os 
ANSIBLE_METADATA = { @@ -97,76 +107,6 @@ EXAMPLES = ''' RETURN = '''# ''' -def container_exec(binary, container_image): - ''' - Build the docker CLI to run a command inside a container - ''' - - container_binary = os.getenv('CEPH_CONTAINER_BINARY') - command_exec = [container_binary, - 'run', - '--rm', - '--net=host', - '-v', '/etc/ceph:/etc/ceph:z', - '-v', '/var/lib/ceph/:/var/lib/ceph/:z', - '-v', '/var/log/ceph/:/var/log/ceph/:z', - '--entrypoint=' + binary, container_image] - return command_exec - - -def is_containerized(): - ''' - Check if we are running on a containerized cluster - ''' - - if 'CEPH_CONTAINER_IMAGE' in os.environ: - container_image = os.getenv('CEPH_CONTAINER_IMAGE') - else: - container_image = None - - return container_image - - -def pre_generate_ceph_cmd(container_image=None): - ''' - Generate ceph prefix comaand - ''' - if container_image: - cmd = container_exec('ceph', container_image) - else: - cmd = ['ceph'] - - return cmd - - -def generate_ceph_cmd(cluster, args, container_image=None): - ''' - Generate 'ceph' command line to execute - ''' - - cmd = pre_generate_ceph_cmd(container_image=container_image) - - base_cmd = [ - '--cluster', - cluster, - 'fs' - ] - - cmd.extend(base_cmd + args) - - return cmd - - -def exec_commands(module, cmd): - ''' - Execute command(s) - ''' - - rc, out, err = module.run_command(cmd) - - return rc, cmd, out, err - - def create_fs(module, container_image=None): ''' Create a new fs @@ -179,7 +119,7 @@ def create_fs(module, container_image=None): args = ['new', name, metadata, data] - cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image) + cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['fs'], args=args, container_image=container_image) return cmd @@ -194,7 +134,7 @@ def get_fs(module, container_image=None): args = ['get', name, '--format=json'] - cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image) + cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['fs'], args=args, container_image=container_image) return cmd @@ -209,7 +149,7 @@ def remove_fs(module, container_image=None): args = ['rm', name, '--yes-i-really-mean-it'] - cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image) + cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['fs'], args=args, container_image=container_image) return cmd @@ -224,7 +164,7 @@ def fail_fs(module, container_image=None): args = ['fail', name] - cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image) + cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['fs'], args=args, container_image=container_image) return cmd @@ -240,28 +180,11 @@ def set_fs(module, container_image=None): args = ['set', name, 'max_mds', str(max_mds)] - cmd = generate_ceph_cmd(cluster=cluster, args=args, container_image=container_image) + cmd = generate_ceph_cmd(cluster=cluster, sub_cmd=['fs'], args=args, container_image=container_image) return cmd -def exit_module(module, out, rc, cmd, err, startd, changed=False): - endd = datetime.datetime.now() - delta = endd - startd - - result = dict( - cmd=cmd, - start=str(startd), - end=str(endd), - delta=str(delta), - rc=rc, - stdout=out.rstrip("\r\n"), - stderr=err.rstrip("\r\n"), - changed=changed, - ) - module.exit_json(**result) - - def run_module(): module_args = dict( cluster=dict(type='str', required=False, default='ceph'), @@ -301,25 +224,25 @@ def run_module(): container_image = is_containerized() if state == "present": - rc, cmd, out, err = exec_commands(module, 
get_fs(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image)) if rc == 0: fs = json.loads(out) if max_mds and fs["mdsmap"]["max_mds"] != max_mds: - rc, cmd, out, err = exec_commands(module, set_fs(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, set_fs(module, container_image=container_image)) if rc == 0: changed = True else: - rc, cmd, out, err = exec_commands(module, create_fs(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, create_fs(module, container_image=container_image)) if max_mds and max_mds > 1: - exec_commands(module, set_fs(module, container_image=container_image)) + exec_command(module, set_fs(module, container_image=container_image)) if rc == 0: changed = True elif state == "absent": - rc, cmd, out, err = exec_commands(module, get_fs(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image)) if rc == 0: - exec_commands(module, fail_fs(module, container_image=container_image)) - rc, cmd, out, err = exec_commands(module, remove_fs(module, container_image=container_image)) + exec_command(module, fail_fs(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, remove_fs(module, container_image=container_image)) if rc == 0: changed = True else: @@ -327,7 +250,7 @@ def run_module(): out = "Ceph File System {} doesn't exist".format(name) elif state == "info": - rc, cmd, out, err = exec_commands(module, get_fs(module, container_image=container_image)) + rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image)) exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) diff --git a/library/ceph_key.py b/library/ceph_key.py index e87ac2259..5e3092eb5 100644 --- a/library/ceph_key.py +++ b/library/ceph_key.py @@ -18,6 +18,10 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import is_containerized, container_exec +except ImportError: + from module_utils.ca_common import is_containerized, container_exec import datetime import json import os @@ -225,36 +229,6 @@ def fatal(message, module): raise(Exception(message)) -def container_exec(binary, container_image): - ''' - Build the docker CLI to run a command inside a container - ''' - - container_binary = os.getenv('CEPH_CONTAINER_BINARY') - command_exec = [container_binary, - 'run', - '--rm', - '--net=host', - '-v', '/etc/ceph:/etc/ceph:z', - '-v', '/var/lib/ceph/:/var/lib/ceph/:z', - '-v', '/var/log/ceph/:/var/log/ceph/:z', - '--entrypoint=' + binary, container_image] - return command_exec - - -def is_containerized(): - ''' - Check if we are running on a containerized cluster - ''' - - if 'CEPH_CONTAINER_IMAGE' in os.environ: - container_image = os.getenv('CEPH_CONTAINER_IMAGE') - else: - container_image = None - - return container_image - - def generate_secret(): ''' Generate a CephX secret diff --git a/library/ceph_pool.py b/library/ceph_pool.py index 31ed193a7..0562a793e 100644 --- a/library/ceph_pool.py +++ b/library/ceph_pool.py @@ -18,6 +18,20 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import generate_ceph_cmd, \ + pre_generate_ceph_cmd, \ + 
is_containerized, \ + exec_command, \ + exit_module +except ImportError: + from module_utils.ca_common import generate_ceph_cmd, \ + pre_generate_ceph_cmd, \ + is_containerized, \ + exec_command, \ + exit_module + + import datetime import json import os @@ -141,81 +155,6 @@ pools: RETURN = '''# ''' -def container_exec(binary, container_image): - ''' - Build the docker CLI to run a command inside a container - ''' - - container_binary = os.getenv('CEPH_CONTAINER_BINARY') - command_exec = [container_binary, - 'run', - '--rm', - '--net=host', - '-v', '/etc/ceph:/etc/ceph:z', - '-v', '/var/lib/ceph/:/var/lib/ceph/:z', - '-v', '/var/log/ceph/:/var/log/ceph/:z', - '--entrypoint=' + binary, container_image] - return command_exec - - -def is_containerized(): - ''' - Check if we are running on a containerized cluster - ''' - - if 'CEPH_CONTAINER_IMAGE' in os.environ: - container_image = os.getenv('CEPH_CONTAINER_IMAGE') - else: - container_image = None - - return container_image - - -def pre_generate_ceph_cmd(container_image=None): - if container_image: - binary = 'ceph' - cmd = container_exec( - binary, container_image) - else: - binary = ['ceph'] - cmd = binary - - return cmd - - -def generate_ceph_cmd(cluster, args, user, user_key, container_image=None): - ''' - Generate 'ceph' command line to execute - ''' - - cmd = pre_generate_ceph_cmd(container_image=container_image) - - base_cmd = [ - '-n', - user, - '-k', - user_key, - '--cluster', - cluster, - 'osd', - 'pool' - ] - - cmd.extend(base_cmd + args) - - return cmd - - -def exec_commands(module, cmd): - ''' - Execute command(s) - ''' - - rc, out, err = module.run_command(cmd) - - return rc, cmd, out, err - - def check_pool_exist(cluster, name, user, @@ -229,6 +168,7 @@ def check_pool_exist(cluster, args = ['stats', name, '-f', output_format] cmd = generate_ceph_cmd(cluster=cluster, + sub_cmd=['osd', 'pool'], args=args, user=user, user_key=user_key, @@ -272,6 +212,7 @@ def get_application_pool(cluster, args = ['application', 'get', name, '-f', output_format] cmd = generate_ceph_cmd(cluster=cluster, + sub_cmd=['osd', 'pool'], args=args, user=user, user_key=user_key, @@ -293,6 +234,7 @@ def enable_application_pool(cluster, args = ['application', 'enable', name, application] cmd = generate_ceph_cmd(cluster=cluster, + sub_cmd=['osd', 'pool'], args=args, user=user, user_key=user_key, @@ -315,6 +257,7 @@ def disable_application_pool(cluster, application, '--yes-i-really-mean-it'] cmd = generate_ceph_cmd(cluster=cluster, + sub_cmd=['osd', 'pool'], args=args, user=user, user_key=user_key, @@ -337,22 +280,23 @@ def get_pool_details(module, args = ['ls', 'detail', '-f', output_format] cmd = generate_ceph_cmd(cluster=cluster, + sub_cmd=['osd', 'pool'], args=args, user=user, user_key=user_key, container_image=container_image) - rc, cmd, out, err = exec_commands(module, cmd) + rc, cmd, out, err = exec_command(module, cmd) if rc == 0: out = [p for p in json.loads(out.strip()) if p['pool_name'] == name][0] - _rc, _cmd, application_pool, _err = exec_commands(module, - get_application_pool(cluster, # noqa: E501 - name, # noqa: E501 - user, # noqa: E501 - user_key, # noqa: E501 - container_image=container_image)) # noqa: E501 + _rc, _cmd, application_pool, _err = exec_command(module, + get_application_pool(cluster, # noqa: E501 + name, # noqa: E501 + user, # noqa: E501 + user_key, # noqa: E501 + container_image=container_image)) # noqa: E501 # This is a trick because "target_size_ratio" isn't present at the same level in the dict # ie: @@ -424,6 +368,7 @@ def 
list_pools(cluster, args.extend(['-f', output_format]) cmd = generate_ceph_cmd(cluster=cluster, + sub_cmd=['osd', 'pool'], args=args, user=user, user_key=user_key, @@ -474,6 +419,7 @@ def create_pool(cluster, user_pool_config['pg_autoscale_mode']['value']]) cmd = generate_ceph_cmd(cluster=cluster, + sub_cmd=['osd', 'pool'], args=args, user=user, user_key=user_key, @@ -490,6 +436,7 @@ def remove_pool(cluster, name, user, user_key, container_image=None): args = ['rm', name, name, '--yes-i-really-really-mean-it'] cmd = generate_ceph_cmd(cluster=cluster, + sub_cmd=['osd', 'pool'], args=args, user=user, user_key=user_key, @@ -514,21 +461,22 @@ def update_pool(module, cluster, name, delta[key]['value']] cmd = generate_ceph_cmd(cluster=cluster, + sub_cmd=['osd', 'pool'], args=args, user=user, user_key=user_key, container_image=container_image) - rc, cmd, out, err = exec_commands(module, cmd) + rc, cmd, out, err = exec_command(module, cmd) if rc != 0: return rc, cmd, out, err else: - rc, cmd, out, err = exec_commands(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image)) # noqa: E501 + rc, cmd, out, err = exec_command(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image)) # noqa: E501 if rc != 0: return rc, cmd, out, err - rc, cmd, out, err = exec_commands(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image)) # noqa: E501 + rc, cmd, out, err = exec_command(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image)) # noqa: E501 if rc != 0: return rc, cmd, out, err @@ -538,23 +486,6 @@ def update_pool(module, cluster, name, return rc, cmd, out, err -def exit_module(module, out, rc, cmd, err, startd, changed=False): - endd = datetime.datetime.now() - delta = endd - startd - - result = dict( - cmd=cmd, - start=str(startd), - end=str(endd), - delta=str(delta), - rc=rc, - stdout=out.rstrip("\r\n"), - stderr=err.rstrip("\r\n"), - changed=changed, - ) - module.exit_json(**result) - - def run_module(): module_args = dict( cluster=dict(type='str', required=False, default='ceph'), @@ -656,12 +587,12 @@ def run_module(): user_key = os.path.join("/etc/ceph/", keyring_filename) if state == "present": - rc, cmd, out, err = exec_commands(module, - check_pool_exist(cluster, - name, - user, - user_key, - container_image=container_image)) # noqa: E501 + rc, cmd, out, err = exec_command(module, + check_pool_exist(cluster, + name, + user, + user_key, + container_image=container_image)) # noqa: E501 if rc == 0: running_pool_details = get_pool_details(module, cluster, @@ -696,49 +627,49 @@ def run_module(): else: out = "Pool {} already exists and there is nothing to update.".format(name) # noqa: E501 else: - rc, cmd, out, err = exec_commands(module, - create_pool(cluster, - name, - user, - user_key, - user_pool_config=user_pool_config, # noqa: E501 - container_image=container_image)) # noqa: E501 + rc, cmd, out, err = exec_command(module, + create_pool(cluster, + name, + user, + user_key, + user_pool_config=user_pool_config, # noqa: E501 + container_image=container_image)) # noqa: E501 if user_pool_config['application']['value']: - rc, _, _, _ = exec_commands(module, - enable_application_pool(cluster, - name, - user_pool_config['application']['value'], # noqa: E501 - user, - user_key, - 
container_image=container_image)) # noqa: E501 + rc, _, _, _ = exec_command(module, + enable_application_pool(cluster, + name, + user_pool_config['application']['value'], # noqa: E501 + user, + user_key, + container_image=container_image)) # noqa: E501 if user_pool_config['min_size']['value']: # not implemented yet pass changed = True elif state == "list": - rc, cmd, out, err = exec_commands(module, - list_pools(cluster, - name, user, - user_key, - details, - container_image=container_image)) # noqa: E501 + rc, cmd, out, err = exec_command(module, + list_pools(cluster, + name, user, + user_key, + details, + container_image=container_image)) # noqa: E501 if rc != 0: out = "Couldn't list pool(s) present on the cluster" elif state == "absent": - rc, cmd, out, err = exec_commands(module, - check_pool_exist(cluster, - name, user, - user_key, - container_image=container_image)) # noqa: E501 - if rc == 0: - rc, cmd, out, err = exec_commands(module, - remove_pool(cluster, - name, - user, + rc, cmd, out, err = exec_command(module, + check_pool_exist(cluster, + name, user, user_key, container_image=container_image)) # noqa: E501 + if rc == 0: + rc, cmd, out, err = exec_command(module, + remove_pool(cluster, + name, + user, + user_key, + container_image=container_image)) # noqa: E501 changed = True else: rc = 0 diff --git a/library/ceph_volume.py b/library/ceph_volume.py index f35d92acf..0787c5fe1 100644 --- a/library/ceph_volume.py +++ b/library/ceph_volume.py @@ -1,6 +1,10 @@ #!/usr/bin/python from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exec_command, is_containerized +except ImportError: + from module_utils.ca_common import exec_command, is_containerized import datetime import copy import json @@ -238,28 +242,6 @@ def build_cmd(action, container_image, cluster='ceph', binary='ceph-volume'): return cmd -def exec_command(module, cmd): - ''' - Execute command - ''' - - rc, out, err = module.run_command(cmd) - return rc, cmd, out, err - - -def is_containerized(): - ''' - Check if we are running on a containerized cluster - ''' - - if 'CEPH_CONTAINER_IMAGE' in os.environ: - container_image = os.getenv('CEPH_CONTAINER_IMAGE') - else: - container_image = None - - return container_image - - def get_data(data, data_vg): if data_vg: data = '{0}/{1}'.format(data_vg, data) diff --git a/module_utils/__init__.py b/module_utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/module_utils/ca_common.py b/module_utils/ca_common.py new file mode 100644 index 000000000..3ad9905fb --- /dev/null +++ b/module_utils/ca_common.py @@ -0,0 +1,92 @@ +import os +import datetime + + +def generate_ceph_cmd(cluster, sub_cmd, args, user='client.admin', user_key='/etc/ceph/ceph.client.admin.keyring', container_image=None): + ''' + Generate 'ceph' command line to execute + ''' + + cmd = pre_generate_ceph_cmd(container_image=container_image) + + base_cmd = [ + '-n', + user, + '-k', + user_key, + '--cluster', + cluster + ] + base_cmd.extend(sub_cmd) + cmd.extend(base_cmd + args) + + return cmd + + +def container_exec(binary, container_image): + ''' + Build the docker CLI to run a command inside a container + ''' + + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + command_exec = [container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + binary, container_image] + return command_exec + + +def is_containerized(): + 
''' + Check if we are running on a containerized cluster + ''' + + if 'CEPH_CONTAINER_IMAGE' in os.environ: + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + else: + container_image = None + + return container_image + + +def pre_generate_ceph_cmd(container_image=None): + ''' + Generate ceph prefix comaand + ''' + if container_image: + cmd = container_exec('ceph', container_image) + else: + cmd = ['ceph'] + + return cmd + + +def exec_command(module, cmd): + ''' + Execute command(s) + ''' + + rc, out, err = module.run_command(cmd) + + return rc, cmd, out, err + + +def exit_module(module, out, rc, cmd, err, startd, changed=False): + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + ) + module.exit_json(**result) diff --git a/tests/library/test_ca_common.py b/tests/library/test_ca_common.py new file mode 100644 index 000000000..f52fdbd11 --- /dev/null +++ b/tests/library/test_ca_common.py @@ -0,0 +1,65 @@ +from mock.mock import patch +import os +import ca_common +import pytest + +fake_binary = 'ceph' +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'docker.io/ceph/daemon:latest' +fake_container_cmd = [ + fake_container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + fake_binary, + fake_container_image +] + + +class TestCommon(object): + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_container_exec(self): + cmd = ca_common.container_exec(fake_binary, fake_container_image) + assert cmd == fake_container_cmd + + def test_not_is_containerized(self): + assert ca_common.is_containerized() is None + + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_is_containerized(self): + assert ca_common.is_containerized() == fake_container_image + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_pre_generate_ceph_cmd(self, image): + if image: + expected_cmd = fake_container_cmd + else: + expected_cmd = [fake_binary] + + assert ca_common.pre_generate_ceph_cmd(image) == expected_cmd + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_ceph_cmd(self, image): + sub_cmd = ['osd', 'pool'] + args = ['create', 'foo'] + if image: + expected_cmd = fake_container_cmd + else: + expected_cmd = [fake_binary] + + expected_cmd.extend([ + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + fake_cluster, + 'osd', 'pool', + 'create', 'foo' + ]) + assert ca_common.generate_ceph_cmd(fake_cluster, sub_cmd, args, container_image=image) == expected_cmd diff --git a/tests/library/test_ceph_dashboard_user.py b/tests/library/test_ceph_dashboard_user.py index 50f770444..dd4d0f72d 100644 --- a/tests/library/test_ceph_dashboard_user.py +++ b/tests/library/test_ceph_dashboard_user.py @@ -1,9 +1,5 @@ -import os -import sys -from mock.mock import patch, MagicMock -import pytest -sys.path.append('./library') -import ceph_dashboard_user # noqa: E402 +from mock.mock import MagicMock +import ceph_dashboard_user fake_binary = 'ceph' fake_cluster = 'ceph' @@ -31,48 +27,13 @@ fake_params 
= {'cluster': fake_cluster, class TestCephDashboardUserModule(object): - @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) - def test_container_exec(self): - cmd = ceph_dashboard_user.container_exec(fake_binary, fake_container_image) - assert cmd == fake_container_cmd - - def test_not_is_containerized(self): - assert ceph_dashboard_user.is_containerized() is None - - @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) - def test_is_containerized(self): - assert ceph_dashboard_user.is_containerized() == fake_container_image - - @pytest.mark.parametrize('image', [None, fake_container_image]) - @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) - def test_pre_generate_ceph_cmd(self, image): - if image: - expected_cmd = fake_container_cmd - else: - expected_cmd = [fake_binary] - - assert ceph_dashboard_user.pre_generate_ceph_cmd(image) == expected_cmd - - @pytest.mark.parametrize('image', [None, fake_container_image]) - @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) - def test_generate_ceph_cmd(self, image): - if image: - expected_cmd = fake_container_cmd - else: - expected_cmd = [fake_binary] - - expected_cmd.extend([ - '--cluster', - fake_cluster, - 'dashboard' - ]) - assert ceph_dashboard_user.generate_ceph_cmd(fake_cluster, [], image) == expected_cmd - def test_create_user(self): fake_module = MagicMock() fake_module.params = fake_params expected_cmd = [ fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', '--cluster', fake_cluster, 'dashboard', 'ac-user-create', fake_user, @@ -86,6 +47,8 @@ class TestCephDashboardUserModule(object): fake_module.params = fake_params expected_cmd = [ fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', '--cluster', fake_cluster, 'dashboard', 'ac-user-set-roles', fake_user @@ -99,6 +62,8 @@ class TestCephDashboardUserModule(object): fake_module.params = fake_params expected_cmd = [ fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', '--cluster', fake_cluster, 'dashboard', 'ac-user-set-password', fake_user, @@ -112,6 +77,8 @@ class TestCephDashboardUserModule(object): fake_module.params = fake_params expected_cmd = [ fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', '--cluster', fake_cluster, 'dashboard', 'ac-user-show', fake_user, @@ -125,6 +92,8 @@ class TestCephDashboardUserModule(object): fake_module.params = fake_params expected_cmd = [ fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', '--cluster', fake_cluster, 'dashboard', 'ac-user-delete', fake_user diff --git a/tests/library/test_ceph_fs.py b/tests/library/test_ceph_fs.py index 15ab3cdd9..f18b506c1 100644 --- a/tests/library/test_ceph_fs.py +++ b/tests/library/test_ceph_fs.py @@ -1,9 +1,5 @@ -import os -import sys -from mock.mock import patch, MagicMock -import pytest -sys.path.append('./library') -import ceph_fs # noqa : E402 +from mock.mock import MagicMock +import ceph_fs fake_binary = 'ceph' @@ -34,48 +30,13 @@ fake_params = {'cluster': fake_cluster, class TestCephFsModule(object): - @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) - def test_container_exec(self): - cmd = ceph_fs.container_exec(fake_binary, fake_container_image) - assert cmd == fake_container_cmd - - def test_not_is_containerized(self): - assert ceph_fs.is_containerized() is None - - @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) 
- def test_is_containerized(self): - assert ceph_fs.is_containerized() == fake_container_image - - @pytest.mark.parametrize('image', [None, fake_container_image]) - @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) - def test_pre_generate_ceph_cmd(self, image): - if image: - expected_cmd = fake_container_cmd - else: - expected_cmd = [fake_binary] - - assert ceph_fs.pre_generate_ceph_cmd(image) == expected_cmd - - @pytest.mark.parametrize('image', [None, fake_container_image]) - @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) - def test_generate_ceph_cmd(self, image): - if image: - expected_cmd = fake_container_cmd - else: - expected_cmd = [fake_binary] - - expected_cmd.extend([ - '--cluster', - fake_cluster, - 'fs' - ]) - assert ceph_fs.generate_ceph_cmd(fake_cluster, [], image) == expected_cmd - def test_create_fs(self): fake_module = MagicMock() fake_module.params = fake_params expected_cmd = [ fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', '--cluster', fake_cluster, 'fs', 'new', fake_fs, @@ -90,6 +51,8 @@ class TestCephFsModule(object): fake_module.params = fake_params expected_cmd = [ fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', '--cluster', fake_cluster, 'fs', 'set', fake_fs, @@ -104,6 +67,8 @@ class TestCephFsModule(object): fake_module.params = fake_params expected_cmd = [ fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', '--cluster', fake_cluster, 'fs', 'get', fake_fs, @@ -117,6 +82,8 @@ class TestCephFsModule(object): fake_module.params = fake_params expected_cmd = [ fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', '--cluster', fake_cluster, 'fs', 'rm', fake_fs, @@ -130,6 +97,8 @@ class TestCephFsModule(object): fake_module.params = fake_params expected_cmd = [ fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', '--cluster', fake_cluster, 'fs', 'fail', fake_fs diff --git a/tests/library/test_ceph_key.py b/tests/library/test_ceph_key.py index 87c7ccac1..100b58564 100644 --- a/tests/library/test_ceph_key.py +++ b/tests/library/test_ceph_key.py @@ -5,9 +5,7 @@ import mock import pytest from ansible.module_utils import basic from ansible.module_utils._text import to_bytes - -sys.path.append('./library') -import ceph_key # noqa: E402 +import ceph_key # From ceph-ansible documentation
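
The patch above centralizes the container_exec, is_containerized, pre_generate_ceph_cmd, generate_ceph_cmd, exec_command and exit_module helpers in module_utils/ca_common.py, and each library module falls back to a relative import when ansible.module_utils.ca_common is not available. Below is a minimal sketch of how a module consumes the shared helpers after this refactor; the module skeleton and the 'fs ls' sub-command are illustrative only, while the import fallback and the generate_ceph_cmd()/exec_command()/exit_module() calls mirror the pattern used in ceph_fs.py above.

#!/usr/bin/python
# Hypothetical module skeleton; only the ca_common usage pattern comes from the patch.
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.ca_common import generate_ceph_cmd, \
                                               is_containerized, \
                                               exec_command, \
                                               exit_module
except ImportError:
    from module_utils.ca_common import generate_ceph_cmd, is_containerized, exec_command, exit_module

import datetime


def run_module():
    module = AnsibleModule(
        argument_spec=dict(
            cluster=dict(type='str', required=False, default='ceph'),
        ),
        supports_check_mode=True,
    )

    startd = datetime.datetime.now()

    # Returns the image from CEPH_CONTAINER_IMAGE when set, None otherwise.
    container_image = is_containerized()

    # Builds: ceph -n client.admin -k /etc/ceph/ceph.client.admin.keyring
    #         --cluster <cluster> fs ls --format=json
    # (prefixed with 'podman|docker run ...' when container_image is set).
    cmd = generate_ceph_cmd(cluster=module.params['cluster'],
                            sub_cmd=['fs'],
                            args=['ls', '--format=json'],
                            container_image=container_image)

    rc, cmd, out, err = exec_command(module, cmd)

    # Reports cmd, rc, stdout, stderr, timing and 'changed' via module.exit_json().
    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err,
                startd=startd, changed=False)


def main():
    run_module()


if __name__ == '__main__':
    main()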
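
For a containerized deployment, is_containerized() simply returns the value of CEPH_CONTAINER_IMAGE and generate_ceph_cmd() prefixes the command with the container_exec() wrapper. A small sketch of the resulting command line, assuming podman as CEPH_CONTAINER_BINARY and the docker.io/ceph/daemon:latest image used by the unit tests above (the sys.path tweak assumes the snippet runs from the repository root):

import os
import sys
from unittest.mock import patch  # the repo's tests use mock.mock; unittest.mock behaves the same here

sys.path.append('./module_utils')
import ca_common  # noqa: E402

with patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'podman',
                             'CEPH_CONTAINER_IMAGE': 'docker.io/ceph/daemon:latest'}):
    image = ca_common.is_containerized()
    cmd = ca_common.generate_ceph_cmd(cluster='ceph',
                                      sub_cmd=['osd', 'pool'],
                                      args=['ls', 'detail', '-f', 'json'],
                                      container_image=image)

print(cmd)
# ['podman', 'run', '--rm', '--net=host',
#  '-v', '/etc/ceph:/etc/ceph:z',
#  '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
#  '-v', '/var/log/ceph/:/var/log/ceph/:z',
#  '--entrypoint=ceph', 'docker.io/ceph/daemon:latest',
#  '-n', 'client.admin', '-k', '/etc/ceph/ceph.client.admin.keyring',
#  '--cluster', 'ceph', 'osd', 'pool', 'ls', 'detail', '-f', 'json']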
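
The new tests/library/test_ca_common.py covers container_exec, is_containerized, pre_generate_ceph_cmd and generate_ceph_cmd. A sketch of how exec_command and exit_module could be exercised in the same style, assuming the same mock dependency and that module_utils/ is importable when pytest runs from the repository root; the fake stdout strings are placeholders:

import datetime
import sys

from mock.mock import MagicMock

sys.path.append('./module_utils')
import ca_common  # noqa: E402


def test_exec_command_wraps_run_command():
    fake_module = MagicMock()
    fake_module.run_command.return_value = (0, 'fake stdout', '')

    rc, cmd, out, err = ca_common.exec_command(fake_module, ['ceph', '--version'])

    fake_module.run_command.assert_called_once_with(['ceph', '--version'])
    assert (rc, cmd, out, err) == (0, ['ceph', '--version'], 'fake stdout', '')


def test_exit_module_strips_trailing_newlines():
    fake_module = MagicMock()
    startd = datetime.datetime.now()

    ca_common.exit_module(module=fake_module, out='ok\r\n', rc=0,
                          cmd=['ceph', '--version'], err='', startd=startd,
                          changed=True)

    result = fake_module.exit_json.call_args[1]
    assert result['stdout'] == 'ok'
    assert result['rc'] == 0
    assert result['changed'] is True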