From 32a2f04cbc288d79e9a228593be854469a0ecefb Mon Sep 17 00:00:00 2001
From: Wong Hoi Sing Edison
Date: Sun, 6 Sep 2020 10:17:02 +0800
Subject: [PATCH] library: flake8 ceph-ansible modules

This commit ensures all ceph-ansible modules pass flake8 properly.

Signed-off-by: Wong Hoi Sing Edison
Co-authored-by: Guillaume Abrioux
(cherry picked from commit 268a39ca0e8698dff9faec1b558d1d99006215aa)
---
 library/ceph_add_users_buckets.py |  98 +++++++++---------
 library/ceph_crush.py             |  19 ++--
 library/ceph_dashboard_user.py    |   5 +-
 library/ceph_key.py               |  15 +--
 library/ceph_pool.py              | 164 +++++++++++++++---------------
 library/ceph_volume.py            |  20 ++--
 library/igw_lun.py                |   4 +-
 library/igw_purge.py              |  15 +--
 8 files changed, 171 insertions(+), 169 deletions(-)
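
The hunks below are easier to follow with the checker in hand. A minimal sketch of running it over these modules, assuming a stock flake8 install (the exact version, plugin set, and CI wiring ceph-ansible uses are not part of this patch and may differ):

    pip install flake8
    flake8 library/

Long lines that intentionally stay long carry a "# noqa E501" marker instead, a per-line opt-out of the line-length check only.
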
diff --git a/library/ceph_add_users_buckets.py b/library/ceph_add_users_buckets.py
index 346cec80a..73f3afa45 100644
--- a/library/ceph_add_users_buckets.py
+++ b/library/ceph_add_users_buckets.py
@@ -3,7 +3,12 @@
 # Copyright 2018 Daniel Pivonka
 # Copyright 2018 Red Hat, Inc.
 #
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+
+
+from ansible.module_utils.basic import AnsibleModule
+from socket import error as socket_error
+import boto
+import radosgw
 
 ANSIBLE_METADATA = {
     'metadata_version': '1.1',
@@ -107,7 +112,7 @@ option:
         default: unlimited
     bucketmaxobjects:
         description:
-            - with bucket quota enabled specify maximum number of objects
+            - with bucket quota enabled specify maximum number of objects  # noqa E501
         required: false
         default: unlimited
     buckets:
@@ -258,7 +263,7 @@ error_messages:
     returned: always
     type: list
     sample: [
-        "test2: could not modify user: unable to modify user, cannot add duplicate email\n"
+        "test2: could not modify user: unable to modify user, cannot add duplicate email\n"  # noqa E501
        ]
 
 failed_users:
@@ -287,11 +292,6 @@ added_buckets:
 '''
 
-from ansible.module_utils.basic import AnsibleModule
-from socket import error as socket_error
-import boto
-import radosgw
-
 
 def create_users(rgw, users, result):
@@ -321,7 +321,7 @@ def create_users(rgw, users, result):
         # check if user exists
         try:
             user_info = rgw.get_user(uid=username)
-        except radosgw.exception.RadosGWAdminError as e:
+        except radosgw.exception.RadosGWAdminError:
             # it doesnt exist
             user_info = None
 
@@ -334,36 +334,36 @@ def create_users(rgw, users, result):
         if email:
             if autogenkey:
                 try:
-                    rgw.create_user(username, fullname, email=email, key_type='s3',
+                    rgw.create_user(username, fullname, email=email, key_type='s3',  # noqa E501
                                     generate_key=autogenkey,
-                                    max_buckets=maxbucket, suspended=suspend)
+                                    max_buckets=maxbucket, suspended=suspend)  # noqa E501
                 except radosgw.exception.RadosGWAdminError as e:
-                    result['error_messages'].append(username + ' ' + e.get_code())
+                    result['error_messages'].append(username + ' ' + e.get_code())  # noqa E501
                     fail_flag = True
             else:
                 try:
-                    rgw.create_user(username, fullname, email=email, key_type='s3',
-                                    access_key=accesskey, secret_key=secretkey,
-                                    max_buckets=maxbucket, suspended=suspend)
+                    rgw.create_user(username, fullname, email=email, key_type='s3',  # noqa E501
+                                    access_key=accesskey, secret_key=secretkey,  # noqa E501
+                                    max_buckets=maxbucket, suspended=suspend)  # noqa E501
                 except radosgw.exception.RadosGWAdminError as e:
-                    result['error_messages'].append(username + ' ' + e.get_code())
+                    result['error_messages'].append(username + ' ' + e.get_code())  # noqa E501
                     fail_flag = True
         else:
             if autogenkey:
                 try:
                     rgw.create_user(username, fullname, key_type='s3',
                                     generate_key=autogenkey,
-                                    max_buckets=maxbucket, suspended=suspend)
+                                    max_buckets=maxbucket, suspended=suspend)  # noqa E501
                 except radosgw.exception.RadosGWAdminError as e:
-                    result['error_messages'].append(username + ' ' + e.get_code())
+                    result['error_messages'].append(username + ' ' + e.get_code())  # noqa E501
                     fail_flag = True
             else:
                 try:
                     rgw.create_user(username, fullname, key_type='s3',
-                                    access_key=accesskey, secret_key=secretkey,
-                                    max_buckets=maxbucket, suspended=suspend)
+                                    access_key=accesskey, secret_key=secretkey,  # noqa E501
+                                    max_buckets=maxbucket, suspended=suspend)  # noqa E501
                 except radosgw.exception.RadosGWAdminError as e:
-                    result['error_messages'].append(username + ' ' + e.get_code())
+                    result['error_messages'].append(username + ' ' + e.get_code())  # noqa E501
                     fail_flag = True
 
     if not fail_flag and userquota:
@@ -371,21 +371,21 @@ def create_users(rgw, users, result):
             rgw.set_quota(username, 'user', max_objects=usermaxobjects,
                           max_size_kb=usermaxsize, enabled=True)
         except radosgw.exception.RadosGWAdminError as e:
-            result['error_messages'].append(username + ' ' + e.get_code())
+            result['error_messages'].append(username + ' ' + e.get_code())  # noqa E501
            fail_flag = True
 
     if not fail_flag and bucketquota:
         try:
-            rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects,
+            rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects,  # noqa E501
                           max_size_kb=bucketmaxsize, enabled=True)
         except radosgw.exception.RadosGWAdminError as e:
-            result['error_messages'].append(username + ' ' + e.get_code())
+            result['error_messages'].append(username + ' ' + e.get_code())  # noqa E501
             fail_flag = True
 
     if fail_flag:
         try:
             rgw.delete_user(username)
-        except radosgw.exception.RadosGWAdminError as e:
+        except radosgw.exception.RadosGWAdminError:
             pass
         failed_users.append(username)
     else:
@@ -424,7 +424,7 @@ def create_buckets(rgw, buckets, result):
         # check if user exists
         try:
             user_info = rgw.get_user(uid=user)
-        except radosgw.exception.RadosGWAdminError as e:
+        except radosgw.exception.RadosGWAdminError:
             # it doesnt exist
             user_info = None
 
@@ -439,7 +439,7 @@ def create_buckets(rgw, buckets, result):
                 result['error_messages'].append(bucket + e.get_code())
                 try:
                     rgw.delete_bucket(bucket, purge_objects=True)
-                except radosgw.exception.RadosGWAdminError as e:
+                except radosgw.exception.RadosGWAdminError:
                     pass
                 failed_buckets.append(bucket)
 
@@ -447,15 +447,15 @@ def create_buckets(rgw, buckets, result):
             # user doesnt exist cant be link delete bucket
             try:
                 rgw.delete_bucket(bucket, purge_objects=True)
-            except radosgw.exception.RadosGWAdminError as e:
+            except radosgw.exception.RadosGWAdminError:
                 pass
             failed_buckets.append(bucket)
-            result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user)
+            result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user)  # noqa E501
         else:
             # something went wrong
             failed_buckets.append(bucket)
-            result['error_messages'].append(bucket + ' could not be created')
+            result['error_messages'].append(bucket + ' could not be created')  # noqa E501
 
     result['added_buckets'] = ", ".join(added_buckets)
     result['failed_buckets'] = ", ".join(failed_buckets)
@@ -467,7 +467,7 @@ def create_bucket(rgw, bucket):
                                 host=rgw._connection[0],
                                 port=rgw.port,
                                 is_secure=rgw.is_secure,
-                                calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+                                calling_format=boto.s3.connection.OrdinaryCallingFormat(),  # noqa E501
                                 )
 
     try:
@@ -489,23 +489,23 @@ def main():
         admin_access_key=dict(type='str', required=True),
         admin_secret_key=dict(type='str',
                               required=True),
         buckets=dict(type='list', required=False, elements='dict',
-                     options=dict(bucket=dict(type='str', required=True),
-                                  user=dict(type='str', required=True))),
+                     options=dict(bucket=dict(type='str', required=True),  # noqa E501
+                                  user=dict(type='str', required=True))),  # noqa E501
         users=dict(type='list', required=False, elements='dict',
-                   options=dict(username=dict(type='str', required=True),
-                                fullname=dict(type='str', required=True),
-                                email=dict(type='str', required=False),
-                                maxbucket=dict(type='int', required=False, default=1000),
-                                suspend=dict(type='bool', required=False, default=False),
-                                autogenkey=dict(type='bool', required=False, default=True),
-                                accesskey=dict(type='str', required=False),
-                                secretkey=dict(type='str', required=False),
-                                userquota=dict(type='bool', required=False, default=False),
-                                usermaxsize=dict(type='str', required=False, default='-1'),
-                                usermaxobjects=dict(type='int', required=False, default=-1),
-                                bucketquota=dict(type='bool', required=False, default=False),
-                                bucketmaxsize=dict(type='str', required=False, default='-1'),
-                                bucketmaxobjects=dict(type='int', required=False, default=-1))))
+                   options=dict(username=dict(type='str', required=True),  # noqa E501
+                                fullname=dict(type='str', required=True),  # noqa E501
+                                email=dict(type='str', required=False),  # noqa E501
+                                maxbucket=dict(type='int', required=False, default=1000),  # noqa E501
+                                suspend=dict(type='bool', required=False, default=False),  # noqa E501
+                                autogenkey=dict(type='bool', required=False, default=True),  # noqa E501
+                                accesskey=dict(type='str', required=False),  # noqa E501
+                                secretkey=dict(type='str', required=False),  # noqa E501
+                                userquota=dict(type='bool', required=False, default=False),  # noqa E501
+                                usermaxsize=dict(type='str', required=False, default='-1'),  # noqa E501
+                                usermaxobjects=dict(type='int', required=False, default=-1),  # noqa E501
+                                bucketquota=dict(type='bool', required=False, default=False),  # noqa E501
+                                bucketmaxsize=dict(type='str', required=False, default='-1'),  # noqa E501
+                                bucketmaxobjects=dict(type='int', required=False, default=-1))))  # noqa E501
 
     # the AnsibleModule object
     module = AnsibleModule(argument_spec=fields,
@@ -533,8 +533,8 @@ def main():
     # radosgw connection
     rgw = radosgw.connection.RadosGWAdminConnection(host=rgw_host,
                                                     port=port,
-                                                    access_key=admin_access_key,
-                                                    secret_key=admin_secret_key,
+                                                    access_key=admin_access_key,  # noqa E501
+                                                    secret_key=admin_secret_key,  # noqa E501
                                                     aws_signature='AWS4',
                                                     is_secure=is_secure)
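
A pattern repeated throughout this file is dropping the "as e" binding from except clauses whose handler never reads the exception; pyflakes reports an assigned-but-unused name as F841. A minimal illustration, using a generic exception class rather than the radosgw one:

    def risky():
        raise ValueError("boom")

    try:
        risky()
    except ValueError:  # was "except ValueError as e:" -- F841 while 'e' was unused
        pass

Where the handler does call e.get_code(), the binding is kept, which is why only some of the except clauses above changed.
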
diff --git a/library/ceph_crush.py b/library/ceph_crush.py
index e9b500c98..6ada0b459 100644
--- a/library/ceph_crush.py
+++ b/library/ceph_crush.py
@@ -1,14 +1,14 @@
 #!/usr/bin/python
-#
 # Copyright (c) 2018 Red Hat, Inc.
 #
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
+# GNU General Public License v3.0+
 
 from __future__ import absolute_import, division, print_function
 __metaclass__ = type
 
+from ansible.module_utils.basic import AnsibleModule
+import datetime
 
 ANSIBLE_METADATA = {
     'metadata_version': '1.1',
@@ -61,9 +61,6 @@ EXAMPLES = '''
 RETURN = '''# '''
 
-from ansible.module_utils.basic import AnsibleModule
-import datetime
-
 
 def fatal(message, module):
     '''
@@ -117,9 +114,9 @@ def sort_osd_crush_location(location, module):
             "region",
             "root",
         ]
-        return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0]))
+        return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0]))  # noqa E501
     except ValueError as error:
-        fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module)
+        fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module)  # noqa E501
 
 
 def create_and_move_buckets_list(cluster, location, containerized=None):
@@ -131,10 +128,10 @@ def create_and_move_buckets_list(cluster, location, containerized=None):
     for item in location:
         bucket_type, bucket_name = item
         # ceph osd crush add-bucket maroot root
-        cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized))
+        cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized))  # noqa E501
         if previous_bucket:
             # ceph osd crush move monrack root=maroot
-            cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized))
+            cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized))  # noqa E501
         previous_bucket = item[1]
     return cmd_list
@@ -181,7 +178,7 @@ def run_module():
     startd = datetime.datetime.now()
 
     # run the Ceph command to add buckets
-    rc, cmd, out, err = exec_commands(module, create_and_move_buckets_list(cluster, location, containerized))
+    rc, cmd, out, err = exec_commands(module, create_and_move_buckets_list(cluster, location, containerized))  # noqa E501
 
     endd = datetime.datetime.now()
     delta = endd - startd
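
ceph_crush.py relocates its imports above ANSIBLE_METADATA rather than suppressing anything: flake8's E402 fires for any module-level import that follows other module-level statements. A minimal sketch of the layout that passes cleanly:

    """A docstring may precede imports without triggering E402."""
    import datetime

    ANSIBLE_METADATA = {'metadata_version': '1.1'}

    # moving "import datetime" below the assignment above would be E402
    print(ANSIBLE_METADATA, datetime.date.today())
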
diff --git a/library/ceph_dashboard_user.py b/library/ceph_dashboard_user.py
index 154effab5..db4a26740 100644
--- a/library/ceph_dashboard_user.py
+++ b/library/ceph_dashboard_user.py
@@ -280,7 +280,10 @@ def run_module():
         name=dict(type='str', required=True),
         state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'),
         password=dict(type='str', required=False, no_log=True),
-        roles=dict(type='list', required=False, choices=['administrator', 'read-only', 'block-manager', 'rgw-manager', 'cluster-manager', 'pool-manager', 'cephfs-manager'], default=[]),
+        roles=dict(type='list',
+                   required=False,
+                   choices=['administrator', 'read-only', 'block-manager', 'rgw-manager', 'cluster-manager', 'pool-manager', 'cephfs-manager'],
+                   default=[]),
     )
 
     module = AnsibleModule(
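
Rather than tagging the long roles=dict(...) line with noqa, the change above folds it into one keyword argument per line; continuation lines aligned inside an open bracket are the PEP 8-sanctioned way to stay under the limit (note the long choices list itself still exceeds it). A generic sketch of the same move:

    roles = dict(type='list',
                 required=False,
                 default=[])
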
diff --git a/library/ceph_key.py b/library/ceph_key.py
index 5b57c2634..5de4c2e2e 100644
--- a/library/ceph_key.py
+++ b/library/ceph_key.py
@@ -1,4 +1,5 @@
 #!/usr/bin/python3
+
 # Copyright 2018, Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -189,6 +190,7 @@ def str_to_bool(val):
     else:
         raise ValueError("Invalid input value: %s" % val)
 
+
 def fatal(message, module):
     '''
     Report a fatal error and exit
@@ -460,14 +462,14 @@ def lookup_ceph_initial_entities(module, out):
     else:
         fatal("'auth_dump' key not present in json output:", module)  # noqa E501
 
-    if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False)):
+    if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False)):  # noqa E501
         # must be missing in auth_dump, as if it were in CEPH_INITIAL_KEYS
         # it'd be in entities from the above test. Report what's missing.
         missing = []
         for e in CEPH_INITIAL_KEYS:
             if e not in entities:
                 missing.append(e)
-        fatal("initial keyring does not contain keys: " + ' '.join(missing), module)
+        fatal("initial keyring does not contain keys: " + ' '.join(missing), module)  # noqa E501
 
     return entities
@@ -561,8 +563,8 @@ def run_module():
             file_path = dest
         else:
             if 'bootstrap' in dest:
-                # Build a different path for bootstrap keys as there are stored as
-                # /var/lib/ceph/bootstrap-rbd/ceph.keyring
+                # Build a different path for bootstrap keys as there are stored
+                # as /var/lib/ceph/bootstrap-rbd/ceph.keyring
                 keyring_filename = cluster + '.keyring'
             else:
                 keyring_filename = cluster + "." + name + ".keyring"
@@ -605,7 +607,7 @@ def run_module():
                 result["stdout"] = "{0} already exists in {1} you must provide secret *and* caps when import_key is {2}".format(name, dest, import_key)  # noqa E501
                 result["rc"] = 0
                 module.exit_json(**result)
-        if (key_exist == 0 and (secret != _secret or caps != _caps)) or key_exist != 0:
+        if (key_exist == 0 and (secret != _secret or caps != _caps)) or key_exist != 0:  # noqa E501
             rc, cmd, out, err = exec_commands(module, create_key(
                 module, result, cluster, name, secret, caps, import_key, file_path, container_image))  # noqa E501
             if rc != 0:
@@ -615,7 +617,6 @@ def run_module():
                 module.set_fs_attributes_if_different(file_args, False)
             changed = True
 
-
     elif state == "absent":
         if key_exist == 0:
             rc, cmd, out, err = exec_commands(
@@ -645,7 +646,7 @@ def run_module():
         rc, cmd, out, err = exec_commands(
             module, list_keys(cluster, user, user_key, container_image))
         if rc != 0:
-            result["stdout"] = "failed to retrieve ceph keys".format(name)
+            result["stdout"] = "failed to retrieve ceph keys"
             result["sdterr"] = err
             result['rc'] = 0
             module.exit_json(**result)
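
Several ceph_key.py hunks only add or remove blank lines: flake8's E302 requires two blank lines before a top-level def, while stray extra blank lines elsewhere are trimmed. Sketch:

    def str_to_bool(val):
        return val in ('true', '1')


    def fatal(message):  # exactly two blank lines above satisfies E302
        raise SystemExit(message)

The last hunk also drops a stray .format(name) from a string with no placeholder — a harmless but misleading no-op call.
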
diff --git a/library/ceph_pool.py b/library/ceph_pool.py
index 70a0bd20a..2abce2697 100644
--- a/library/ceph_pool.py
+++ b/library/ceph_pool.py
@@ -1,4 +1,5 @@
 #!/usr/bin/python3
+
 # Copyright 2020, Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -47,8 +48,8 @@ options:
         required: true
     state:
         description:
-            If 'present' is used, the module creates a pool if it doesn't exist or
-            update it if it already exists.
+            If 'present' is used, the module creates a pool if it doesn't exist
+            or update it if it already exists.
             If 'absent' is used, the module will simply delete the pool.
             If 'list' is used, the module will return all details about the existing pools
             (json formatted).
@@ -115,7 +116,7 @@ options:
 EXAMPLES = '''
 
 pools:
-  - { name: foo, size: 3, application: rbd, pool_type: 'replicated', pg_autoscale_mode: 'on' }
+  - { name: foo, size: 3, application: rbd, pool_type: 'replicated', pg_autoscale_mode: 'on' }  # noqa E501
 
 - hosts: all
   become: true
@@ -141,7 +142,6 @@ import stat  # noqa E402
 import time  # noqa E402
 
 
-
 def container_exec(binary, container_image):
     '''
     Build the docker CLI to run a command inside a container
@@ -183,6 +183,7 @@ def pre_generate_ceph_cmd(container_image=None):
 
     return cmd
 
+
 def generate_ceph_cmd(cluster, args, user, user_key, container_image=None):
     '''
     Generate 'ceph' command line to execute
@@ -215,19 +216,20 @@ def exec_commands(module, cmd):
 
     return rc, cmd, out, err
 
-def check_pool_exist(cluster, name, user, user_key, output_format='json', container_image=None):
+
+def check_pool_exist(cluster, name, user, user_key, output_format='json', container_image=None):  # noqa E501
     '''
     Check if a given pool exists
     '''
 
-    args = [ 'stats', name, '-f', output_format ]
+    args = ['stats', name, '-f', output_format]
 
-    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)  # noqa E501
 
     return cmd
 
-def generate_get_config_cmd(param, cluster, user, user_key, container_image=None):
+def generate_get_config_cmd(param, cluster, user, user_key, container_image=None):  # noqa E501
     _cmd = pre_generate_ceph_cmd(container_image=container_image)
     args = [
         '-n',
@@ -245,17 +247,17 @@ def generate_get_config_cmd(param, cluster, user, user_key, container_image=None)
     return cmd
 
-def get_default_running_config(module, cluster, user, user_key, output_format='json', container_image=None):
+def get_default_running_config(module, cluster, user, user_key, output_format='json', container_image=None):  # noqa E501
     '''
     Get some default values set in the cluster
     '''
 
-    params = ['osd_pool_default_size', 'osd_pool_default_min_size', 'osd_pool_default_pg_num', 'osd_pool_default_pgp_num']
+    params = ['osd_pool_default_size', 'osd_pool_default_min_size', 'osd_pool_default_pg_num', 'osd_pool_default_pgp_num']  # noqa E501
 
     default_running_values = {}
 
     for param in params:
-        rc, cmd, out, err = exec_commands(module, generate_get_config_cmd(param, cluster, user, user_key, container_image=container_image))
+        rc, cmd, out, err = exec_commands(module, generate_get_config_cmd(param, cluster, user, user_key, container_image=container_image))  # noqa E501
 
         if rc == 0:
             default_running_values[param] = out.strip()
@@ -265,59 +267,57 @@ def get_default_running_config(module, cluster, user, user_key, output_format='j
     return rc, cmd, default_running_values, err
 
-def get_application_pool(cluster, name, user, user_key, output_format='json', container_image=None):
+def get_application_pool(cluster, name, user, user_key, output_format='json', container_image=None):  # noqa E501
     '''
     Get application type enabled on a given pool
     '''
+    args = ['application', 'get', name, '-f', output_format]
 
-    args = [ 'application', 'get', name, '-f', output_format ]
-
-    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)  # noqa E501
 
     return cmd
 
-def enable_application_pool(cluster, name, application, user, user_key, container_image=None):
+def enable_application_pool(cluster, name, application, user, user_key, container_image=None):  # noqa E501
     '''
     Enable application on a given pool
     '''
+    args = ['application', 'enable', name, application]
 
-    args = [ 'application', 'enable', name, application ]
-
-    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)  # noqa E501
 
     return cmd
 
-def disable_application_pool(cluster, name, application, user, user_key, container_image=None):
+def disable_application_pool(cluster, name, application, user, user_key, container_image=None):  # noqa E501
     '''
     Disable application on a given pool
     '''
 
-    args = [ 'application', 'disable', name, application, '--yes-i-really-mean-it' ]
+    args = ['application', 'disable', name, application, '--yes-i-really-mean-it']  # noqa E501
 
-    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)  # noqa E501
 
     return cmd
 
-def get_pool_details(module, cluster, name, user, user_key, output_format='json', container_image=None):
+def get_pool_details(module, cluster, name, user, user_key, output_format='json', container_image=None):  # noqa E501
     '''
     Get details about a given pool
     '''
 
-    args = [ 'ls', 'detail', '-f', output_format ]
+    args = ['ls', 'detail', '-f', output_format]
 
-    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)  # noqa E501
 
     rc, cmd, out, err = exec_commands(module, cmd)
 
     if rc == 0:
         out = [p for p in json.loads(out.strip()) if p['pool_name'] == name][0]
 
-    _rc, _cmd, application_pool, _err = exec_commands(module, get_application_pool(cluster, name, user, user_key, container_image=container_image))
+    _rc, _cmd, application_pool, _err = exec_commands(module, get_application_pool(cluster, name, user, user_key, container_image=container_image))  # noqa E501
 
     application = list(json.loads(application_pool.strip()).keys())
@@ -335,60 +335,60 @@ def compare_pool_config(user_pool_config, running_pool_details):
     '''
 
     delta = {}
-    filter_keys = [ 'pg_num', 'pg_placement_num', 'size', 'pg_autoscale_mode']
+    filter_keys = ['pg_num', 'pg_placement_num', 'size', 'pg_autoscale_mode']
     for key in filter_keys:
         if str(running_pool_details[key]) != user_pool_config[key]['value']:
             delta[key] = user_pool_config[key]
 
-    if str(running_pool_details['options'].get('target_size_ratio')) != user_pool_config['target_size_ratio']['value'] and user_pool_config['target_size_ratio']['value'] != None:
+    if str(running_pool_details['options'].get('target_size_ratio')) != user_pool_config['target_size_ratio']['value'] and user_pool_config['target_size_ratio']['value'] is not None:  # noqa E501
         delta['target_size_ratio'] = user_pool_config['target_size_ratio']
 
-    if running_pool_details['application'] != user_pool_config['application']['value'] and user_pool_config['application']['value'] != None:
+    if running_pool_details['application'] != user_pool_config['application']['value'] and user_pool_config['application']['value'] is not None:  # noqa E501
         delta['application'] = {}
-        delta['application']['new_application'] = user_pool_config['application']['value']
+        delta['application']['new_application'] = user_pool_config['application']['value']  # noqa E501
         # to be improved (for update_pools()...)
         delta['application']['value'] = delta['application']['new_application']
-        delta['application']['old_application'] = running_pool_details['application']
+        delta['application']['old_application'] = running_pool_details['application']  # noqa E501
 
     return delta
 
-def list_pools(cluster, user, user_key, details, output_format='json', container_image=None):
+def list_pools(cluster, user, user_key, details, output_format='json', container_image=None):  # noqa E501
     '''
     List existing pools
     '''
 
-    args = [ 'ls' ]
+    args = ['ls']
 
     if details:
         args.append('detail')
 
-    args.extend([ '-f', output_format ])
+    args.extend(['-f', output_format])
 
-    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)  # noqa E501
 
     return cmd
 
-def create_pool(cluster, name, user, user_key, user_pool_config, container_image=None):
+def create_pool(cluster, name, user, user_key, user_pool_config, container_image=None):  # noqa E501
     '''
     Create a new pool
     '''
 
-    args = [ 'create', user_pool_config['pool_name']['value'], '--pg_num', user_pool_config['pg_num']['value'], '--pgp_num', user_pool_config['pgp_num']['value'], user_pool_config['type']['value'] ]
+    args = ['create', user_pool_config['pool_name']['value'], '--pg_num', user_pool_config['pg_num']['value'], '--pgp_num', user_pool_config['pgp_num']['value'], user_pool_config['type']['value']]  # noqa E501
 
     if user_pool_config['type']['value'] == 'replicated':
-        args.extend([ user_pool_config['crush_rule']['value'], '--expected_num_objects', user_pool_config['expected_num_objects']['value'], '--size', user_pool_config['size']['value'], '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value'] ])
+        args.extend([user_pool_config['crush_rule']['value'], '--expected_num_objects', user_pool_config['expected_num_objects']['value'], '--size', user_pool_config['size']['value'], '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value']])  # noqa E501
 
     elif user_pool_config['type']['value'] == 'erasure':
-        args.extend([ user_pool_config['erasure_profile']['value'] ])
+        args.extend([user_pool_config['erasure_profile']['value']])
 
-        if user_pool_config['crush_rule']['value'] != None:
-            args.extend([ user_pool_config['crush_rule']['value'] ])
+        if user_pool_config['crush_rule']['value'] is not None:
+            args.extend([user_pool_config['crush_rule']['value']])
 
-        args.extend([ '--expected_num_objects', user_pool_config['expected_num_objects']['value'] , '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value']])
+        args.extend(['--expected_num_objects', user_pool_config['expected_num_objects']['value'] , '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value']])  # noqa E501
 
-    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)  # noqa E501
 
     return cmd
 
@@ -398,14 +398,14 @@ def remove_pool(cluster, name, user, user_key, container_image=None):
     Remove a pool
     '''
 
-    args = [ 'rm', name, name, '--yes-i-really-really-mean-it']
+    args = ['rm', name, name, '--yes-i-really-really-mean-it']
 
-    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+    cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)  # noqa E501
 
     return cmd
 
-def update_pool(module, cluster, name, user, user_key, delta, container_image=None):
+def update_pool(module, cluster, name, user, user_key, delta, container_image=None):  # noqa E501
     '''
     Update an existing pool
     '''
@@ -414,24 +414,24 @@ def update_pool(module, cluster, name, user, user_key, delta, container_image=No
     for key in delta.keys():
         if key != 'application':
-            args = [ 'set', name, delta[key]['cli_set_opt'], delta[key]['value'] ]
+            args = ['set', name, delta[key]['cli_set_opt'], delta[key]['value']]  # noqa E501
 
-            cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+            cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)  # noqa E501
 
             rc, cmd, out, err = exec_commands(module, cmd)
             if rc != 0:
                 return rc, cmd, out, err
 
         else:
-            rc, cmd, out, err = exec_commands(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image))
+            rc, cmd, out, err = exec_commands(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image))  # noqa E501
             if rc != 0:
                 return rc, cmd, out, err
 
-            rc, cmd, out, err = exec_commands(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image))
+            rc, cmd, out, err = exec_commands(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image))  # noqa E501
             if rc != 0:
                 return rc, cmd, out, err
 
-        report = report + "\n" + "{} has been updated: {} is now {}".format(name, key, delta[key]['value'])
+        report = report + "\n" + "{} has been updated: {} is now {}".format(name, key, delta[key]['value'])  # noqa E501
 
     out = report
     return rc, cmd, out, err
@@ -453,6 +453,7 @@ def exit_module(module, out, rc, cmd, err, startd, changed=False):
     )
     module.exit_json(**result)
 
+
 def run_module():
     module_args = dict(
         cluster=dict(type='str', required=False, default='ceph'),
@@ -465,7 +466,7 @@ def run_module():
         pgp_num=dict(type='str', required=False, default=None),
         pg_autoscale_mode=dict(type='str', required=False, default='on'),
         target_size_ratio=dict(type='str', required=False, default=None),
-        pool_type=dict(type='str', required=False, default='replicated', choices=['replicated', 'erasure', '1', '3']),
+        pool_type=dict(type='str', required=False, default='replicated', choices=['replicated', 'erasure', '1', '3']),  # noqa E501
         erasure_profile=dict(type='str', required=False, default='default'),
         rule_name=dict(type='str', required=False, default=None),
         expected_num_objects=dict(type='str', required=False, default="0"),
@@ -490,7 +491,7 @@ def run_module():
 
     if module.params.get('pg_autoscale_mode').lower() in ['true', 'on', 'yes']:
         pg_autoscale_mode = 'on'
-    elif module.params.get('pg_autoscale_mode').lower() in ['false', 'off', 'no']:
+    elif module.params.get('pg_autoscale_mode').lower() in ['false', 'off', 'no']:  # noqa E501
         pg_autoscale_mode = 'off'
     else:
         pg_autoscale_mode = 'warn'
@@ -502,7 +503,7 @@ def run_module():
     else:
         pool_type = module.params.get('pool_type')
 
-    if module.params.get('rule_name') == None:
+    if module.params.get('rule_name') is None:
         rule_name = 'replicated_rule' if pool_type == 'replicated' else None
     else:
         rule_name = module.params.get('rule_name')
@@ -510,19 +511,17 @@ def run_module():
     erasure_profile = module.params.get('erasure_profile')
     expected_num_objects = module.params.get('expected_num_objects')
 
-
-
     user_pool_config = {
-        'pool_name': { 'value': name },
-        'pg_num': { 'value': pg_num, 'cli_set_opt': 'pg_num' },
-        'pgp_num': { 'value': pgp_num, 'cli_set_opt': 'pgp_num' },
-        'pg_autoscale_mode': { 'value': pg_autoscale_mode, 'cli_set_opt': 'pg_autoscale_mode' },
-        'target_size_ratio': { 'value': target_size_ratio, 'cli_set_opt': 'target_size_ratio' },
-        'application': {'value': application },
-        'type': { 'value': pool_type },
-        'erasure_profile': { 'value': erasure_profile },
-        'crush_rule': { 'value': rule_name, 'cli_set_opt': 'crush_rule' },
-        'expected_num_objects': { 'value': expected_num_objects }
+        'pool_name': {'value': name},
+        'pg_num': {'value': pg_num, 'cli_set_opt': 'pg_num'},
+        'pgp_num': {'value': pgp_num, 'cli_set_opt': 'pgp_num'},
+        'pg_autoscale_mode': {'value': pg_autoscale_mode, 'cli_set_opt': 'pg_autoscale_mode'},  # noqa E501
+        'target_size_ratio': {'value': target_size_ratio, 'cli_set_opt': 'target_size_ratio'},  # noqa E501
+        'application': {'value': application},
+        'type': {'value': pool_type},
+        'erasure_profile': {'value': erasure_profile},
+        'crush_rule': {'value': rule_name, 'cli_set_opt': 'crush_rule'},
+        'expected_num_objects': {'value': expected_num_objects}
     }
 
     if module.check_mode:
@@ -566,32 +565,32 @@ def run_module():
     }
 
     if state == "present":
-        rc, cmd, default_running_ceph_config, err = get_default_running_config(module, cluster, user, user_key, container_image=container_image)
+        rc, cmd, default_running_ceph_config, err = get_default_running_config(module, cluster, user, user_key, container_image=container_image)  # noqa E501
         if rc == 0:
             for k, v in def_opt.items():
-                if module.params[k] == None:
-                    user_pool_config[k] = {'value': default_running_ceph_config[v['conf_name']], 'cli_set_opt': v['cli_set_opt']}
+                if module.params[k] is None:
+                    user_pool_config[k] = {'value': default_running_ceph_config[v['conf_name']], 'cli_set_opt': v['cli_set_opt']}  # noqa E501
                 else:
-                    user_pool_config[k] = {'value': module.params.get(k), 'cli_set_opt': v['cli_set_opt']}
-            rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image))
+                    user_pool_config[k] = {'value': module.params.get(k), 'cli_set_opt': v['cli_set_opt']}  # noqa E501
+            rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image))  # noqa E501
             if rc == 0:
-                running_pool_details = get_pool_details(module, cluster, name, user, user_key, container_image=container_image)
-                user_pool_config['pg_placement_num'] = { 'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num' }
-                delta = compare_pool_config(user_pool_config, running_pool_details[2])
-                if len(delta) > 0 and running_pool_details[2]['erasure_code_profile'] == "" and 'size' not in delta.keys():
-                    rc, cmd, out, err = update_pool(module, cluster, name, user, user_key, delta, container_image=container_image)
+                running_pool_details = get_pool_details(module, cluster, name, user, user_key, container_image=container_image)  # noqa E501
+                user_pool_config['pg_placement_num'] = { 'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num' }  # noqa E501
+                delta = compare_pool_config(user_pool_config, running_pool_details[2])  # noqa E501
+                if len(delta) > 0 and running_pool_details[2]['erasure_code_profile'] == "" and 'size' not in delta.keys():  # noqa E501
+                    rc, cmd, out, err = update_pool(module, cluster, name, user, user_key, delta, container_image=container_image)  # noqa E501
                     if rc == 0:
                         changed = True
                 else:
-                    out = "Pool {} already exists and there is nothing to update.".format(name)
+                    out = "Pool {} already exists and there is nothing to update.".format(name)  # noqa E501
             else:
-                rc, cmd, out, err = exec_commands(module, create_pool(cluster, name, user, user_key, user_pool_config=user_pool_config, container_image=container_image))
-                if user_pool_config['application']['value'] != None:
-                    _rc, _cmd, _out, _err = exec_commands(module, enable_application_pool(cluster, name, user_pool_config['application']['value'], user, user_key, container_image=container_image))
+                rc, cmd, out, err = exec_commands(module, create_pool(cluster, name, user, user_key, user_pool_config=user_pool_config, container_image=container_image))  # noqa E501
+                if user_pool_config['application']['value'] is not None:
+                    _rc, _cmd, _out, _err = exec_commands(module, enable_application_pool(cluster, name, user_pool_config['application']['value'], user, user_key, container_image=container_image))  # noqa E501
                 changed = True
 
     elif state == "list":
-        rc, cmd, out, err = exec_commands(module, list_pools(cluster, name, user, user_key, details, container_image=container_image))
+        rc, cmd, out, err = exec_commands(module, list_pools(cluster, name, user, user_key, details, container_image=container_image))  # noqa E501
         if rc != 0:
             out = "Couldn't list pool(s) present on the cluster"
 
@@ -604,8 +603,7 @@ def run_module():
             rc = 0
             out = "Skipped, since pool {} doesn't exist".format(name)
 
-
-    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed)
+    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed)  # noqa E501
 
 
 def main():
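
The most common non-whitespace change in ceph_pool.py swaps == None / != None for is None / is not None (flake8 E711): None is a singleton, so identity is the reliable test, and == can be hijacked by a class's own __eq__. A small demonstration:

    class AlwaysEqual:
        def __eq__(self, other):
            return True  # claims equality with everything, including None

    obj = AlwaysEqual()
    print(obj == None)  # True -- misleading, and exactly what E711 warns about
    print(obj is None)  # False -- unambiguous identity check

The [ 'ls' ] to ['ls'] changes in the same file address E201/E202, whitespace just inside brackets.
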
diff --git a/library/ceph_volume.py b/library/ceph_volume.py
index 2e8e50d49..58100d03d 100644
--- a/library/ceph_volume.py
+++ b/library/ceph_volume.py
@@ -1,4 +1,5 @@
 #!/usr/bin/python
+
 import datetime
 import copy
 import json
@@ -443,6 +444,7 @@ def list_osd(module, container_image):
 
     return cmd
 
+
 def list_storage_inventory(module, container_image):
     '''
     List storage inventory.
@@ -454,6 +456,7 @@ def list_storage_inventory(module, container_image):
 
     return cmd
 
+
 def activate_osd():
     '''
     Activate all the OSDs on a machine
@@ -473,7 +476,7 @@ def is_lv(module, vg, lv, container_image):
     Check if an LV exists
     '''
 
-    args = [ '--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg) ]
+    args = ['--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg)]  # noqa E501
 
     cmd = build_cmd(args, container_image, binary='lvs')
 
@@ -614,8 +617,7 @@ def run_module():
         if out_dict:
             data = module.params['data']
-            result['stdout'] = 'skipped, since {0} is already used for an osd'.format(  # noqa E501
-                data)
+            result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data)  # noqa E501
             result['rc'] = 0
             module.exit_json(**result)
 
@@ -635,18 +637,18 @@ def run_module():
     elif action == 'zap':
         # Zap the OSD
         skip = []
-        for device_type in ['journal','data', 'db', 'wal']:
+        for device_type in ['journal', 'data', 'db', 'wal']:
             # 1/ if we passed vg/lv
-            if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None):
+            if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None):  # noqa E501
                 # 2/ check this is an actual lv/vg
-                ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image)
+                ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image)  # noqa E501
                 skip.append(ret)
                 # 3/ This isn't a lv/vg device
                 if not ret:
                     module.params['{}_vg'.format(device_type)] = False
                     module.params[device_type] = False
-            # 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device
-            elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None):
+            # 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device  # noqa E501
+            elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None):  # noqa E501
                 skip.append(True)
 
         cmd = zap_devices(module, container_image)
@@ -694,7 +696,7 @@ def run_module():
             strategy_changed_in_out = "strategy changed" in out
             strategy_changed_in_err = "strategy changed" in err
             strategy_changed = strategy_changed_in_out or \
-                               strategy_changed_in_err
+                strategy_changed_in_err
             if strategy_changed:
                 if strategy_changed_in_out:
                     out = json.dumps({"changed": False,
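
ceph_volume.py's ['journal','data', ...] fix is flake8 E231, missing whitespace after a comma, and the reindent under strategy_changed appears to address continuation-line indentation (the E12x family). A compact sketch of the comma rule, with the first assignment showing the flagged form:

    devices = ['journal','data']   # E231: no space after the comma
    devices = ['journal', 'data']  # clean
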
diff --git a/library/igw_lun.py b/library/igw_lun.py
index 93d94b127..6b9ba98bd 100644
--- a/library/igw_lun.py
+++ b/library/igw_lun.py
@@ -69,6 +69,7 @@ author:
     - 'Paul Cuzner'
 """
+
 import os  # noqa E402
 import logging  # noqa E402
 from logging.handlers import RotatingFileHandler  # noqa E402
@@ -79,11 +80,10 @@ from ceph_iscsi_config.lun import LUN  # noqa E402
 from ceph_iscsi_config.utils import valid_size  # noqa E402
 import ceph_iscsi_config.settings as settings  # noqa E402
 
+
 # the main function is called ansible_main to allow the call stack
 # to be checked to determine whether the call to the ceph_iscsi_config
 # modules is from ansible or not
-
-
 def ansible_main():
 
     # Define the fields needs to create/map rbd's the the host(s)
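
igw_lun.py goes the other way from ceph_crush.py: its imports stay below the module's documentation machinery, so each keeps a "# noqa E402" marker — an explicit per-line opt-out rather than a relocation. Sketch:

    DOCUMENTATION = """stand-in for the module's documentation block"""

    # the marker below tells flake8 the late import is deliberate
    import os  # noqa E402

    print(os.name)
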
If we crashed while purging we could delete # the image but not removed it from the config logger.debug("Image already deleted.") - except rbd.ImageHasSnapshots as err: + except rbd.ImageHasSnapshots: logger.error("Image still has snapshots.") # Older versions of ceph-iscsi-config do not have a error_msg # string. @@ -81,9 +81,9 @@ def delete_images(cfg): if rbd_dev.error: if rbd_dev.error_msg: - logger.error("Could not remove {}. Error: {}. Manually run the " + logger.error("Could not remove {}. Error: {}. Manually run the " # noqa E501 "rbd command line tool to delete.". - format(image, rbd_error_msg)) + format(image, rbd_dev.error_msg)) else: logger.error("Could not remove {}. Manually run the rbd " "command line tool to delete.".format(image)) @@ -92,13 +92,14 @@ def delete_images(cfg): return changes_made -def delete_gateway_config(cfg): + +def delete_gateway_config(cfg, module): ioctx = cfg._open_ioctx() try: size, mtime = ioctx.stat(cfg.config_name) except rados.ObjectNotFound: logger.debug("gateway.conf already removed.") - return false + return False try: ioctx.remove_object(cfg.config_name) @@ -128,7 +129,7 @@ def ansible_main(): # # Purge gateway configuration, if the config has gateways if run_mode == 'gateway': - changes_made = delete_gateway_config(cfg) + changes_made = delete_gateway_config(cfg, module) elif run_mode == 'disks' and len(cfg.config['disks'].keys()) > 0: # # Remove the disks on this host, that have been registered in the