library: flake8 ceph-ansible modules

This commit ensures that all ceph-ansible modules pass flake8 cleanly.

Signed-off-by: Wong Hoi Sing Edison <hswong3i@gmail.com>
Co-authored-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 268a39ca0e)
(cherry picked from commit 32a2f04cbc)
pull/5905/head
Wong Hoi Sing Edison 2020-09-06 10:17:02 +08:00 committed by Guillaume Abrioux
parent 18240d8b99
commit dda1dec67e
6 changed files with 85 additions and 83 deletions
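Most of the changes below follow a small set of recurring flake8 fixes: over-long lines are either re-wrapped or marked with "# noqa E501", exception handlers that never use the caught exception drop the unused "as e" binding, imports move to the top of the modules, and missing spaces or blank lines are added. A minimal, self-contained sketch of the two most common idioms, using made-up names rather than code from the patch:

def fetch(store, key, strict=True, retries=3,
          timeout=30, log=print, default=None):
    # Placeholder lookup; the extra parameters just widen the call site below.
    return store[key]


def safe_fetch(store, key):
    try:
        value = fetch(store, key, strict=True, retries=5, timeout=60, log=print)  # noqa E501
    except KeyError:
        # Previously "except KeyError as e:"; flake8 flags the unused 'e' (F841).
        value = None
    return value


print(safe_fetch({'a': 1}, 'b'))  # prints None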

View File

@@ -3,7 +3,12 @@
# Copyright 2018 Daniel Pivonka <dpivonka@redhat.com>
# Copyright 2018 Red Hat, Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# GNU General Public License v3.0+
from ansible.module_utils.basic import AnsibleModule
from socket import error as socket_error
import boto
import radosgw
ANSIBLE_METADATA = {
'metadata_version': '1.1',
@@ -107,7 +112,7 @@ option:
default: unlimited
bucketmaxobjects:
description:
- with bucket quota enabled specify maximum number of objects
- with bucket quota enabled specify maximum number of objects # noqa E501
required: false
default: unlimited
buckets:
@@ -258,7 +263,7 @@ error_messages:
returned: always
type: list
sample: [
"test2: could not modify user: unable to modify user, cannot add duplicate email\n"
"test2: could not modify user: unable to modify user, cannot add duplicate email\n" # noqa E501
]
failed_users:
@@ -287,11 +292,6 @@ added_buckets:
'''
from ansible.module_utils.basic import AnsibleModule
from socket import error as socket_error
import boto
import radosgw
def create_users(rgw, users, result):
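Taken together with the first hunk, this moves the module's imports from below the documentation strings to the top of the file, which is the layout flake8 expects for module-level imports (E402). A minimal sketch of the resulting structure, with placeholder content only:

import datetime  # stand-in for the module's real imports

ANSIBLE_METADATA = {'metadata_version': '1.1'}
DOCUMENTATION = '''placeholder module documentation'''
RETURN = '''placeholder return documentation'''


def main():
    return datetime.datetime.now()


if __name__ == '__main__':
    main()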
@@ -321,7 +321,7 @@ def create_users(rgw, users, result):
# check if user exists
try:
user_info = rgw.get_user(uid=username)
except radosgw.exception.RadosGWAdminError as e:
except radosgw.exception.RadosGWAdminError:
# it doesnt exist
user_info = None
@@ -334,36 +334,36 @@ def create_users(rgw, users, result):
if email:
if autogenkey:
try:
rgw.create_user(username, fullname, email=email, key_type='s3',
rgw.create_user(username, fullname, email=email, key_type='s3', # noqa E501
generate_key=autogenkey,
max_buckets=maxbucket, suspended=suspend)
max_buckets=maxbucket, suspended=suspend) # noqa E501
except radosgw.exception.RadosGWAdminError as e:
result['error_messages'].append(username + ' ' + e.get_code())
result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
fail_flag = True
else:
try:
rgw.create_user(username, fullname, email=email, key_type='s3',
access_key=accesskey, secret_key=secretkey,
max_buckets=maxbucket, suspended=suspend)
rgw.create_user(username, fullname, email=email, key_type='s3', # noqa E501
access_key=accesskey, secret_key=secretkey, # noqa E501
max_buckets=maxbucket, suspended=suspend) # noqa E501
except radosgw.exception.RadosGWAdminError as e:
result['error_messages'].append(username + ' ' + e.get_code())
result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
fail_flag = True
else:
if autogenkey:
try:
rgw.create_user(username, fullname, key_type='s3',
generate_key=autogenkey,
max_buckets=maxbucket, suspended=suspend)
max_buckets=maxbucket, suspended=suspend) # noqa E501
except radosgw.exception.RadosGWAdminError as e:
result['error_messages'].append(username + ' ' + e.get_code())
result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
fail_flag = True
else:
try:
rgw.create_user(username, fullname, key_type='s3',
access_key=accesskey, secret_key=secretkey,
max_buckets=maxbucket, suspended=suspend)
access_key=accesskey, secret_key=secretkey, # noqa E501
max_buckets=maxbucket, suspended=suspend) # noqa E501
except radosgw.exception.RadosGWAdminError as e:
result['error_messages'].append(username + ' ' + e.get_code())
result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
fail_flag = True
if not fail_flag and userquota:
@@ -371,21 +371,21 @@ def create_users(rgw, users, result):
rgw.set_quota(username, 'user', max_objects=usermaxobjects,
max_size_kb=usermaxsize, enabled=True)
except radosgw.exception.RadosGWAdminError as e:
result['error_messages'].append(username + ' ' + e.get_code())
result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
fail_flag = True
if not fail_flag and bucketquota:
try:
rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects,
rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects, # noqa E501
max_size_kb=bucketmaxsize, enabled=True)
except radosgw.exception.RadosGWAdminError as e:
result['error_messages'].append(username + ' ' + e.get_code())
result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
fail_flag = True
if fail_flag:
try:
rgw.delete_user(username)
except radosgw.exception.RadosGWAdminError as e:
except radosgw.exception.RadosGWAdminError:
pass
failed_users.append(username)
else:
@@ -424,7 +424,7 @@ def create_buckets(rgw, buckets, result):
# check if user exists
try:
user_info = rgw.get_user(uid=user)
except radosgw.exception.RadosGWAdminError as e:
except radosgw.exception.RadosGWAdminError:
# it doesnt exist
user_info = None
@@ -439,7 +439,7 @@ def create_buckets(rgw, buckets, result):
result['error_messages'].append(bucket + e.get_code())
try:
rgw.delete_bucket(bucket, purge_objects=True)
except radosgw.exception.RadosGWAdminError as e:
except radosgw.exception.RadosGWAdminError:
pass
failed_buckets.append(bucket)
@@ -447,15 +447,15 @@
# user doesnt exist cant be link delete bucket
try:
rgw.delete_bucket(bucket, purge_objects=True)
except radosgw.exception.RadosGWAdminError as e:
except radosgw.exception.RadosGWAdminError:
pass
failed_buckets.append(bucket)
result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user)
result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user) # noqa E501
else:
# something went wrong
failed_buckets.append(bucket)
result['error_messages'].append(bucket + ' could not be created')
result['error_messages'].append(bucket + ' could not be created') # noqa E501
result['added_buckets'] = ", ".join(added_buckets)
result['failed_buckets'] = ", ".join(failed_buckets)
@@ -467,7 +467,7 @@ def create_bucket(rgw, bucket):
host=rgw._connection[0],
port=rgw.port,
is_secure=rgw.is_secure,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
calling_format=boto.s3.connection.OrdinaryCallingFormat(), # noqa E501
)
try:
@@ -489,23 +489,23 @@ def main():
admin_access_key=dict(type='str', required=True),
admin_secret_key=dict(type='str', required=True),
buckets=dict(type='list', required=False, elements='dict',
options=dict(bucket=dict(type='str', required=True),
user=dict(type='str', required=True))),
options=dict(bucket=dict(type='str', required=True), # noqa E501
user=dict(type='str', required=True))), # noqa E501
users=dict(type='list', required=False, elements='dict',
options=dict(username=dict(type='str', required=True),
fullname=dict(type='str', required=True),
email=dict(type='str', required=False),
maxbucket=dict(type='int', required=False, default=1000),
suspend=dict(type='bool', required=False, default=False),
autogenkey=dict(type='bool', required=False, default=True),
accesskey=dict(type='str', required=False),
secretkey=dict(type='str', required=False),
userquota=dict(type='bool', required=False, default=False),
usermaxsize=dict(type='str', required=False, default='-1'),
usermaxobjects=dict(type='int', required=False, default=-1),
bucketquota=dict(type='bool', required=False, default=False),
bucketmaxsize=dict(type='str', required=False, default='-1'),
bucketmaxobjects=dict(type='int', required=False, default=-1))))
options=dict(username=dict(type='str', required=True), # noqa E501
fullname=dict(type='str', required=True), # noqa E501
email=dict(type='str', required=False), # noqa E501
maxbucket=dict(type='int', required=False, default=1000), # noqa E501
suspend=dict(type='bool', required=False, default=False), # noqa E501
autogenkey=dict(type='bool', required=False, default=True), # noqa E501
accesskey=dict(type='str', required=False), # noqa E501
secretkey=dict(type='str', required=False), # noqa E501
userquota=dict(type='bool', required=False, default=False), # noqa E501
usermaxsize=dict(type='str', required=False, default='-1'), # noqa E501
usermaxobjects=dict(type='int', required=False, default=-1), # noqa E501
bucketquota=dict(type='bool', required=False, default=False), # noqa E501
bucketmaxsize=dict(type='str', required=False, default='-1'), # noqa E501
bucketmaxobjects=dict(type='int', required=False, default=-1)))) # noqa E501
# the AnsibleModule object
module = AnsibleModule(argument_spec=fields,
@@ -533,8 +533,8 @@ def main():
# radosgw connection
rgw = radosgw.connection.RadosGWAdminConnection(host=rgw_host,
port=port,
access_key=admin_access_key,
secret_key=admin_secret_key,
access_key=admin_access_key, # noqa E501
secret_key=admin_secret_key, # noqa E501
aws_signature='AWS4',
is_secure=is_secure)

View File

@@ -1,14 +1,14 @@
#!/usr/bin/python
#
# Copyright (c) 2018 Red Hat, Inc.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# GNU General Public License v3.0+
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
import datetime
ANSIBLE_METADATA = {
'metadata_version': '1.1',
@@ -61,9 +61,6 @@ EXAMPLES = '''
RETURN = '''# '''
from ansible.module_utils.basic import AnsibleModule
import datetime
def fatal(message, module):
'''
@@ -117,9 +114,9 @@ def sort_osd_crush_location(location, module):
"region",
"root",
]
return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0]))
return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0])) # noqa E501
except ValueError as error:
fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module)
fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module) # noqa E501
def create_and_move_buckets_list(cluster, location, containerized=None):
@@ -131,10 +128,10 @@ def create_and_move_buckets_list(cluster, location, containerized=None):
for item in location:
bucket_type, bucket_name = item
# ceph osd crush add-bucket maroot root
cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized))
cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized)) # noqa E501
if previous_bucket:
# ceph osd crush move monrack root=maroot
cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized))
cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized)) # noqa E501
previous_bucket = item[1]
return cmd_list
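sort_osd_crush_location() above orders the requested CRUSH location from the most specific bucket type up to the root, and create_and_move_buckets_list() then turns each entry into an add-bucket/move command. A small self-contained illustration of that ordering, using a shortened bucket-type list and made-up names (the module's real list is longer):

# Shortened, illustrative bucket-type list and a made-up location.
crush_bucket_types = ["host", "rack", "datacenter", "region", "root"]
location = [("root", "default"), ("rack", "rack1"), ("host", "node1")]

# Same idea as sort_osd_crush_location(): order by bucket-type precedence.
ordered = sorted(location, key=lambda crush: crush_bucket_types.index(crush[0]))
print(ordered)  # [('host', 'node1'), ('rack', 'rack1'), ('root', 'default')]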
@@ -181,7 +178,7 @@ def run_module():
startd = datetime.datetime.now()
# run the Ceph command to add buckets
rc, cmd, out, err = exec_commands(module, create_and_move_buckets_list(cluster, location, containerized))
rc, cmd, out, err = exec_commands(module, create_and_move_buckets_list(cluster, location, containerized)) # noqa E501
endd = datetime.datetime.now()
delta = endd - startd

View File

@@ -1,4 +1,5 @@
#!/usr/bin/python3
# Copyright 2018, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -189,6 +190,7 @@ def str_to_bool(val):
else:
raise ValueError("Invalid input value: %s" % val)
def fatal(message, module):
'''
Report a fatal error and exit
@@ -460,14 +462,14 @@ def lookup_ceph_initial_entities(module, out):
else:
fatal("'auth_dump' key not present in json output:", module) # noqa E501
if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False)):
if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False)): # noqa E501
# must be missing in auth_dump, as if it were in CEPH_INITIAL_KEYS
# it'd be in entities from the above test. Report what's missing.
missing = []
for e in CEPH_INITIAL_KEYS:
if e not in entities:
missing.append(e)
fatal("initial keyring does not contain keys: " + ' '.join(missing), module)
fatal("initial keyring does not contain keys: " + ' '.join(missing), module) # noqa E501
return entities
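The CEPH_ROLLING_UPDATE check above feeds os.environ.get() straight into str_to_bool(), so the helper has to accept both the strings a user exports and the bool default. A rough, illustrative sketch of that pattern (not the module's exact helper):

import os


def str_to_bool(val):
    # Accept a bool default as-is; map common truthy/falsy strings otherwise.
    if isinstance(val, bool):
        return val
    val = str(val).lower()
    if val in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if val in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError("Invalid input value: %s" % val)


rolling_update = str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False))
print(rolling_update)  # False unless CEPH_ROLLING_UPDATE is set to a truthy value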
@@ -555,8 +557,8 @@ def run_module():
file_path = dest
else:
if 'bootstrap' in dest:
# Build a different path for bootstrap keys as there are stored as
# /var/lib/ceph/bootstrap-rbd/ceph.keyring
# Build a different path for bootstrap keys as there are stored
# as /var/lib/ceph/bootstrap-rbd/ceph.keyring
keyring_filename = cluster + '.keyring'
else:
keyring_filename = cluster + "." + name + ".keyring"
@@ -651,7 +653,7 @@ def run_module():
rc, cmd, out, err = exec_commands(
module, list_keys(cluster, user, user_key, container_image))
if rc != 0:
result["stdout"] = "failed to retrieve ceph keys".format(name)
result["stdout"] = "failed to retrieve ceph keys"
result["sdterr"] = err
result['rc'] = 0
module.exit_json(**result)

View File

@@ -1,4 +1,5 @@
#!/usr/bin/python
import datetime
import copy
import json
@@ -433,6 +434,7 @@ def list_osd(module, container_image):
return cmd
def list_storage_inventory(module, container_image):
'''
List storage inventory.
@@ -444,6 +446,7 @@ def list_storage_inventory(module, container_image):
return cmd
def activate_osd():
'''
Activate all the OSDs on a machine
@@ -463,7 +466,7 @@ def is_lv(module, vg, lv, container_image):
Check if an LV exists
'''
args = [ '--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg) ]
args = ['--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg)] # noqa E501
cmd = build_cmd(args, container_image, binary='lvs')
@@ -603,8 +606,7 @@ def run_module():
if out_dict:
data = module.params['data']
result['stdout'] = 'skipped, since {0} is already used for an osd'.format( # noqa E501
data)
result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data) # noqa E501
result['rc'] = 0
module.exit_json(**result)
@@ -624,18 +626,18 @@ def run_module():
elif action == 'zap':
# Zap the OSD
skip = []
for device_type in ['journal','data', 'db', 'wal']:
for device_type in ['journal', 'data', 'db', 'wal']:
# 1/ if we passed vg/lv
if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None):
if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa E501
# 2/ check this is an actual lv/vg
ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image)
ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image) # noqa E501
skip.append(ret)
# 3/ This isn't a lv/vg device
if not ret:
module.params['{}_vg'.format(device_type)] = False
module.params[device_type] = False
# 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device
elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None):
# 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device # noqa E501
elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa E501
skip.append(True)
cmd = zap_devices(module, container_image)
@@ -683,7 +685,7 @@ def run_module():
strategy_changed_in_out = "strategy changed" in out
strategy_changed_in_err = "strategy changed" in err
strategy_changed = strategy_changed_in_out or \
strategy_changed_in_err
strategy_changed_in_err
if strategy_changed:
if strategy_changed_in_out:
out = json.dumps({"changed": False,

View File

@@ -69,6 +69,7 @@ author:
- 'Paul Cuzner'
"""
import os # noqa E402
import logging # noqa E402
from logging.handlers import RotatingFileHandler # noqa E402
@@ -79,11 +80,10 @@ from ceph_iscsi_config.lun import LUN # noqa E402
from ceph_iscsi_config.utils import valid_size # noqa E402
import ceph_iscsi_config.settings as settings # noqa E402
# the main function is called ansible_main to allow the call stack
# to be checked to determine whether the call to the ceph_iscsi_config
# modules is from ansible or not
def ansible_main():
# Define the fields needs to create/map rbd's the the host(s)
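The comment above explains the naming: the entry point is called ansible_main so that ceph_iscsi_config can inspect the call stack and tell whether it is being driven by Ansible. A purely illustrative sketch of how such a check could look (the real check lives inside ceph_iscsi_config, not in this module):

import inspect


def called_from_ansible():
    # Walk the call stack and look for the wrapper function by name.
    return any(frame.function == 'ansible_main' for frame in inspect.stack())


def ansible_main():
    print(called_from_ansible())  # True when reached through ansible_main()


if __name__ == '__main__':
    ansible_main()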

View File

@@ -68,11 +68,11 @@ def delete_images(cfg):
try:
rbd_dev.delete()
except rbd.ImageNotFound as err:
except rbd.ImageNotFound:
# Just log and ignore. If we crashed while purging we could delete
# the image but not removed it from the config
logger.debug("Image already deleted.")
except rbd.ImageHasSnapshots as err:
except rbd.ImageHasSnapshots:
logger.error("Image still has snapshots.")
# Older versions of ceph-iscsi-config do not have a error_msg
# string.
@@ -81,9 +81,9 @@ def delete_images(cfg):
if rbd_dev.error:
if rbd_dev.error_msg:
logger.error("Could not remove {}. Error: {}. Manually run the "
logger.error("Could not remove {}. Error: {}. Manually run the " # noqa E501
"rbd command line tool to delete.".
format(image, rbd_error_msg))
format(image, rbd_dev.error_msg))
else:
logger.error("Could not remove {}. Manually run the rbd "
"command line tool to delete.".format(image))
@@ -92,13 +92,14 @@ def delete_images(cfg):
return changes_made
def delete_gateway_config(cfg):
def delete_gateway_config(cfg, module):
ioctx = cfg._open_ioctx()
try:
size, mtime = ioctx.stat(cfg.config_name)
except rados.ObjectNotFound:
logger.debug("gateway.conf already removed.")
return false
return False
try:
ioctx.remove_object(cfg.config_name)
@@ -128,7 +129,7 @@ def ansible_main():
#
# Purge gateway configuration, if the config has gateways
if run_mode == 'gateway':
changes_made = delete_gateway_config(cfg)
changes_made = delete_gateway_config(cfg, module)
elif run_mode == 'disks' and len(cfg.config['disks'].keys()) > 0:
#
# Remove the disks on this host, that have been registered in the