mirror of https://github.com/ceph/ceph-ansible.git
library: flake8 ceph-ansible modules
This commit ensures all ceph-ansible modules pass flake8 properly.
Signed-off-by: Wong Hoi Sing Edison <hswong3i@gmail.com>
Co-authored-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 268a39ca0e)
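Almost every hunk below makes one of a small number of mechanical fixes: appending a "# noqa E501" marker so flake8 stops flagging a long line, dropping an "as e" / "as err" binding on exceptions that are never used (reported by flake8 as an unused local variable), replacing "== None" comparisons with "is None", and moving imports out of the documentation section to the top of the module. As a reminder of how the two most common patterns work, here is a small self-contained sketch in Python (illustrative only; the names are made up and not taken from ceph-ansible):

    # E501 (line too long): a trailing "# noqa E501" silences that one check
    # on that one line instead of reflowing the statement.
    def lookup(table, key):
        value = table.get(key)
        if value is None:
            raise KeyError("{} is not a valid key, valid keys are {}".format(key, sorted(table)))  # noqa E501
        return value

    # Unused exception variable: when the exception object is not needed,
    # "except KeyError as err:" becomes plain "except KeyError:".
    try:
        lookup({'size': 3}, 'pg_num')
    except KeyError:
        value = None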
pull/5880/merge
parent 10fc2d1d92
commit 32a2f04cbc
@@ -3,7 +3,12 @@
 # Copyright 2018 Daniel Pivonka <dpivonka@redhat.com>
 # Copyright 2018 Red Hat, Inc.
 #
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+

+from ansible.module_utils.basic import AnsibleModule
+from socket import error as socket_error
+import boto
+import radosgw
+
 ANSIBLE_METADATA = {
 'metadata_version': '1.1',
@@ -107,7 +112,7 @@ option:
 default: unlimited
 bucketmaxobjects:
 description:
-- with bucket quota enabled specify maximum number of objects
+- with bucket quota enabled specify maximum number of objects # noqa E501
 required: false
 default: unlimited
 buckets:
@@ -258,7 +263,7 @@ error_messages:
 returned: always
 type: list
 sample: [
-"test2: could not modify user: unable to modify user, cannot add duplicate email\n"
+"test2: could not modify user: unable to modify user, cannot add duplicate email\n" # noqa E501
 ]

 failed_users:
@@ -287,11 +292,6 @@ added_buckets:

 '''

-from ansible.module_utils.basic import AnsibleModule
-from socket import error as socket_error
-import boto
-import radosgw
-

 def create_users(rgw, users, result):

@@ -321,7 +321,7 @@ def create_users(rgw, users, result):
 # check if user exists
 try:
 user_info = rgw.get_user(uid=username)
-except radosgw.exception.RadosGWAdminError as e:
+except radosgw.exception.RadosGWAdminError:
 # it doesnt exist
 user_info = None

@@ -334,36 +334,36 @@ def create_users(rgw, users, result):
 if email:
 if autogenkey:
 try:
-rgw.create_user(username, fullname, email=email, key_type='s3',
+rgw.create_user(username, fullname, email=email, key_type='s3', # noqa E501
 generate_key=autogenkey,
-max_buckets=maxbucket, suspended=suspend)
+max_buckets=maxbucket, suspended=suspend) # noqa E501
 except radosgw.exception.RadosGWAdminError as e:
-result['error_messages'].append(username + ' ' + e.get_code())
+result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
 fail_flag = True
 else:
 try:
-rgw.create_user(username, fullname, email=email, key_type='s3',
+rgw.create_user(username, fullname, email=email, key_type='s3', # noqa E501
-access_key=accesskey, secret_key=secretkey,
+access_key=accesskey, secret_key=secretkey, # noqa E501
-max_buckets=maxbucket, suspended=suspend)
+max_buckets=maxbucket, suspended=suspend) # noqa E501
 except radosgw.exception.RadosGWAdminError as e:
-result['error_messages'].append(username + ' ' + e.get_code())
+result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
 fail_flag = True
 else:
 if autogenkey:
 try:
 rgw.create_user(username, fullname, key_type='s3',
 generate_key=autogenkey,
-max_buckets=maxbucket, suspended=suspend)
+max_buckets=maxbucket, suspended=suspend) # noqa E501
 except radosgw.exception.RadosGWAdminError as e:
-result['error_messages'].append(username + ' ' + e.get_code())
+result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
 fail_flag = True
 else:
 try:
 rgw.create_user(username, fullname, key_type='s3',
-access_key=accesskey, secret_key=secretkey,
+access_key=accesskey, secret_key=secretkey, # noqa E501
-max_buckets=maxbucket, suspended=suspend)
+max_buckets=maxbucket, suspended=suspend) # noqa E501
 except radosgw.exception.RadosGWAdminError as e:
-result['error_messages'].append(username + ' ' + e.get_code())
+result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
 fail_flag = True

 if not fail_flag and userquota:
@@ -371,21 +371,21 @@ def create_users(rgw, users, result):
 rgw.set_quota(username, 'user', max_objects=usermaxobjects,
 max_size_kb=usermaxsize, enabled=True)
 except radosgw.exception.RadosGWAdminError as e:
-result['error_messages'].append(username + ' ' + e.get_code())
+result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
 fail_flag = True

 if not fail_flag and bucketquota:
 try:
-rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects,
+rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects, # noqa E501
 max_size_kb=bucketmaxsize, enabled=True)
 except radosgw.exception.RadosGWAdminError as e:
-result['error_messages'].append(username + ' ' + e.get_code())
+result['error_messages'].append(username + ' ' + e.get_code()) # noqa E501
 fail_flag = True

 if fail_flag:
 try:
 rgw.delete_user(username)
-except radosgw.exception.RadosGWAdminError as e:
+except radosgw.exception.RadosGWAdminError:
 pass
 failed_users.append(username)
 else:
@@ -424,7 +424,7 @@ def create_buckets(rgw, buckets, result):
 # check if user exists
 try:
 user_info = rgw.get_user(uid=user)
-except radosgw.exception.RadosGWAdminError as e:
+except radosgw.exception.RadosGWAdminError:
 # it doesnt exist
 user_info = None

@@ -439,7 +439,7 @@ def create_buckets(rgw, buckets, result):
 result['error_messages'].append(bucket + e.get_code())
 try:
 rgw.delete_bucket(bucket, purge_objects=True)
-except radosgw.exception.RadosGWAdminError as e:
+except radosgw.exception.RadosGWAdminError:
 pass
 failed_buckets.append(bucket)

@@ -447,15 +447,15 @@ def create_buckets(rgw, buckets, result):
 # user doesnt exist cant be link delete bucket
 try:
 rgw.delete_bucket(bucket, purge_objects=True)
-except radosgw.exception.RadosGWAdminError as e:
+except radosgw.exception.RadosGWAdminError:
 pass
 failed_buckets.append(bucket)
-result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user)
+result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user) # noqa E501

 else:
 # something went wrong
 failed_buckets.append(bucket)
-result['error_messages'].append(bucket + ' could not be created')
+result['error_messages'].append(bucket + ' could not be created') # noqa E501

 result['added_buckets'] = ", ".join(added_buckets)
 result['failed_buckets'] = ", ".join(failed_buckets)
@@ -467,7 +467,7 @@ def create_bucket(rgw, bucket):
 host=rgw._connection[0],
 port=rgw.port,
 is_secure=rgw.is_secure,
-calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+calling_format=boto.s3.connection.OrdinaryCallingFormat(), # noqa E501
 )

 try:
@@ -489,23 +489,23 @@ def main():
 admin_access_key=dict(type='str', required=True),
 admin_secret_key=dict(type='str', required=True),
 buckets=dict(type='list', required=False, elements='dict',
-options=dict(bucket=dict(type='str', required=True),
+options=dict(bucket=dict(type='str', required=True), # noqa E501
-user=dict(type='str', required=True))),
+user=dict(type='str', required=True))), # noqa E501
 users=dict(type='list', required=False, elements='dict',
-options=dict(username=dict(type='str', required=True),
+options=dict(username=dict(type='str', required=True), # noqa E501
-fullname=dict(type='str', required=True),
+fullname=dict(type='str', required=True), # noqa E501
-email=dict(type='str', required=False),
+email=dict(type='str', required=False), # noqa E501
-maxbucket=dict(type='int', required=False, default=1000),
+maxbucket=dict(type='int', required=False, default=1000), # noqa E501
-suspend=dict(type='bool', required=False, default=False),
+suspend=dict(type='bool', required=False, default=False), # noqa E501
-autogenkey=dict(type='bool', required=False, default=True),
+autogenkey=dict(type='bool', required=False, default=True), # noqa E501
-accesskey=dict(type='str', required=False),
+accesskey=dict(type='str', required=False), # noqa E501
-secretkey=dict(type='str', required=False),
+secretkey=dict(type='str', required=False), # noqa E501
-userquota=dict(type='bool', required=False, default=False),
+userquota=dict(type='bool', required=False, default=False), # noqa E501
-usermaxsize=dict(type='str', required=False, default='-1'),
+usermaxsize=dict(type='str', required=False, default='-1'), # noqa E501
-usermaxobjects=dict(type='int', required=False, default=-1),
+usermaxobjects=dict(type='int', required=False, default=-1), # noqa E501
-bucketquota=dict(type='bool', required=False, default=False),
+bucketquota=dict(type='bool', required=False, default=False), # noqa E501
-bucketmaxsize=dict(type='str', required=False, default='-1'),
+bucketmaxsize=dict(type='str', required=False, default='-1'), # noqa E501
-bucketmaxobjects=dict(type='int', required=False, default=-1))))
+bucketmaxobjects=dict(type='int', required=False, default=-1)))) # noqa E501

 # the AnsibleModule object
 module = AnsibleModule(argument_spec=fields,
@@ -533,8 +533,8 @@ def main():
 # radosgw connection
 rgw = radosgw.connection.RadosGWAdminConnection(host=rgw_host,
 port=port,
-access_key=admin_access_key,
+access_key=admin_access_key, # noqa E501
-secret_key=admin_secret_key,
+secret_key=admin_secret_key, # noqa E501
 aws_signature='AWS4',
 is_secure=is_secure)


@@ -1,14 +1,14 @@
 #!/usr/bin/python

-#
 # Copyright (c) 2018 Red Hat, Inc.
 #
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# GNU General Public License v3.0+
-#

 from __future__ import absolute_import, division, print_function
 __metaclass__ = type

+from ansible.module_utils.basic import AnsibleModule
+import datetime

 ANSIBLE_METADATA = {
 'metadata_version': '1.1',
@@ -61,9 +61,6 @@ EXAMPLES = '''

 RETURN = '''# '''

-from ansible.module_utils.basic import AnsibleModule
-import datetime
-

 def fatal(message, module):
 '''
@@ -117,9 +114,9 @@ def sort_osd_crush_location(location, module):
 "region",
 "root",
 ]
-return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0]))
+return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0])) # noqa E501
 except ValueError as error:
-fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module)
+fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module) # noqa E501


 def create_and_move_buckets_list(cluster, location, containerized=None):
@@ -131,10 +128,10 @@ def create_and_move_buckets_list(cluster, location, containerized=None):
 for item in location:
 bucket_type, bucket_name = item
 # ceph osd crush add-bucket maroot root
-cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized))
+cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized)) # noqa E501
 if previous_bucket:
 # ceph osd crush move monrack root=maroot
-cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized))
+cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized)) # noqa E501
 previous_bucket = item[1]
 return cmd_list

@@ -181,7 +178,7 @@ def run_module():
 startd = datetime.datetime.now()

 # run the Ceph command to add buckets
-rc, cmd, out, err = exec_commands(module, create_and_move_buckets_list(cluster, location, containerized))
+rc, cmd, out, err = exec_commands(module, create_and_move_buckets_list(cluster, location, containerized)) # noqa E501

 endd = datetime.datetime.now()
 delta = endd - startd

@@ -280,7 +280,10 @@ def run_module():
 name=dict(type='str', required=True),
 state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'),
 password=dict(type='str', required=False, no_log=True),
-roles=dict(type='list', required=False, choices=['administrator', 'read-only', 'block-manager', 'rgw-manager', 'cluster-manager', 'pool-manager', 'cephfs-manager'], default=[]),
+roles=dict(type='list',
+required=False,
+choices=['administrator', 'read-only', 'block-manager', 'rgw-manager', 'cluster-manager', 'pool-manager', 'cephfs-manager'],
+default=[]),
 )

 module = AnsibleModule(

@@ -1,4 +1,5 @@
 #!/usr/bin/python3
+
 # Copyright 2018, Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -189,6 +190,7 @@ def str_to_bool(val):
 else:
 raise ValueError("Invalid input value: %s" % val)

+
 def fatal(message, module):
 '''
 Report a fatal error and exit
@@ -460,14 +462,14 @@ def lookup_ceph_initial_entities(module, out):
 else:
 fatal("'auth_dump' key not present in json output:", module) # noqa E501

-if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False)):
+if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False)): # noqa E501
 # must be missing in auth_dump, as if it were in CEPH_INITIAL_KEYS
 # it'd be in entities from the above test. Report what's missing.
 missing = []
 for e in CEPH_INITIAL_KEYS:
 if e not in entities:
 missing.append(e)
-fatal("initial keyring does not contain keys: " + ' '.join(missing), module)
+fatal("initial keyring does not contain keys: " + ' '.join(missing), module) # noqa E501
 return entities


@@ -561,8 +563,8 @@ def run_module():
 file_path = dest
 else:
 if 'bootstrap' in dest:
-# Build a different path for bootstrap keys as there are stored as
+# Build a different path for bootstrap keys as there are stored
-# /var/lib/ceph/bootstrap-rbd/ceph.keyring
+# as /var/lib/ceph/bootstrap-rbd/ceph.keyring
 keyring_filename = cluster + '.keyring'
 else:
 keyring_filename = cluster + "." + name + ".keyring"
@@ -605,7 +607,7 @@ def run_module():
 result["stdout"] = "{0} already exists in {1} you must provide secret *and* caps when import_key is {2}".format(name, dest, import_key) # noqa E501
 result["rc"] = 0
 module.exit_json(**result)
-if (key_exist == 0 and (secret != _secret or caps != _caps)) or key_exist != 0:
+if (key_exist == 0 and (secret != _secret or caps != _caps)) or key_exist != 0: # noqa E501
 rc, cmd, out, err = exec_commands(module, create_key(
 module, result, cluster, name, secret, caps, import_key, file_path, container_image)) # noqa E501
 if rc != 0:
@@ -615,7 +617,6 @@ def run_module():
 module.set_fs_attributes_if_different(file_args, False)
 changed = True

-
 elif state == "absent":
 if key_exist == 0:
 rc, cmd, out, err = exec_commands(
@@ -645,7 +646,7 @@ def run_module():
 rc, cmd, out, err = exec_commands(
 module, list_keys(cluster, user, user_key, container_image))
 if rc != 0:
-result["stdout"] = "failed to retrieve ceph keys".format(name)
+result["stdout"] = "failed to retrieve ceph keys"
 result["sdterr"] = err
 result['rc'] = 0
 module.exit_json(**result)

@@ -1,4 +1,5 @@
 #!/usr/bin/python3
+
 # Copyright 2020, Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -47,8 +48,8 @@ options:
 required: true
 state:
 description:
-If 'present' is used, the module creates a pool if it doesn't exist or
+If 'present' is used, the module creates a pool if it doesn't exist
-update it if it already exists.
+or update it if it already exists.
 If 'absent' is used, the module will simply delete the pool.
 If 'list' is used, the module will return all details about the existing pools
 (json formatted).
@@ -115,7 +116,7 @@ options:
 EXAMPLES = '''

 pools:
-- { name: foo, size: 3, application: rbd, pool_type: 'replicated', pg_autoscale_mode: 'on' }
+- { name: foo, size: 3, application: rbd, pool_type: 'replicated', pg_autoscale_mode: 'on' } # noqa E501

 - hosts: all
 become: true
@@ -141,7 +142,6 @@ import stat # noqa E402
 import time # noqa E402


-
 def container_exec(binary, container_image):
 '''
 Build the docker CLI to run a command inside a container
@@ -183,6 +183,7 @@ def pre_generate_ceph_cmd(container_image=None):

 return cmd

+
 def generate_ceph_cmd(cluster, args, user, user_key, container_image=None):
 '''
 Generate 'ceph' command line to execute
@@ -215,19 +216,20 @@ def exec_commands(module, cmd):

 return rc, cmd, out, err

-def check_pool_exist(cluster, name, user, user_key, output_format='json', container_image=None):
+
+def check_pool_exist(cluster, name, user, user_key, output_format='json', container_image=None): # noqa E501
 '''
 Check if a given pool exists
 '''

 args = ['stats', name, '-f', output_format]

-cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image) # noqa E501

 return cmd


-def generate_get_config_cmd(param, cluster, user, user_key, container_image=None):
+def generate_get_config_cmd(param, cluster, user, user_key, container_image=None): # noqa E501
 _cmd = pre_generate_ceph_cmd(container_image=container_image)
 args = [
 '-n',
@@ -245,17 +247,17 @@ def generate_get_config_cmd(param, cluster, user, user_key, container_image=None
 return cmd


-def get_default_running_config(module, cluster, user, user_key, output_format='json', container_image=None):
+def get_default_running_config(module, cluster, user, user_key, output_format='json', container_image=None): # noqa E501
 '''
 Get some default values set in the cluster
 '''

-params = ['osd_pool_default_size', 'osd_pool_default_min_size', 'osd_pool_default_pg_num', 'osd_pool_default_pgp_num']
+params = ['osd_pool_default_size', 'osd_pool_default_min_size', 'osd_pool_default_pg_num', 'osd_pool_default_pgp_num'] # noqa E501

 default_running_values = {}

 for param in params:
-rc, cmd, out, err = exec_commands(module, generate_get_config_cmd(param, cluster, user, user_key, container_image=container_image))
+rc, cmd, out, err = exec_commands(module, generate_get_config_cmd(param, cluster, user, user_key, container_image=container_image)) # noqa E501

 if rc == 0:
 default_running_values[param] = out.strip()
@@ -265,59 +267,57 @@ def get_default_running_config(module, cluster, user, user_key, output_format='j
 return rc, cmd, default_running_values, err


-def get_application_pool(cluster, name, user, user_key, output_format='json', container_image=None):
+def get_application_pool(cluster, name, user, user_key, output_format='json', container_image=None): # noqa E501
 '''
 Get application type enabled on a given pool
 '''

-
 args = ['application', 'get', name, '-f', output_format]

-cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image) # noqa E501

 return cmd


-def enable_application_pool(cluster, name, application, user, user_key, container_image=None):
+def enable_application_pool(cluster, name, application, user, user_key, container_image=None): # noqa E501
 '''
 Enable application on a given pool
 '''

-
 args = ['application', 'enable', name, application]

-cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image) # noqa E501

 return cmd


-def disable_application_pool(cluster, name, application, user, user_key, container_image=None):
+def disable_application_pool(cluster, name, application, user, user_key, container_image=None): # noqa E501
 '''
 Disable application on a given pool
 '''

-args = [ 'application', 'disable', name, application, '--yes-i-really-mean-it' ]
+args = ['application', 'disable', name, application, '--yes-i-really-mean-it'] # noqa E501

-cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image) # noqa E501

 return cmd


-def get_pool_details(module, cluster, name, user, user_key, output_format='json', container_image=None):
+def get_pool_details(module, cluster, name, user, user_key, output_format='json', container_image=None): # noqa E501
 '''
 Get details about a given pool
 '''

 args = ['ls', 'detail', '-f', output_format]

-cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image) # noqa E501

 rc, cmd, out, err = exec_commands(module, cmd)

 if rc == 0:
 out = [p for p in json.loads(out.strip()) if p['pool_name'] == name][0]

-_rc, _cmd, application_pool, _err = exec_commands(module, get_application_pool(cluster, name, user, user_key, container_image=container_image))
+_rc, _cmd, application_pool, _err = exec_commands(module, get_application_pool(cluster, name, user, user_key, container_image=container_image)) # noqa E501

 application = list(json.loads(application_pool.strip()).keys())

@@ -340,20 +340,20 @@ def compare_pool_config(user_pool_config, running_pool_details):
 if str(running_pool_details[key]) != user_pool_config[key]['value']:
 delta[key] = user_pool_config[key]

-if str(running_pool_details['options'].get('target_size_ratio')) != user_pool_config['target_size_ratio']['value'] and user_pool_config['target_size_ratio']['value'] != None:
+if str(running_pool_details['options'].get('target_size_ratio')) != user_pool_config['target_size_ratio']['value'] and user_pool_config['target_size_ratio']['value'] is not None: # noqa E501
 delta['target_size_ratio'] = user_pool_config['target_size_ratio']

-if running_pool_details['application'] != user_pool_config['application']['value'] and user_pool_config['application']['value'] != None:
+if running_pool_details['application'] != user_pool_config['application']['value'] and user_pool_config['application']['value'] is not None: # noqa E501
 delta['application'] = {}
-delta['application']['new_application'] = user_pool_config['application']['value']
+delta['application']['new_application'] = user_pool_config['application']['value'] # noqa E501
 # to be improved (for update_pools()...)
 delta['application']['value'] = delta['application']['new_application']
-delta['application']['old_application'] = running_pool_details['application']
+delta['application']['old_application'] = running_pool_details['application'] # noqa E501

 return delta


-def list_pools(cluster, user, user_key, details, output_format='json', container_image=None):
+def list_pools(cluster, user, user_key, details, output_format='json', container_image=None): # noqa E501
 '''
 List existing pools
 '''
@@ -365,30 +365,30 @@ def list_pools(cluster, user, user_key, details, output_format='json', container

 args.extend(['-f', output_format])

-cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image) # noqa E501

 return cmd


-def create_pool(cluster, name, user, user_key, user_pool_config, container_image=None):
+def create_pool(cluster, name, user, user_key, user_pool_config, container_image=None): # noqa E501
 '''
 Create a new pool
 '''

-args = [ 'create', user_pool_config['pool_name']['value'], '--pg_num', user_pool_config['pg_num']['value'], '--pgp_num', user_pool_config['pgp_num']['value'], user_pool_config['type']['value'] ]
+args = ['create', user_pool_config['pool_name']['value'], '--pg_num', user_pool_config['pg_num']['value'], '--pgp_num', user_pool_config['pgp_num']['value'], user_pool_config['type']['value']] # noqa E501

 if user_pool_config['type']['value'] == 'replicated':
-args.extend([ user_pool_config['crush_rule']['value'], '--expected_num_objects', user_pool_config['expected_num_objects']['value'], '--size', user_pool_config['size']['value'], '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value'] ])
+args.extend([user_pool_config['crush_rule']['value'], '--expected_num_objects', user_pool_config['expected_num_objects']['value'], '--size', user_pool_config['size']['value'], '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value']]) # noqa E501

 elif user_pool_config['type']['value'] == 'erasure':
 args.extend([user_pool_config['erasure_profile']['value']])

-if user_pool_config['crush_rule']['value'] != None:
+if user_pool_config['crush_rule']['value'] is not None:
 args.extend([user_pool_config['crush_rule']['value']])

-args.extend([ '--expected_num_objects', user_pool_config['expected_num_objects']['value'] , '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value']])
+args.extend(['--expected_num_objects', user_pool_config['expected_num_objects']['value'] , '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value']]) # noqa E501

-cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image) # noqa E501

 return cmd

@@ -400,12 +400,12 @@ def remove_pool(cluster, name, user, user_key, container_image=None):

 args = ['rm', name, name, '--yes-i-really-really-mean-it']

-cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image) # noqa E501

 return cmd


-def update_pool(module, cluster, name, user, user_key, delta, container_image=None):
+def update_pool(module, cluster, name, user, user_key, delta, container_image=None): # noqa E501
 '''
 Update an existing pool
 '''
@@ -414,24 +414,24 @@ def update_pool(module, cluster, name, user, user_key, delta, container_image=No

 for key in delta.keys():
 if key != 'application':
-args = [ 'set', name, delta[key]['cli_set_opt'], delta[key]['value'] ]
+args = ['set', name, delta[key]['cli_set_opt'], delta[key]['value']] # noqa E501

-cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image)
+cmd = generate_ceph_cmd(cluster=cluster, args=args, user=user, user_key=user_key, container_image=container_image) # noqa E501

 rc, cmd, out, err = exec_commands(module, cmd)
 if rc != 0:
 return rc, cmd, out, err

 else:
-rc, cmd, out, err = exec_commands(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image))
+rc, cmd, out, err = exec_commands(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image)) # noqa E501
 if rc != 0:
 return rc, cmd, out, err

-rc, cmd, out, err = exec_commands(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image))
+rc, cmd, out, err = exec_commands(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image)) # noqa E501
 if rc != 0:
 return rc, cmd, out, err

-report = report + "\n" + "{} has been updated: {} is now {}".format(name, key, delta[key]['value'])
+report = report + "\n" + "{} has been updated: {} is now {}".format(name, key, delta[key]['value']) # noqa E501

 out = report
 return rc, cmd, out, err
@@ -453,6 +453,7 @@ def exit_module(module, out, rc, cmd, err, startd, changed=False):
 )
 module.exit_json(**result)

+
 def run_module():
 module_args = dict(
 cluster=dict(type='str', required=False, default='ceph'),
@@ -465,7 +466,7 @@ def run_module():
 pgp_num=dict(type='str', required=False, default=None),
 pg_autoscale_mode=dict(type='str', required=False, default='on'),
 target_size_ratio=dict(type='str', required=False, default=None),
-pool_type=dict(type='str', required=False, default='replicated', choices=['replicated', 'erasure', '1', '3']),
+pool_type=dict(type='str', required=False, default='replicated', choices=['replicated', 'erasure', '1', '3']), # noqa E501
 erasure_profile=dict(type='str', required=False, default='default'),
 rule_name=dict(type='str', required=False, default=None),
 expected_num_objects=dict(type='str', required=False, default="0"),
@@ -490,7 +491,7 @@ def run_module():

 if module.params.get('pg_autoscale_mode').lower() in ['true', 'on', 'yes']:
 pg_autoscale_mode = 'on'
-elif module.params.get('pg_autoscale_mode').lower() in ['false', 'off', 'no']:
+elif module.params.get('pg_autoscale_mode').lower() in ['false', 'off', 'no']: # noqa E501
 pg_autoscale_mode = 'off'
 else:
 pg_autoscale_mode = 'warn'
@@ -502,7 +503,7 @@ def run_module():
 else:
 pool_type = module.params.get('pool_type')

-if module.params.get('rule_name') == None:
+if module.params.get('rule_name') is None:
 rule_name = 'replicated_rule' if pool_type == 'replicated' else None
 else:
 rule_name = module.params.get('rule_name')
@@ -510,14 +511,12 @@ def run_module():
 erasure_profile = module.params.get('erasure_profile')
 expected_num_objects = module.params.get('expected_num_objects')

-
-
 user_pool_config = {
 'pool_name': {'value': name},
 'pg_num': {'value': pg_num, 'cli_set_opt': 'pg_num'},
 'pgp_num': {'value': pgp_num, 'cli_set_opt': 'pgp_num'},
-'pg_autoscale_mode': { 'value': pg_autoscale_mode, 'cli_set_opt': 'pg_autoscale_mode' },
+'pg_autoscale_mode': {'value': pg_autoscale_mode, 'cli_set_opt': 'pg_autoscale_mode'}, # noqa E501
-'target_size_ratio': { 'value': target_size_ratio, 'cli_set_opt': 'target_size_ratio' },
+'target_size_ratio': {'value': target_size_ratio, 'cli_set_opt': 'target_size_ratio'}, # noqa E501
 'application': {'value': application},
 'type': {'value': pool_type},
 'erasure_profile': {'value': erasure_profile},
@@ -566,32 +565,32 @@ def run_module():
 }

 if state == "present":
-rc, cmd, default_running_ceph_config, err = get_default_running_config(module, cluster, user, user_key, container_image=container_image)
+rc, cmd, default_running_ceph_config, err = get_default_running_config(module, cluster, user, user_key, container_image=container_image) # noqa E501
 if rc == 0:
 for k, v in def_opt.items():
-if module.params[k] == None:
+if module.params[k] is None:
-user_pool_config[k] = {'value': default_running_ceph_config[v['conf_name']], 'cli_set_opt': v['cli_set_opt']}
+user_pool_config[k] = {'value': default_running_ceph_config[v['conf_name']], 'cli_set_opt': v['cli_set_opt']} # noqa E501
 else:
-user_pool_config[k] = {'value': module.params.get(k), 'cli_set_opt': v['cli_set_opt']}
+user_pool_config[k] = {'value': module.params.get(k), 'cli_set_opt': v['cli_set_opt']} # noqa E501
-rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image))
+rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image)) # noqa E501
 if rc == 0:
-running_pool_details = get_pool_details(module, cluster, name, user, user_key, container_image=container_image)
+running_pool_details = get_pool_details(module, cluster, name, user, user_key, container_image=container_image) # noqa E501
-user_pool_config['pg_placement_num'] = { 'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num' }
+user_pool_config['pg_placement_num'] = { 'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num' } # noqa E501
-delta = compare_pool_config(user_pool_config, running_pool_details[2])
+delta = compare_pool_config(user_pool_config, running_pool_details[2]) # noqa E501
-if len(delta) > 0 and running_pool_details[2]['erasure_code_profile'] == "" and 'size' not in delta.keys():
+if len(delta) > 0 and running_pool_details[2]['erasure_code_profile'] == "" and 'size' not in delta.keys(): # noqa E501
-rc, cmd, out, err = update_pool(module, cluster, name, user, user_key, delta, container_image=container_image)
+rc, cmd, out, err = update_pool(module, cluster, name, user, user_key, delta, container_image=container_image) # noqa E501
 if rc == 0:
 changed = True
 else:
-out = "Pool {} already exists and there is nothing to update.".format(name)
+out = "Pool {} already exists and there is nothing to update.".format(name) # noqa E501
 else:
-rc, cmd, out, err = exec_commands(module, create_pool(cluster, name, user, user_key, user_pool_config=user_pool_config, container_image=container_image))
+rc, cmd, out, err = exec_commands(module, create_pool(cluster, name, user, user_key, user_pool_config=user_pool_config, container_image=container_image)) # noqa E501
-if user_pool_config['application']['value'] != None:
+if user_pool_config['application']['value'] is not None:
-_rc, _cmd, _out, _err = exec_commands(module, enable_application_pool(cluster, name, user_pool_config['application']['value'], user, user_key, container_image=container_image))
+_rc, _cmd, _out, _err = exec_commands(module, enable_application_pool(cluster, name, user_pool_config['application']['value'], user, user_key, container_image=container_image)) # noqa E501
 changed = True

 elif state == "list":
-rc, cmd, out, err = exec_commands(module, list_pools(cluster, name, user, user_key, details, container_image=container_image))
+rc, cmd, out, err = exec_commands(module, list_pools(cluster, name, user, user_key, details, container_image=container_image)) # noqa E501
 if rc != 0:
 out = "Couldn't list pool(s) present on the cluster"

@@ -604,8 +603,7 @@ def run_module():
 rc = 0
 out = "Skipped, since pool {} doesn't exist".format(name)

-exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed)
-
+exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa E501


 def main():

@@ -1,4 +1,5 @@
 #!/usr/bin/python
+
 import datetime
 import copy
 import json
@@ -443,6 +444,7 @@ def list_osd(module, container_image):

 return cmd

+
 def list_storage_inventory(module, container_image):
 '''
 List storage inventory.
@@ -454,6 +456,7 @@ def list_storage_inventory(module, container_image):

 return cmd

+
 def activate_osd():
 '''
 Activate all the OSDs on a machine
@@ -473,7 +476,7 @@ def is_lv(module, vg, lv, container_image):
 Check if an LV exists
 '''

-args = [ '--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg) ]
+args = ['--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg)] # noqa E501

 cmd = build_cmd(args, container_image, binary='lvs')

@@ -614,8 +617,7 @@ def run_module():

 if out_dict:
 data = module.params['data']
-result['stdout'] = 'skipped, since {0} is already used for an osd'.format( # noqa E501
+result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data) # noqa E501
-data)
 result['rc'] = 0
 module.exit_json(**result)

@@ -637,16 +639,16 @@ def run_module():
 skip = []
 for device_type in ['journal', 'data', 'db', 'wal']:
 # 1/ if we passed vg/lv
-if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None):
+if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa E501
 # 2/ check this is an actual lv/vg
-ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image)
+ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image) # noqa E501
 skip.append(ret)
 # 3/ This isn't a lv/vg device
 if not ret:
 module.params['{}_vg'.format(device_type)] = False
 module.params[device_type] = False
-# 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device
+# 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device # noqa E501
-elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None):
+elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa E501
 skip.append(True)

 cmd = zap_devices(module, container_image)

@@ -69,6 +69,7 @@ author:
 - 'Paul Cuzner'

 """
+
 import os # noqa E402
 import logging # noqa E402
 from logging.handlers import RotatingFileHandler # noqa E402
@@ -79,11 +80,10 @@ from ceph_iscsi_config.lun import LUN # noqa E402
 from ceph_iscsi_config.utils import valid_size # noqa E402
 import ceph_iscsi_config.settings as settings # noqa E402


 # the main function is called ansible_main to allow the call stack
 # to be checked to determine whether the call to the ceph_iscsi_config
 # modules is from ansible or not


 def ansible_main():

 # Define the fields needs to create/map rbd's the the host(s)

@@ -68,11 +68,11 @@ def delete_images(cfg):

 try:
 rbd_dev.delete()
-except rbd.ImageNotFound as err:
+except rbd.ImageNotFound:
 # Just log and ignore. If we crashed while purging we could delete
 # the image but not removed it from the config
 logger.debug("Image already deleted.")
-except rbd.ImageHasSnapshots as err:
+except rbd.ImageHasSnapshots:
 logger.error("Image still has snapshots.")
 # Older versions of ceph-iscsi-config do not have a error_msg
 # string.
@@ -81,9 +81,9 @@ def delete_images(cfg):

 if rbd_dev.error:
 if rbd_dev.error_msg:
-logger.error("Could not remove {}. Error: {}. Manually run the "
+logger.error("Could not remove {}. Error: {}. Manually run the " # noqa E501
 "rbd command line tool to delete.".
-format(image, rbd_error_msg))
+format(image, rbd_dev.error_msg))
 else:
 logger.error("Could not remove {}. Manually run the rbd "
 "command line tool to delete.".format(image))
@@ -92,13 +92,14 @@ def delete_images(cfg):

 return changes_made

-def delete_gateway_config(cfg):
+
+def delete_gateway_config(cfg, module):
 ioctx = cfg._open_ioctx()
 try:
 size, mtime = ioctx.stat(cfg.config_name)
 except rados.ObjectNotFound:
 logger.debug("gateway.conf already removed.")
-return false
+return False

 try:
 ioctx.remove_object(cfg.config_name)
@@ -128,7 +129,7 @@ def ansible_main():
 #
 # Purge gateway configuration, if the config has gateways
 if run_mode == 'gateway':
-changes_made = delete_gateway_config(cfg)
+changes_made = delete_gateway_config(cfg, module)
 elif run_mode == 'disks' and len(cfg.config['disks'].keys()) > 0:
 #
 # Remove the disks on this host, that have been registered in the