ceph_volume: support overriding bind-mounts

This makes it possible to call `podman run` with custom bind-mounts.

The cephadm-adopt.yml playbook needs it for a very specific use case, tracked in the bug below.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2027411

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit b02d71c307)
pull/7010/head
Guillaume Abrioux 2021-11-30 09:52:59 +01:00
parent 1628347253
commit e3b2514e2b
2 changed files with 108 additions and 69 deletions

View File

@ -194,24 +194,40 @@ EXAMPLES = '''
'''
def container_exec(binary, container_image, mounts=None):
    '''
    Build the docker CLI to run a command inside a container.

    :param binary: executable used as the container entrypoint
                   (e.g. 'ceph-volume').
    :param container_image: container image to spawn.
    :param mounts: optional dict mapping host source dirs to container
                   destinations (with optional ':z'/':ro' suffixes); entries
                   override or extend the defaults below.
    :return: list of CLI arguments suitable for command execution.
    '''
    # Default bind-mounts required by ceph-volume inside the container;
    # callers may override/extend them through `mounts`.
    _mounts = {
        '/run/lock/lvm': '/run/lock/lvm:z',
        '/var/run/udev': '/var/run/udev:z',
        '/dev': '/dev',
        '/etc/ceph': '/etc/ceph:z',
        '/run/lvm': '/run/lvm',
        '/var/lib/ceph': '/var/lib/ceph:z',
        '/var/log/ceph': '/var/log/ceph:z',
    }
    if mounts:
        _mounts.update(mounts)
    # Flatten the mapping into ['-v', 'src:dst', ...] pairs, preserving
    # insertion order (guaranteed by dict on Python 3.7+).
    volumes = sum(
        [['-v', '{}:{}'.format(src_dir, dst_dir)]
         for src_dir, dst_dir in _mounts.items()], [])
    # CEPH_CONTAINER_BINARY selects docker vs podman at runtime.
    container_binary = os.getenv('CEPH_CONTAINER_BINARY')
    command_exec = [container_binary, 'run',
                    '--rm',
                    '--privileged',
                    '--net=host',
                    '--ipc=host'] + volumes + \
                   ['--entrypoint=' + binary, container_image]
    return command_exec
def build_cmd(action, container_image, cluster='ceph', binary='ceph-volume'):
def build_cmd(action, container_image,
cluster='ceph',
binary='ceph-volume', mounts=None):
'''
Build the ceph-volume command
'''
@ -220,7 +236,7 @@ def build_cmd(action, container_image, cluster='ceph', binary='ceph-volume'):
if container_image:
cmd = container_exec(
binary, container_image)
binary, container_image, mounts=mounts)
else:
binary = [binary]
cmd = binary
@ -409,7 +425,10 @@ def list_osd(module, container_image):
# Build the CLI
action = ['lvm', 'list']
cmd = build_cmd(action, container_image, cluster)
cmd = build_cmd(action,
container_image,
cluster,
mounts={'/var/lib/ceph': '/var/lib/ceph:ro'})
if data:
cmd.append(data)
cmd.append('--format=json')

View File

@ -17,15 +17,26 @@ except ImportError:
print('You need the mock library installed on python2.x to run tests')
# 'docker run' prefix shared by the container-based test expectations:
# runtime flags, the fixed bind-mounts, then the ceph-volume entrypoint.
_bind_mounts = [
    '/run/lock/lvm:/run/lock/lvm:z',
    '/var/run/udev/:/var/run/udev/:z',
    '/dev:/dev',
    '/etc/ceph:/etc/ceph:z',
    '/run/lvm/:/run/lvm/',
    '/var/lib/ceph/:/var/lib/ceph/:z',
    '/var/log/ceph/:/var/log/ceph/:z',
]
container_cmd = (['docker', 'run', '--rm', '--privileged',
                  '--net=host', '--ipc=host']
                 + [arg for mount in _bind_mounts for arg in ('-v', mount)]
                 + ['--entrypoint=ceph-volume'])
def get_mounts(mounts=None):
    """Return the default ceph-volume bind-mounts as '-v src:dst' CLI args.

    Extra or overriding mounts may be supplied as a {source: destination}
    dict; they are merged on top of the defaults before flattening.
    """
    defaults = {
        '/run/lock/lvm': '/run/lock/lvm:z',
        '/var/run/udev': '/var/run/udev:z',
        '/dev': '/dev',
        '/etc/ceph': '/etc/ceph:z',
        '/run/lvm': '/run/lvm',
        '/var/lib/ceph': '/var/lib/ceph:z',
        '/var/log/ceph': '/var/log/ceph:z',
    }
    if mounts is not None:
        defaults.update(mounts)
    args = []
    for source, destination in defaults.items():
        args.extend(['-v', '{}:{}'.format(source, destination)])
    return args
def get_container_cmd(mounts=None):
    """Build the expected 'docker run' prefix, with optional extra mounts."""
    runtime_flags = ['docker', 'run', '--rm', '--privileged',
                     '--net=host', '--ipc=host']
    entrypoint = ['--entrypoint=ceph-volume']
    return runtime_flags + get_mounts(mounts) + entrypoint
@mock.patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'docker'})
@ -66,7 +77,7 @@ class TestCephVolumeModule(object):
def test_container_exec(self):
    # container_exec() with no custom mounts must produce the default
    # 'docker run' invocation (default bind-mounts, ceph-volume
    # entrypoint) followed by the image name.
    fake_binary = "ceph-volume"
    fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
    expected_command_list = get_container_cmd() + [fake_container_image]
    result = ceph_volume.container_exec(fake_binary, fake_container_image)
    assert result == expected_command_list
@ -74,7 +85,8 @@ class TestCephVolumeModule(object):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda'}
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
expected_command_list = get_container_cmd() + \
[fake_container_image,
'--cluster',
'ceph',
'lvm',
@ -156,7 +168,11 @@ class TestCephVolumeModule(object):
fake_module = MagicMock()
fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
expected_command_list = get_container_cmd(
{
'/var/lib/ceph': '/var/lib/ceph:ro'
}) + \
[fake_container_image,
'--cluster',
'ceph',
'lvm',
@ -181,7 +197,8 @@ class TestCephVolumeModule(object):
def test_list_storage_inventory_container(self):
fake_module = MagicMock()
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
expected_command_list = get_container_cmd() + \
[fake_container_image,
'--cluster',
'ceph',
'inventory',
@ -198,7 +215,8 @@ class TestCephVolumeModule(object):
fake_action = "create"
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
expected_command_list = get_container_cmd() + \
[fake_container_image,
'--cluster',
'ceph',
'lvm',
@ -240,7 +258,8 @@ class TestCephVolumeModule(object):
fake_action = "prepare"
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
expected_command_list = get_container_cmd() + \
[fake_container_image,
'--cluster',
'ceph',
'lvm',
@ -284,7 +303,8 @@ class TestCephVolumeModule(object):
'batch_devices': ["/dev/sda", "/dev/sdb"]}
fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
expected_command_list = get_container_cmd() + \
[fake_container_image,
'--cluster',
'ceph',
'lvm',