podman: support podman installation on rhel8

Add required changes to support podman on rhel8

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1667101

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
pull/3571/head
Guillaume Abrioux 2019-01-16 10:27:23 +01:00 committed by Sébastien Han
parent fd222a8bbf
commit 16efdbc59b
10 changed files with 53 additions and 30 deletions

View File

@ -651,6 +651,7 @@ def run_module():
module, list_keys(cluster, user, user_key, container_image))
if rc != 0:
result["stdout"] = "failed to retrieve ceph keys"
result["stderr"] = err
result['rc'] = 0
module.exit_json(**result)

View File

@ -193,7 +193,7 @@ def container_exec(binary, container_image):
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',
'-v', '/run/lvm/:/run/lvm/',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
os.path.join('--entrypoint=' + binary),

View File

@ -46,7 +46,7 @@ class TestCephVolumeModule(object):
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
@ -62,7 +62,7 @@ class TestCephVolumeModule(object):
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
@ -130,7 +130,7 @@ class TestCephVolumeModule(object):
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
@ -162,7 +162,7 @@ class TestCephVolumeModule(object):
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
@ -185,7 +185,7 @@ class TestCephVolumeModule(object):
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
@ -233,7 +233,7 @@ class TestCephVolumeModule(object):
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
@ -282,7 +282,7 @@ class TestCephVolumeModule(object):
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',

View File

@ -16,6 +16,7 @@
- /var/lib/ceph/bootstrap-rbd
- /var/lib/ceph/bootstrap-rbd-mirror
- /var/run/ceph
- /var/log/ceph
- name: create ceph initial directories
file:

View File

@ -46,7 +46,7 @@
tags:
with_pkg
- name: red hat based systems tasks
- name: red hat 7 based systems tasks
block:
- name: install python-docker-py on red hat / centos
package:
@ -56,20 +56,36 @@
until: result is succeeded
tags:
with_pkg
- name: pause after docker install before starting (on openstack vms)
pause: seconds=5
when:
- ceph_docker_on_openstack
tags:
with_pkg
- name: start docker service
service:
name: docker
state: started
enabled: yes
tags:
with_pkg
when:
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version == '7'
- name: pause after docker install before starting (on openstack vms)
pause: seconds=5
- name: red hat 8 based systems tasks
block:
- name: install podman
package:
name: 'podman'
state: present
register: result
until: result is succeeded
tags:
with_pkg
when:
- ceph_docker_on_openstack
tags:
with_pkg
- name: start docker service
service:
name: docker
state: started
enabled: yes
tags:
with_pkg
- ansible_os_family == 'RedHat'
- ansible_distribution_major_version == '8'

View File

@ -19,7 +19,7 @@
- name: set_fact container_binary
set_fact:
container_binary: "{{ 'podman' if is_podman and ansible_distribution == 'Fedora' else 'docker' }}"
container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_distribution == 'Fedora') or (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8') else 'docker' }}"
when: containerized_deployment
# Set ceph_release to ceph_stable by default

View File

@ -83,7 +83,7 @@
- name: import admin keyring into mon keyring
command: >
{{ ceph_authtool_cmd }}
/var/lib/ceph/tmp/{{ cluster }}.mon.keyring --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
/var/lib/ceph/tmp/{{ cluster }}.mon.keyring --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
when:
- not create_custom_admin_secret.get('skipped')
- cephx
@ -97,8 +97,8 @@
command: >
{{ ceph_mon_cmd }}
--cluster {{ cluster }}
--setuser ceph
--setgroup ceph
--setuser 167
--setgroup 167
--mkfs
-i {{ monitor_name }}
--fsid {{ fsid }}

View File

@ -41,7 +41,7 @@
- name: set_fact docker_exec_start_osd
set_fact:
docker_exec_start_osd: "{{ '{{ container_binary }} run --rm --privileged=true -v /var/run/udev/:/var/run/udev/:z -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /etc/ceph:/etc/ceph:z -v /dev:/dev --entrypoint=ceph-volume ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph-volume' }}"
docker_exec_start_osd: "{{ '{{ container_binary }} run --rm --privileged=true -v /var/run/udev/:/var/run/udev/:z -v /run/lvm/:/run/lvm/ -v /etc/ceph:/etc/ceph:z -v /dev:/dev --entrypoint=ceph-volume ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph-volume' }}"
- name: collect osd ids
shell: >

View File

@ -105,7 +105,7 @@ fi
-e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
{% endif -%}
{% if osd_scenario == 'lvm' -%}
-v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
-v /run/lvm/:/run/lvm/ \
-e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
-e OSD_ID="$1" \
--name=ceph-osd-"$1" \

View File

@ -60,7 +60,7 @@
- name: set_fact container_binary
set_fact:
container_binary: "{{ 'podman' if podman_binary.stat.exists and ansible_distribution == 'Fedora' else 'docker' }}"
container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_distribution == 'Fedora') or (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8') else 'docker' }}"
- import_role:
name: ceph-defaults
@ -461,14 +461,19 @@
tasks:
- import_role:
name: ceph-defaults
- name: check if podman binary is present
stat:
path: /usr/bin/podman
register: podman_binary
- name: set_fact container_binary
set_fact:
container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_distribution == 'Fedora') or (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8') else 'docker' }}"
- name: get ceph status from the first monitor
command: >
{{ 'podman' if podman_binary.stat.exists and ansible_distribution == 'Fedora' else 'docker' }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
register: ceph_status
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"