From 438da91b324a001ed0c5e52e4235ad0f3a9817e2 Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Mon, 17 Jun 2024 16:35:10 +0200 Subject: [PATCH] Revert "nfs-ganesha support removal" This reverts commit 675667e1d60b7080dad7293f2954de23718c5042. Signed-off-by: Guillaume Abrioux (cherry picked from commit 59198f5bcdf2eca5cc99c25951d93513c508c01e) --- CONTRIBUTING.md | 1 + Vagrantfile | 48 +++++ contrib/vagrant_variables.yml.atomic | 1 + contrib/vagrant_variables.yml.linode | 1 + contrib/vagrant_variables.yml.openstack | 1 + dashboard.yml | 1 + docs/source/testing/scenarios.rst | 1 + group_vars/all.yml.sample | 39 ++++ group_vars/nfss.yml.sample | 131 ++++++++++++ infrastructure-playbooks/cephadm-adopt.yml | 81 ++++++- infrastructure-playbooks/cephadm.yml | 2 + infrastructure-playbooks/docker-to-podman.yml | 11 +- infrastructure-playbooks/gather-ceph-logs.yml | 1 + infrastructure-playbooks/purge-cluster.yml | 61 ++++++ infrastructure-playbooks/purge-dashboard.yml | 2 + infrastructure-playbooks/rolling_update.yml | 64 ++++++ ...inerized-to-containerized-ceph-daemons.yml | 55 +++++ .../take-over-existing-cluster.yml | 1 + plugins/callback/installer_checkpoint.py | 5 + roles/ceph-common/tasks/main.yml | 3 +- roles/ceph-common/tasks/selinux.yml | 3 +- .../tasks/fetch_image.yml | 1 + roles/ceph-defaults/defaults/main.yml | 39 ++++ roles/ceph-handler/handlers/main.yml | 6 + .../tasks/check_running_containers.yml | 8 + .../tasks/check_socket_non_container.yml | 8 + roles/ceph-handler/tasks/handler_nfss.yml | 28 +++ roles/ceph-handler/tasks/main.yml | 5 + .../templates/restart_nfs_daemon.sh.j2 | 26 +++ roles/ceph-infra/tasks/configure_firewall.yml | 34 +++ roles/ceph-nfs/LICENSE | 201 ++++++++++++++++++ roles/ceph-nfs/README.md | 3 + roles/ceph-nfs/defaults/main.yml | 122 +++++++++++ roles/ceph-nfs/meta/main.yml | 14 ++ roles/ceph-nfs/tasks/create_rgw_nfs_user.yml | 23 ++ roles/ceph-nfs/tasks/main.yml | 96 +++++++++ .../tasks/pre_requisite_container.yml | 108 ++++++++++ .../tasks/pre_requisite_non_container.yml | 96 +++++++++ .../pre_requisite_non_container_debian.yml | 80 +++++++ .../pre_requisite_non_container_red_hat.yml | 43 ++++ roles/ceph-nfs/tasks/start_nfs.yml | 105 +++++++++ roles/ceph-nfs/tasks/systemd.yml | 9 + roles/ceph-nfs/templates/ceph-nfs.service.j2 | 56 +++++ roles/ceph-nfs/templates/ganesha.conf.j2 | 124 +++++++++++ roles/ceph-nfs/templates/idmap.conf.j2 | 137 ++++++++++++ roles/ceph-nfs/templates/systemd-run.j2 | 27 +++ roles/ceph-validate/tasks/check_nfs.yml | 15 ++ roles/ceph-validate/tasks/main.yml | 4 + site-container.yml.sample | 39 ++++ site.yml.sample | 39 ++++ tests/conftest.py | 10 +- .../add-mdss/container/vagrant_variables.yml | 1 + .../functional/add-mdss/vagrant_variables.yml | 1 + .../add-mgrs/container/vagrant_variables.yml | 1 + .../functional/add-mgrs/vagrant_variables.yml | 1 + .../add-mons/container/vagrant_variables.yml | 1 + .../functional/add-mons/vagrant_variables.yml | 1 + .../add-osds/container/vagrant_variables.yml | 1 + .../functional/add-osds/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../add-rbdmirrors/vagrant_variables.yml | 1 + .../add-rgws/container/vagrant_variables.yml | 1 + .../functional/add-rgws/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../all-in-one/vagrant_variables.yml | 1 + tests/functional/all_daemons/container/hosts | 3 + .../container/vagrant_variables.yml | 1 + tests/functional/all_daemons/group_vars/nfss | 10 + tests/functional/all_daemons/hosts | 3 + 
.../all_daemons/vagrant_variables.yml | 1 + .../all_daemons_ipv6/container/hosts | 3 + .../container/vagrant_variables.yml | 1 + .../all_daemons_ipv6/group_vars/nfss | 10 + tests/functional/all_daemons_ipv6/hosts | 3 + .../all_daemons_ipv6/vagrant_variables.yml | 1 + tests/functional/cephadm/hosts | 3 + .../functional/cephadm/vagrant_variables.yml | 1 + tests/functional/collocation/container/hosts | 6 +- .../container/vagrant_variables.yml | 1 + tests/functional/collocation/hosts | 6 +- .../collocation/vagrant_variables.yml | 1 + tests/functional/dev_setup.yml | 14 ++ .../docker2podman/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../external_clients/vagrant_variables.yml | 1 + .../infra_lv_create/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../lvm-auto-discovery/vagrant_variables.yml | 1 + .../lvm-batch/container/vagrant_variables.yml | 1 + .../lvm-batch/vagrant_variables.yml | 1 + .../lvm-osds/container/vagrant_variables.yml | 1 + .../functional/lvm-osds/vagrant_variables.yml | 1 + .../vagrant_variables.yml | 1 + tests/functional/podman/hosts | 7 +- tests/functional/podman/vagrant_variables.yml | 1 + .../container/secondary/vagrant_variables.yml | 1 + .../rbdmirror/container/vagrant_variables.yml | 1 + .../rbdmirror/secondary/vagrant_variables.yml | 1 + .../rbdmirror/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../shrink_mds/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../shrink_mgr/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../shrink_mon/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../shrink_osd/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../shrink_rbdmirror/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../shrink_rgw/vagrant_variables.yml | 1 + .../container/vagrant_variables.yml | 1 + .../functional/subset_update/group_vars/nfss | 10 + .../subset_update/vagrant_variables.yml | 1 + .../functional/tests/nfs/test_nfs_ganesha.py | 48 +++++ tests/pytest.ini | 1 + vagrant_variables.yml.sample | 1 + 117 files changed, 2186 insertions(+), 12 deletions(-) create mode 100644 group_vars/nfss.yml.sample create mode 100644 roles/ceph-handler/tasks/handler_nfss.yml create mode 100644 roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 create mode 100644 roles/ceph-nfs/LICENSE create mode 100644 roles/ceph-nfs/README.md create mode 100644 roles/ceph-nfs/defaults/main.yml create mode 100644 roles/ceph-nfs/meta/main.yml create mode 100644 roles/ceph-nfs/tasks/create_rgw_nfs_user.yml create mode 100644 roles/ceph-nfs/tasks/main.yml create mode 100644 roles/ceph-nfs/tasks/pre_requisite_container.yml create mode 100644 roles/ceph-nfs/tasks/pre_requisite_non_container.yml create mode 100644 roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml create mode 100644 roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml create mode 100644 roles/ceph-nfs/tasks/start_nfs.yml create mode 100644 roles/ceph-nfs/tasks/systemd.yml create mode 100644 roles/ceph-nfs/templates/ceph-nfs.service.j2 create mode 100644 roles/ceph-nfs/templates/ganesha.conf.j2 create mode 100644 roles/ceph-nfs/templates/idmap.conf.j2 create mode 100644 roles/ceph-nfs/templates/systemd-run.j2 create mode 100644 roles/ceph-validate/tasks/check_nfs.yml create mode 100644 tests/functional/all_daemons/group_vars/nfss create mode 100644 tests/functional/all_daemons_ipv6/group_vars/nfss create mode 100644 
tests/functional/subset_update/group_vars/nfss create mode 100644 tests/functional/tests/nfs/test_nfs_ganesha.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a92c2f9e7..05a78c7f5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -62,6 +62,7 @@ It means if you are pushing a patch modifying one of these files: - `./roles/ceph-rbd-mirror/defaults/main.yml` - `./roles/ceph-defaults/defaults/main.yml` - `./roles/ceph-osd/defaults/main.yml` +- `./roles/ceph-nfs/defaults/main.yml` - `./roles/ceph-client/defaults/main.yml` - `./roles/ceph-common/defaults/main.yml` - `./roles/ceph-mon/defaults/main.yml` diff --git a/Vagrantfile b/Vagrantfile index 1129d8513..04b465e83 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -20,6 +20,7 @@ NMONS = settings['mon_vms'] NOSDS = settings['osd_vms'] NMDSS = settings['mds_vms'] NRGWS = settings['rgw_vms'] +NNFSS = settings['nfs_vms'] NRBD_MIRRORS = settings['rbd_mirror_vms'] CLIENTS = settings['client_vms'] MGRS = settings['mgr_vms'] @@ -61,6 +62,7 @@ ansible_provision = proc do |ansible| 'osds' => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" }, 'mdss' => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" }, 'rgws' => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" }, + 'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" }, 'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" }, 'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" }, 'mgrs' => (0..MGRS - 1).map { |j| "#{LABEL_PREFIX}mgr#{j}" }, @@ -371,6 +373,52 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| end end + (0..NNFSS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}nfs#{i}" do |nfs| + nfs.vm.hostname = "#{LABEL_PREFIX}nfs#{i}" + if ASSIGN_STATIC_IP && !IPV6 + nfs.vm.network :private_network, + :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}" + end + + # Virtualbox + nfs.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + nfs.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + nfs.vm.provider :libvirt do |lv,override| + lv.memory = MEMORY + lv.random_hostname = true + if IPV6 then + override.vm.network :private_network, + :libvirt__ipv6_address => "#{PUBLIC_SUBNET}", + :libvirt__ipv6_prefix => "64", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "veryisolated", + :libvirt__network_name => "ipv6-public-network", + :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}", + :netmask => "64" + end + end + + # Parallels + nfs.vm.provider "parallels" do |prl| + prl.name = "ceph-nfs#{i}" + prl.memory = "#{MEMORY}" + end + + nfs.vm.provider :linode do |provider| + provider.label = nfs.vm.hostname + end + end + end + (0..NMDSS - 1).each do |i| config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds| mds.vm.hostname = "#{LABEL_PREFIX}mds#{i}" diff --git a/contrib/vagrant_variables.yml.atomic b/contrib/vagrant_variables.yml.atomic index 20fa2b418..3adff7a88 100644 --- a/contrib/vagrant_variables.yml.atomic +++ b/contrib/vagrant_variables.yml.atomic @@ -7,6 +7,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 rbd_mirror_vms: 0 client_vms: 0 mgr_vms: 0 diff --git a/contrib/vagrant_variables.yml.linode b/contrib/vagrant_variables.yml.linode index 1352637f0..e62a3bffe 100644 --- a/contrib/vagrant_variables.yml.linode +++ b/contrib/vagrant_variables.yml.linode @@ -24,6 +24,7 @@ mon_vms: 3 osd_vms: 3 mds_vms: 1 rgw_vms: 0 +nfs_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/contrib/vagrant_variables.yml.openstack 
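The Vagrantfile change above only brings up NFS gateway VMs when a non-zero nfs_vms count is present in vagrant_variables.yml. A minimal sketch of such a file for a one-gateway lab follows; the counts are illustrative and the other settings the Vagrantfile expects (subnets, box, memory, ...) are omitted.

    ---
    # Illustrative VM counts only -- not a complete vagrant_variables.yml
    mon_vms: 1
    osd_vms: 1
    mds_vms: 1          # needed if the CephFS export (nfs_file_gw) is wanted
    rgw_vms: 1          # needed if the RGW export (nfs_obj_gw) is wanted
    nfs_vms: 1          # consumed as NNFSS by the Vagrantfile hunk above
    rbd_mirror_vms: 0
    client_vms: 0
    mgr_vms: 1
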
b/contrib/vagrant_variables.yml.openstack index 73da1918b..420c09c98 100644 --- a/contrib/vagrant_variables.yml.openstack +++ b/contrib/vagrant_variables.yml.openstack @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/dashboard.yml b/dashboard.yml index fd670601c..e998e1e53 100644 --- a/dashboard.yml +++ b/dashboard.yml @@ -7,6 +7,7 @@ - "{{ rgw_group_name|default('rgws') }}" - "{{ mgr_group_name|default('mgrs') }}" - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" - "{{ monitoring_group_name|default('monitoring') }}" gather_facts: false become: true diff --git a/docs/source/testing/scenarios.rst b/docs/source/testing/scenarios.rst index 525f19cb1..c05d91899 100644 --- a/docs/source/testing/scenarios.rst +++ b/docs/source/testing/scenarios.rst @@ -47,6 +47,7 @@ to follow (most of them are 1 line settings). osd_vms: 0 mds_vms: 0 rgw_vms: 0 + nfs_vms: 0 rbd_mirror_vms: 0 client_vms: 0 mgr_vms: 0 diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index 325846ebe..aa293481f 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -52,6 +52,7 @@ dummy: #osd_group_name: osds #rgw_group_name: rgws #mds_group_name: mdss +#nfs_group_name: nfss #rbdmirror_group_name: rbdmirrors #client_group_name: clients #mgr_group_name: mgrs @@ -62,6 +63,7 @@ dummy: # - "{{ osd_group_name }}" # - "{{ rgw_group_name }}" # - "{{ mds_group_name }}" +# - "{{ nfs_group_name }}" # - "{{ rbdmirror_group_name }}" # - "{{ client_group_name }}" # - "{{ mgr_group_name }}" @@ -79,6 +81,7 @@ dummy: #ceph_osd_firewall_zone: public #ceph_rgw_firewall_zone: public #ceph_mds_firewall_zone: public +#ceph_nfs_firewall_zone: public #ceph_rbdmirror_firewall_zone: public #ceph_dashboard_firewall_zone: public #ceph_rgwloadbalancer_firewall_zone: public @@ -155,6 +158,13 @@ dummy: #ceph_stable_release: reef #ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}" +#nfs_ganesha_stable: true # use stable repos for nfs-ganesha +#centos_release_nfs: centos-release-nfs-ganesha4 +#nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu +#nfs_ganesha_apt_keyserver: keyserver.ubuntu.com +#nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA +#libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu + # Use the option below to specify your applicable package tree, eg. 
when using non-LTS Ubuntu versions # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ # for more info read: https://github.com/ceph/ceph-ansible/issues/305 @@ -189,6 +199,13 @@ dummy: #ceph_dev_branch: main # development branch you would like to use e.g: main, wip-hack #ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built) +#nfs_ganesha_dev: false # use development repos for nfs-ganesha + +# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman +# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous +#nfs_ganesha_flavor: "ceph_main" + + # REPOSITORY: CUSTOM # # Enabled when ceph_repository == 'custom' @@ -404,6 +421,10 @@ dummy: #handler_health_rgw_check_retries: 5 #handler_health_rgw_check_delay: 10 +# NFS handler checks +#handler_health_nfs_check_retries: 5 +#handler_health_nfs_check_delay: 10 + # RBD MIRROR handler checks #handler_health_rbd_mirror_check_retries: 5 #handler_health_rbd_mirror_check_delay: 10 @@ -425,6 +446,24 @@ dummy: #ceph_rbd_mirror_pool: "rbd" +############### +# NFS-GANESHA # +############### +# +# Access type options +# +# Enable NFS File access +# If set to true, then ganesha is set up to export the root of the +# Ceph filesystem, and ganesha's attribute and directory caching is disabled +# as much as possible since libcephfs clients also caches the same +# information. +# +# Set this to true to enable File access via NFS. Requires an MDS role. +#nfs_file_gw: false +# Set this to true to enable Object access via NFS. Requires an RGW role. +#nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}" + + ################### # CONFIG OVERRIDE # ################### diff --git a/group_vars/nfss.yml.sample b/group_vars/nfss.yml.sample new file mode 100644 index 000000000..1fc46ff1a --- /dev/null +++ b/group_vars/nfss.yml.sample @@ -0,0 +1,131 @@ +--- +# Variables here are applicable to all host groups NOT roles + +# This sample file generated by generate_group_vars_sample.sh + +# Dummy variable to avoid error because ansible does not recognize the +# file as a good configuration file when no variable in it. +dummy: + +# You can override vars by using host or group vars + +########### +# GENERAL # +########### + +# Even though NFS nodes should not have the admin key +# at their disposal, some people might want to have it +# distributed on RGW nodes. 
Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+# Whether docker container or systemd service should be enabled
+# and started, it's useful to set it to false if nfs-ganesha
+# service is managed by pacemaker
+#ceph_nfs_enable_service: true
+
+# ceph-nfs systemd service uses ansible's hostname as an instance id,
+# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
+# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
+# such case it's better to have constant instance id instead which
+# can be set by 'ceph_nfs_service_suffix'
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
+
+######################
+# NFS Ganesha Config #
+######################
+#ceph_nfs_log_file: "/var/log/ganesha/ganesha.log"
+#ceph_nfs_dynamic_exports: false
+# If set to true then rados is used to store ganesha exports
+# and client sessions information, this is useful if you
+# run multiple nfs-ganesha servers in active/passive mode and
+# want to do failover
+#ceph_nfs_rados_backend: false
+# Name of the rados object used to store a list of the export rados
+# object URLS
+#ceph_nfs_rados_export_index: "ganesha-export-index"
+# Address ganesha service should listen on, by default ganesha listens on all
+# addresses. (Note: ganesha ignores this parameter in current version due to
+# this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217)
+# ceph_nfs_bind_addr: 0.0.0.0
+
+# If set to true, then ganesha's attribute and directory caching is disabled
+# as much as possible. Currently, ganesha caches by default.
+# When using ganesha as CephFS's gateway, it is recommended to turn off
+# ganesha's caching as the libcephfs clients also cache the same information.
+# Note: Irrespective of this option's setting, ganesha's caching is disabled
+# when setting 'nfs_file_gw' option as true.
+#ceph_nfs_disable_caching: false
+
+# This is the file ganesha will use to control NFSv4 ID mapping
+#ceph_nfs_idmap_conf: "/etc/ganesha/idmap.conf"
+
+# idmap configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+# Example:
+# idmap_conf_overrides:
+#   General:
+#     Domain: foo.domain.net
+#idmap_conf_overrides: {}
+
+####################
+# FSAL Ceph Config #
+####################
+#ceph_nfs_ceph_export_id: 20133
+#ceph_nfs_ceph_pseudo_path: "/cephfile"
+#ceph_nfs_ceph_protocols: "3,4"
+#ceph_nfs_ceph_access_type: "RW"
+#ceph_nfs_ceph_user: "admin"
+#ceph_nfs_ceph_squash: "Root_Squash"
+#ceph_nfs_ceph_sectype: "sys,krb5,krb5i,krb5p"
+
+###################
+# FSAL RGW Config #
+###################
+#ceph_nfs_rgw_export_id: 20134
+#ceph_nfs_rgw_pseudo_path: "/cephobject"
+#ceph_nfs_rgw_protocols: "3,4"
+#ceph_nfs_rgw_access_type: "RW"
+#ceph_nfs_rgw_user: "cephnfs"
+#ceph_nfs_rgw_squash: "Root_Squash"
+#ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
+# Note: keys are optional and can be generated, but not on containerized, where
+# they must be configured.
+# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ganesha configuration file override.
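In practice only a handful of the knobs above are overridden per deployment. The following sketch of a group_vars/nfss.yml uses the variable names documented in this sample; the values themselves are assumptions for a small CephFS-backed gateway.

    ---
    # Illustrative values for a CephFS-over-NFS gateway
    nfs_file_gw: true                      # from group_vars/all.yml: export CephFS, needs an MDS
    nfs_obj_gw: false                      # no RGW export in this sketch
    ceph_nfs_rados_backend: true           # keep export/session state in RADOS for failover
    ceph_nfs_ceph_pseudo_path: "/cephfs"   # pseudo path clients will mount
    ceph_nfs_ceph_squash: "No_Root_Squash"
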
+# These multiline strings will be appended to the contents of the blocks in ganesha.conf and +# must be in the correct ganesha.conf format seen here: +# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example +# +# Example: +# CACHEINODE { +# # Entries_HWMark = 100000; +# } +# +# ganesha_core_param_overrides: +# ganesha_ceph_export_overrides: +# ganesha_rgw_export_overrides: +# ganesha_rgw_section_overrides: +# ganesha_log_overrides: +# ganesha_conf_overrides: | +# CACHEINODE { +# # Entries_HWMark = 100000; +# } + +########## +# DOCKER # +########## + +#ceph_docker_image: "ceph/daemon" +#ceph_docker_image_tag: latest +#ceph_nfs_docker_extra_env: +#ceph_config_keys: [] # DON'T TOUCH ME + diff --git a/infrastructure-playbooks/cephadm-adopt.yml b/infrastructure-playbooks/cephadm-adopt.yml index 30f2b6f6b..7fedf18fc 100644 --- a/infrastructure-playbooks/cephadm-adopt.yml +++ b/infrastructure-playbooks/cephadm-adopt.yml @@ -35,6 +35,7 @@ - "{{ rgw_group_name|default('rgws') }}" - "{{ mgr_group_name|default('mgrs') }}" - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" - "{{ monitoring_group_name|default('monitoring') }}" become: true any_errors_fatal: true @@ -143,7 +144,8 @@ inventory_hostname in groups.get(mds_group_name, []) or inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(mgr_group_name, []) or - inventory_hostname in groups.get(rbdmirror_group_name, []) + inventory_hostname in groups.get(rbdmirror_group_name, []) or + inventory_hostname in groups.get(nfs_group_name, []) - name: Configure repository for installing cephadm when: containerized_deployment | bool @@ -1009,6 +1011,82 @@ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}" state: absent +- name: Stop and remove legacy ceph nfs daemons + hosts: "{{ nfs_group_name|default('nfss') }}" + tags: 'ceph_nfs_adopt' + serial: 1 + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-nfs role + ansible.builtin.import_role: + name: ceph-nfs + tasks_from: create_rgw_nfs_user.yml + + - name: Enable ceph mgr nfs module + ceph_mgr_module: + name: "nfs" + cluster: "{{ cluster }}" + state: enable + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Stop and disable ceph-nfs systemd service + ansible.builtin.service: + name: "ceph-nfs@{{ ansible_facts['hostname'] }}" + state: stopped + enabled: false + failed_when: false + + - name: Reset failed ceph-nfs systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-nfs@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module + changed_when: false + failed_when: false + when: containerized_deployment | bool + + - name: Remove ceph-nfs systemd unit files + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/ceph-nfs@.service + - /etc/systemd/system/ceph-nfs@.service.d + + - name: Remove legacy ceph radosgw directory + ansible.builtin.file: + path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}" + state: absent + + - name: Create nfs ganesha cluster + ansible.builtin.command: "{{ cephadm_cmd }} shell -k 
/etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs cluster create {{ ansible_facts['hostname'] }} {{ ansible_facts['hostname'] }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Create cephfs export + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create cephfs {{ cephfs }} {{ ansible_facts['hostname'] }} {{ ceph_nfs_ceph_pseudo_path }} --squash {{ ceph_nfs_ceph_squash }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + when: nfs_file_gw | bool + + - name: Create rgw export + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create rgw --cluster-id {{ ansible_facts['hostname'] }} --pseudo-path {{ ceph_nfs_rgw_pseudo_path }} --user-id {{ ceph_nfs_rgw_user }} --squash {{ ceph_nfs_rgw_squash }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + when: nfs_obj_gw | bool + - name: Redeploy rbd-mirror daemons hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" become: true @@ -1321,6 +1399,7 @@ - "{{ rgw_group_name|default('rgws') }}" - "{{ mgr_group_name|default('mgrs') }}" - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" - "{{ monitoring_group_name|default('monitoring') }}" become: true gather_facts: false diff --git a/infrastructure-playbooks/cephadm.yml b/infrastructure-playbooks/cephadm.yml index b7f05209f..b08e7f21d 100644 --- a/infrastructure-playbooks/cephadm.yml +++ b/infrastructure-playbooks/cephadm.yml @@ -7,6 +7,7 @@ - "{{ rgw_group_name|default('rgws') }}" - "{{ mgr_group_name|default('mgrs') }}" - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" - "{{ monitoring_group_name|default('monitoring') }}" become: true gather_facts: false @@ -207,6 +208,7 @@ - "{{ rgw_group_name|default('rgws') }}" - "{{ mgr_group_name|default('mgrs') }}" - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" - "{{ monitoring_group_name|default('monitoring') }}" become: true gather_facts: false diff --git a/infrastructure-playbooks/docker-to-podman.yml b/infrastructure-playbooks/docker-to-podman.yml index 5034553fc..784a244a5 100644 --- a/infrastructure-playbooks/docker-to-podman.yml +++ b/infrastructure-playbooks/docker-to-podman.yml @@ -11,6 +11,7 @@ - osds - mdss - rgws + - nfss - rbdmirrors - clients - mgrs @@ -59,6 +60,7 @@ - "{{ osd_group_name | default('osds') }}" - "{{ mds_group_name | default('mdss') }}" - "{{ rgw_group_name | default('rgws') }}" + - "{{ nfs_group_name | default('nfss') }}" - "{{ mgr_group_name | default('mgrs') }}" - "{{ rbdmirror_group_name | default('rbdmirrors') }}" - "{{ monitoring_group_name | default('monitoring') }}" @@ -112,7 +114,8 @@ inventory_hostname in groups.get(mds_group_name, []) or inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(mgr_group_name, []) or - inventory_hostname in groups.get(rbdmirror_group_name, []) + inventory_hostname in groups.get(rbdmirror_group_name, []) or + 
inventory_hostname in groups.get(nfs_group_name, []) - name: Pulling alertmanager/grafana/prometheus images from docker daemon ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}" @@ -156,6 +159,12 @@ tasks_from: systemd.yml when: inventory_hostname in groups.get(mgr_group_name, []) + - name: Import ceph-nfs role + ansible.builtin.import_role: + name: ceph-nfs + tasks_from: systemd.yml + when: inventory_hostname in groups.get(nfs_group_name, []) + - name: Import ceph-osd role ansible.builtin.import_role: name: ceph-osd diff --git a/infrastructure-playbooks/gather-ceph-logs.yml b/infrastructure-playbooks/gather-ceph-logs.yml index 759debfda..ede64e526 100644 --- a/infrastructure-playbooks/gather-ceph-logs.yml +++ b/infrastructure-playbooks/gather-ceph-logs.yml @@ -5,6 +5,7 @@ - osds - mdss - rgws + - nfss - rbdmirrors - clients - mgrs diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml index b55e46849..d1a4114f9 100644 --- a/infrastructure-playbooks/purge-cluster.yml +++ b/infrastructure-playbooks/purge-cluster.yml @@ -37,6 +37,7 @@ - mdss - rgws - rbdmirrors + - nfss - clients - mgrs - monitoring @@ -57,6 +58,35 @@ ansible.builtin.import_role: name: ceph-defaults + - name: Nfs related tasks + when: groups[nfs_group_name] | default([]) | length > 0 + block: + - name: Get nfs nodes ansible facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups[nfs_group_name] }}" + run_once: true + + - name: Get all nfs-ganesha mount points + ansible.builtin.command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts + register: nfs_ganesha_mount_points + failed_when: false + changed_when: false + with_items: "{{ groups[nfs_group_name] }}" + + - name: Ensure nfs-ganesha mountpoint(s) are unmounted + ansible.posix.mount: + path: "{{ item.split(' ')[1] }}" + state: unmounted + with_items: + - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}" + when: item | length > 0 + - name: Ensure cephfs mountpoint(s) are unmounted ansible.builtin.command: umount -a -t ceph changed_when: false @@ -81,6 +111,33 @@ - ceph - libceph + +- name: Purge ceph nfs cluster + hosts: nfss + gather_facts: false # Already gathered previously + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Stop ceph nfss with systemd + ansible.builtin.service: + name: "{{ 'ceph-nfs@' + ansible_facts['hostname'] if containerized_deployment | bool else 'nfs-ganesha' }}" + state: stopped + failed_when: false + + - name: Remove ceph nfs directories for "{{ ansible_facts['hostname'] }}" + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /etc/ganesha + - /var/lib/nfs/ganesha + - /var/run/ganesha + - /etc/systemd/system/ceph-nfs@.service + + - name: Purge node-exporter hosts: - mons @@ -88,6 +145,7 @@ - mdss - rgws - rbdmirrors + - nfss - clients - mgrs - monitoring @@ -709,6 +767,7 @@ - mdss - rgws - rbdmirrors + - nfss - mgrs become: true tasks: @@ -787,6 +846,7 @@ - mdss - rgws - rbdmirrors + - nfss - clients - mgrs - monitoring @@ -1058,6 +1118,7 @@ - mdss - rgws - rbdmirrors + - nfss - mgrs - clients gather_facts: false # Already gathered previously diff --git a/infrastructure-playbooks/purge-dashboard.yml 
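The purge play above looks up every client mount pointing at a ganesha node's public address and unmounts it with ansible.posix.mount. For reference, such a mount would typically have been declared with the same module, roughly as follows; the host name, mount point, and options are assumptions.

    - name: Mount the ganesha CephFS export on a client (illustrative)
      ansible.posix.mount:
        path: /mnt/cephnfs
        src: "ceph-nfs0.example.com:/cephfile"   # default ceph_nfs_ceph_pseudo_path
        fstype: nfs
        opts: "vers=4.1,proto=tcp"
        state: mounted
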
b/infrastructure-playbooks/purge-dashboard.yml index 97e57b615..7c9ae393b 100644 --- a/infrastructure-playbooks/purge-dashboard.yml +++ b/infrastructure-playbooks/purge-dashboard.yml @@ -42,6 +42,7 @@ - "{{ mds_group_name|default('mdss') }}" - "{{ rgw_group_name|default('rgws') }}" - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" - "{{ client_group_name|default('clients') }}" - "{{ mgr_group_name|default('mgrs') }}" - "{{ monitoring_group_name | default('monitoring') }}" @@ -58,6 +59,7 @@ - "{{ mds_group_name|default('mdss') }}" - "{{ rgw_group_name|default('rgws') }}" - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" - "{{ client_group_name|default('clients') }}" - "{{ mgr_group_name|default('mgrs') }}" - "{{ monitoring_group_name | default('monitoring') }}" diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index 21bafa910..ff194d47b 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -46,6 +46,7 @@ - "{{ rgw_group_name|default('rgws') }}" - "{{ mgr_group_name|default('mgrs') }}" - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" - "{{ client_group_name|default('clients') }}" - "{{ monitoring_group_name|default('monitoring') }}" tags: always @@ -1030,6 +1031,68 @@ name: ceph-rbd-mirror +- name: Upgrade ceph nfs node + vars: + upgrade_ceph_packages: true + hosts: "{{ nfs_group_name|default('nfss') }}" + tags: nfss + serial: 1 + become: true + gather_facts: false + tasks: + # failed_when: false is here so that if we upgrade + # from a version of ceph that does not have nfs-ganesha + # then this task will not fail + - name: Stop ceph nfs + ansible.builtin.systemd: + name: nfs-ganesha + state: stopped + enabled: false + masked: true + failed_when: false + when: not containerized_deployment | bool + + - name: Systemd stop nfs container + ansible.builtin.systemd: + name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} + state: stopped + enabled: false + masked: true + failed_when: false + when: + - ceph_nfs_enable_service | bool + - containerized_deployment | bool + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: containerized_deployment | bool + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-nfs role + ansible.builtin.import_role: + name: ceph-nfs + - name: Upgrade ceph client node vars: upgrade_ceph_packages: true @@ -1189,6 +1252,7 @@ - "{{ rgw_group_name|default('rgws') }}" - "{{ mgr_group_name|default('mgrs') }}" - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" - "{{ monitoring_group_name|default('monitoring') }}" tags: monitoring gather_facts: false diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index 
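The rolling_update play above addresses the containerized unit as ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}. Following the role's own comments, a pacemaker-managed active/passive pair would pin that suffix instead of relying on each hostname; a sketch with assumed values:

    ---
    # group_vars/nfss.yml -- assumed two-node active/passive pair driven by pacemaker
    ceph_nfs_enable_service: false     # pacemaker, not ansible/systemd, starts and stops the service
    ceph_nfs_service_suffix: "nfs-ha"  # constant instance id: ceph-nfs@nfs-ha on every host
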
851618ac0..5c9ce494d 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -43,6 +43,7 @@ - "{{ mds_group_name|default('mdss') }}" - "{{ rgw_group_name|default('rgws') }}" - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" become: true @@ -663,6 +664,60 @@ name: ceph-rbd-mirror +- name: Switching from non-containerized to containerized ceph nfs + + hosts: "{{ nfs_group_name|default('nfss') }}" + + vars: + containerized_deployment: true + nfs_group_name: nfss + + serial: 1 + become: true + pre_tasks: + + # failed_when: false is here because if we're + # working with a jewel cluster then ceph nfs + # will not exist + - name: Stop non-containerized ceph nfs(s) + ansible.builtin.service: + name: nfs-ganesha + state: stopped + enabled: false + failed_when: false + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + # NOTE: changed from file module to raw find command for performance reasons + # The file module has to run checks on current ownership of all directories and files. This is unnecessary + # as in this case we know we want all owned by ceph user + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + changed_when: false + + tasks: + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + + - name: Import ceph-nfs role + ansible.builtin.import_role: + name: ceph-nfs + - name: Switching from non-containerized to containerized ceph-crash hosts: diff --git a/infrastructure-playbooks/take-over-existing-cluster.yml b/infrastructure-playbooks/take-over-existing-cluster.yml index 7d61f7088..228e86ae3 100644 --- a/infrastructure-playbooks/take-over-existing-cluster.yml +++ b/infrastructure-playbooks/take-over-existing-cluster.yml @@ -29,6 +29,7 @@ - osds - mdss - rgws + - nfss - rbdmirrors - clients - mgrs diff --git a/plugins/callback/installer_checkpoint.py b/plugins/callback/installer_checkpoint.py index 42b684a2c..de9234d21 100644 --- a/plugins/callback/installer_checkpoint.py +++ b/plugins/callback/installer_checkpoint.py @@ -26,6 +26,7 @@ class CallbackModule(CallbackBase): 'installer_phase_ceph_osd', 'installer_phase_ceph_mds', 'installer_phase_ceph_rgw', + 'installer_phase_ceph_nfs', 'installer_phase_ceph_rbdmirror', 'installer_phase_ceph_client', 'installer_phase_ceph_rgw_loadbalancer', @@ -58,6 +59,10 @@ class CallbackModule(CallbackBase): 'title': 'Install Ceph RGW', 'playbook': 'roles/ceph-rgw/tasks/main.yml' }, + 'installer_phase_ceph_nfs': { + 'title': 'Install Ceph NFS', + 'playbook': 'roles/ceph-nfs/tasks/main.yml' + }, 'installer_phase_ceph_rbdmirror': { 'title': 'Install Ceph RBD Mirror', 'playbook': 'roles/ceph-rbd-mirror/tasks/main.yml' diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index 7488db717..1fdb3bbe5 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -59,4 +59,5 @@ 
ansible.builtin.include_tasks: selinux.yml when: - ansible_facts['os_family'] == 'RedHat' - - inventory_hostname in groups.get(rgwloadbalancer_group_name, []) + - inventory_hostname in groups.get(nfs_group_name, []) + or inventory_hostname in groups.get(rgwloadbalancer_group_name, []) diff --git a/roles/ceph-common/tasks/selinux.yml b/roles/ceph-common/tasks/selinux.yml index 22e2d3f99..65459b58b 100644 --- a/roles/ceph-common/tasks/selinux.yml +++ b/roles/ceph-common/tasks/selinux.yml @@ -17,5 +17,6 @@ register: result until: result is succeeded when: - - inventory_hostname in groups.get(rgwloadbalancer_group_name, []) + - inventory_hostname in groups.get(nfs_group_name, []) + or inventory_hostname in groups.get(rgwloadbalancer_group_name, []) - ansible_facts['distribution_major_version'] == '8' diff --git a/roles/ceph-container-common/tasks/fetch_image.yml b/roles/ceph-container-common/tasks/fetch_image.yml index 4816ea7c2..f222e4ba3 100644 --- a/roles/ceph-container-common/tasks/fetch_image.yml +++ b/roles/ceph-container-common/tasks/fetch_image.yml @@ -46,6 +46,7 @@ inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(mgr_group_name, []) or inventory_hostname in groups.get(rbdmirror_group_name, []) or + inventory_hostname in groups.get(nfs_group_name, []) or inventory_hostname in groups.get(monitoring_group_name, []) environment: HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}" diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 09904f9ad..bf1a13141 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -44,6 +44,7 @@ mon_group_name: mons osd_group_name: osds rgw_group_name: rgws mds_group_name: mdss +nfs_group_name: nfss rbdmirror_group_name: rbdmirrors client_group_name: clients mgr_group_name: mgrs @@ -54,6 +55,7 @@ adopt_label_group_names: - "{{ osd_group_name }}" - "{{ rgw_group_name }}" - "{{ mds_group_name }}" + - "{{ nfs_group_name }}" - "{{ rbdmirror_group_name }}" - "{{ client_group_name }}" - "{{ mgr_group_name }}" @@ -71,6 +73,7 @@ ceph_mgr_firewall_zone: public ceph_osd_firewall_zone: public ceph_rgw_firewall_zone: public ceph_mds_firewall_zone: public +ceph_nfs_firewall_zone: public ceph_rbdmirror_firewall_zone: public ceph_dashboard_firewall_zone: public ceph_rgwloadbalancer_firewall_zone: public @@ -147,6 +150,13 @@ ceph_stable_key: https://download.ceph.com/keys/release.asc ceph_stable_release: reef ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}" +nfs_ganesha_stable: true # use stable repos for nfs-ganesha +centos_release_nfs: centos-release-nfs-ganesha4 +nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu +nfs_ganesha_apt_keyserver: keyserver.ubuntu.com +nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA +libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu + # Use the option below to specify your applicable package tree, eg. 
when using non-LTS Ubuntu versions # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ # for more info read: https://github.com/ceph/ceph-ansible/issues/305 @@ -181,6 +191,13 @@ ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_dev_branch: main # development branch you would like to use e.g: main, wip-hack ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built) +nfs_ganesha_dev: false # use development repos for nfs-ganesha + +# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman +# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous +nfs_ganesha_flavor: "ceph_main" + + # REPOSITORY: CUSTOM # # Enabled when ceph_repository == 'custom' @@ -396,6 +413,10 @@ handler_health_mds_check_delay: 10 handler_health_rgw_check_retries: 5 handler_health_rgw_check_delay: 10 +# NFS handler checks +handler_health_nfs_check_retries: 5 +handler_health_nfs_check_delay: 10 + # RBD MIRROR handler checks handler_health_rbd_mirror_check_retries: 5 handler_health_rbd_mirror_check_delay: 10 @@ -417,6 +438,24 @@ health_osd_check_delay: 10 ceph_rbd_mirror_pool: "rbd" +############### +# NFS-GANESHA # +############### +# +# Access type options +# +# Enable NFS File access +# If set to true, then ganesha is set up to export the root of the +# Ceph filesystem, and ganesha's attribute and directory caching is disabled +# as much as possible since libcephfs clients also caches the same +# information. +# +# Set this to true to enable File access via NFS. Requires an MDS role. +nfs_file_gw: false +# Set this to true to enable Object access via NFS. Requires an RGW role. +nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}" + + ################### # CONFIG OVERRIDE # ################### diff --git a/roles/ceph-handler/handlers/main.yml b/roles/ceph-handler/handlers/main.yml index 440918c21..8f068943f 100644 --- a/roles/ceph-handler/handlers/main.yml +++ b/roles/ceph-handler/handlers/main.yml @@ -13,6 +13,7 @@ - "Restart ceph osds" - "Restart ceph mdss" - "Restart ceph rgws" + - "Restart ceph nfss" - "Restart ceph rbdmirrors" - "Restart ceph mgrs" register: tmpdirpath @@ -38,6 +39,11 @@ when: rgw_group_name in group_names listen: "Restart ceph rgws" + - name: Nfss handler + ansible.builtin.include_tasks: handler_nfss.yml + when: nfs_group_name in group_names + listen: "Restart ceph nfss" + - name: Rbdmirrors handler ansible.builtin.include_tasks: handler_rbdmirrors.yml when: rbdmirror_group_name in group_names diff --git a/roles/ceph-handler/tasks/check_running_containers.yml b/roles/ceph-handler/tasks/check_running_containers.yml index 2f6a40ff5..e78b7bec8 100644 --- a/roles/ceph-handler/tasks/check_running_containers.yml +++ b/roles/ceph-handler/tasks/check_running_containers.yml @@ -47,6 +47,14 @@ check_mode: false when: inventory_hostname in groups.get(rbdmirror_group_name, []) +- name: Check for a nfs container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'" + register: ceph_nfs_container_stat + changed_when: false + failed_when: false + check_mode: false + when: inventory_hostname in groups.get(nfs_group_name, []) + - name: Check for a ceph-crash container ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'" register: 
ceph_crash_container_stat diff --git a/roles/ceph-handler/tasks/check_socket_non_container.yml b/roles/ceph-handler/tasks/check_socket_non_container.yml index ce390b1af..96c492ffc 100644 --- a/roles/ceph-handler/tasks/check_socket_non_container.yml +++ b/roles/ceph-handler/tasks/check_socket_non_container.yml @@ -197,6 +197,14 @@ - rbd_mirror_socket_stat.files | length > 0 - item.1.rc == 1 +- name: Check for a nfs ganesha pid + ansible.builtin.command: "pgrep ganesha.nfsd" + register: nfs_process + changed_when: false + failed_when: false + check_mode: false + when: inventory_hostname in groups.get(nfs_group_name, []) + - name: Check for a ceph-crash process ansible.builtin.command: pgrep ceph-crash changed_when: false diff --git a/roles/ceph-handler/tasks/handler_nfss.yml b/roles/ceph-handler/tasks/handler_nfss.yml new file mode 100644 index 000000000..dadfc1d8c --- /dev/null +++ b/roles/ceph-handler/tasks/handler_nfss.yml @@ -0,0 +1,28 @@ +--- +- name: Set _nfs_handler_called before restart + ansible.builtin.set_fact: + _nfs_handler_called: true + +- name: Copy nfs restart script + ansible.builtin.template: + src: restart_nfs_daemon.sh.j2 + dest: "{{ tmpdirpath.path }}/restart_nfs_daemon.sh" + owner: root + group: root + mode: "0750" + when: tmpdirpath.path is defined + +- name: Restart ceph nfs daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_nfs_daemon.sh + when: + - hostvars[item]['handler_nfs_status'] | default(False) | bool + - hostvars[item]['_nfs_handler_called'] | default(False) | bool + - hostvars[item].tmpdirpath.path is defined + with_items: "{{ groups[nfs_group_name] }}" + delegate_to: "{{ item }}" + changed_when: false + run_once: true + +- name: Set _nfs_handler_called after restart + ansible.builtin.set_fact: + _nfs_handler_called: false diff --git a/roles/ceph-handler/tasks/main.yml b/roles/ceph-handler/tasks/main.yml index 776fed73a..c963b0115 100644 --- a/roles/ceph-handler/tasks/main.yml +++ b/roles/ceph-handler/tasks/main.yml @@ -23,6 +23,11 @@ handler_rgw_status: "{{ 0 in (rgw_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rgw_container_stat.get('rc') == 0 and ceph_rgw_container_stat.get('stdout_lines', []) | length != 0) }}" when: inventory_hostname in groups.get(rgw_group_name, []) +- name: Set_fact handler_nfs_status + ansible.builtin.set_fact: + handler_nfs_status: "{{ (nfs_process.get('rc') == 0) if not containerized_deployment | bool else (ceph_nfs_container_stat.get('rc') == 0 and ceph_nfs_container_stat.get('stdout_lines', []) | length != 0) }}" + when: inventory_hostname in groups.get(nfs_group_name, []) + - name: Set_fact handler_rbd_status ansible.builtin.set_fact: handler_rbd_mirror_status: "{{ 0 in (rbd_mirror_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rbd_mirror_container_stat.get('rc') == 0 and ceph_rbd_mirror_container_stat.get('stdout_lines', []) | length != 0) }}" diff --git a/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 b/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 new file mode 100644 index 000000000..c1571baa4 --- /dev/null +++ b/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 @@ -0,0 +1,26 @@ +#!/bin/bash + +RETRIES="{{ handler_health_nfs_check_retries }}" +DELAY="{{ handler_health_nfs_check_delay }}" +NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}" +PID=/var/run/ganesha/ganesha.pid +{% if containerized_deployment | bool %} 
+DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}" +{% endif %} + +# First, restart the daemon +{% if containerized_deployment | bool -%} +systemctl restart $NFS_NAME +# Wait and ensure the pid exists after restarting the daemon +while [ $RETRIES -ne 0 ]; do + $DOCKER_EXEC test -f $PID && exit 0 + sleep $DELAY + let RETRIES=RETRIES-1 +done +# If we reach this point, it means the pid is not present. +echo "PID file ${PID} could not be found, which means Ganesha is not running. Showing $NFS_NAME unit logs now:" +journalctl -u $NFS_NAME +exit 1 +{% else %} +systemctl restart nfs-ganesha +{% endif %} diff --git a/roles/ceph-infra/tasks/configure_firewall.yml b/roles/ceph-infra/tasks/configure_firewall.yml index f40ccc8ca..a47986bd5 100644 --- a/roles/ceph-infra/tasks/configure_firewall.yml +++ b/roles/ceph-infra/tasks/configure_firewall.yml @@ -162,6 +162,40 @@ - mds_group_name is defined - mds_group_name in group_names + - name: Open ceph networks on nfs + ansible.posix.firewalld: + zone: "{{ ceph_nfs_firewall_zone }}" + source: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') }}" + when: + - nfs_group_name is defined + - nfs_group_name in group_names + + - name: Open nfs ports + ansible.posix.firewalld: + service: nfs + zone: "{{ ceph_nfs_firewall_zone }}" + permanent: true + immediate: true + state: enabled + when: + - nfs_group_name is defined + - nfs_group_name in group_names + + - name: Open nfs ports (portmapper) + ansible.posix.firewalld: + port: "111/tcp" + zone: "{{ ceph_nfs_firewall_zone }}" + permanent: true + immediate: true + state: enabled + when: + - nfs_group_name is defined + - nfs_group_name in group_names + - name: Open ceph networks on rbdmirror ansible.posix.firewalld: zone: "{{ ceph_rbdmirror_firewall_zone }}" diff --git a/roles/ceph-nfs/LICENSE b/roles/ceph-nfs/LICENSE new file mode 100644 index 000000000..4953f91ef --- /dev/null +++ b/roles/ceph-nfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2016] [Red Hat, Inc.] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-nfs/README.md b/roles/ceph-nfs/README.md new file mode 100644 index 000000000..b58db562b --- /dev/null +++ b/roles/ceph-nfs/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-nfs + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-nfs/defaults/main.yml b/roles/ceph-nfs/defaults/main.yml new file mode 100644 index 000000000..5cfbe22a1 --- /dev/null +++ b/roles/ceph-nfs/defaults/main.yml @@ -0,0 +1,122 @@ +--- +# You can override vars by using host or group vars + +########### +# GENERAL # +########### + +# Even though NFS nodes should not have the admin key +# at their disposal, some people might want to have it +# distributed on RGW nodes. 
Setting 'copy_admin_key' to 'true' +# will copy the admin key to the /etc/ceph/ directory +copy_admin_key: false + +# Whether docker container or systemd service should be enabled +# and started, it's useful to set it to false if nfs-ganesha +# service is managed by pacemaker +ceph_nfs_enable_service: true + +# ceph-nfs systemd service uses ansible's hostname as an instance id, +# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not +# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in +# such case it's better to have constant instance id instead which +# can be set by 'ceph_nfs_service_suffix' +# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}" + +###################### +# NFS Ganesha Config # +###################### +ceph_nfs_log_file: "/var/log/ganesha/ganesha.log" +ceph_nfs_dynamic_exports: false +# If set to true then rados is used to store ganesha exports +# and client sessions information, this is useful if you +# run multiple nfs-ganesha servers in active/passive mode and +# want to do failover +ceph_nfs_rados_backend: false +# Name of the rados object used to store a list of the export rados +# object URLS +ceph_nfs_rados_export_index: "ganesha-export-index" +# Address ganesha service should listen on, by default ganesha listens on all +# addresses. (Note: ganesha ignores this parameter in current version due to +# this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217) +# ceph_nfs_bind_addr: 0.0.0.0 + +# If set to true, then ganesha's attribute and directory caching is disabled +# as much as possible. Currently, ganesha caches by default. +# When using ganesha as CephFS's gateway, it is recommended to turn off +# ganesha's caching as the libcephfs clients also cache the same information. +# Note: Irrespective of this option's setting, ganesha's caching is disabled +# when setting 'nfs_file_gw' option as true. +ceph_nfs_disable_caching: false + +# This is the file ganesha will use to control NFSv4 ID mapping +ceph_nfs_idmap_conf: "/etc/ganesha/idmap.conf" + +# idmap configuration file override. +# This allows you to specify more configuration options +# using an INI style format. +# Example: +# idmap_conf_overrides: +# General: +# Domain: foo.domain.net +idmap_conf_overrides: {} + +#################### +# FSAL Ceph Config # +#################### +ceph_nfs_ceph_export_id: 20133 +ceph_nfs_ceph_pseudo_path: "/cephfile" +ceph_nfs_ceph_protocols: "3,4" +ceph_nfs_ceph_access_type: "RW" +ceph_nfs_ceph_user: "admin" +ceph_nfs_ceph_squash: "Root_Squash" +ceph_nfs_ceph_sectype: "sys,krb5,krb5i,krb5p" + +################### +# FSAL RGW Config # +################### +ceph_nfs_rgw_export_id: 20134 +ceph_nfs_rgw_pseudo_path: "/cephobject" +ceph_nfs_rgw_protocols: "3,4" +ceph_nfs_rgw_access_type: "RW" +ceph_nfs_rgw_user: "cephnfs" +ceph_nfs_rgw_squash: "Root_Squash" +ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p" +# Note: keys are optional and can be generated, but not on containerized, where +# they must be configered. +# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY" +# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C" +rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }} + +################### +# CONFIG OVERRIDE # +################### + +# Ganesha configuration file override. 
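
As a rough, hypothetical sketch (not part of this role), the defaults above are meant to be overridden through host or group vars rather than edited in place; a minimal group_vars/nfss along these lines would adjust the admin-key copy, the RADOS backend, the systemd instance suffix and the NFSv4 ID-mapping domain — every value shown is an illustrative assumption:

# group_vars/nfss -- illustrative override sketch only; all values are assumptions
copy_admin_key: true                 # distribute the admin keyring to the NFS nodes
ceph_nfs_rados_backend: true         # keep the export index and recovery state in RADOS
ceph_nfs_service_suffix: "ha-nfs"    # constant instance id, i.e. a ceph-nfs@ha-nfs unit, for pacemaker-managed setups
idmap_conf_overrides:
  General:
    Domain: example.internal         # assumed NFSv4 domain, rendered into idmap.conf by config_template
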
+# These multiline strings will be appended to the contents of the blocks in ganesha.conf and +# must be in the correct ganesha.conf format seen here: +# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example +# +# Example: +# CACHEINODE { + # Entries_HWMark = 100000; +# } +# +# ganesha_core_param_overrides: +# ganesha_ceph_export_overrides: +# ganesha_rgw_export_overrides: +# ganesha_rgw_section_overrides: +# ganesha_log_overrides: +# ganesha_conf_overrides: | +# CACHEINODE { + # Entries_HWMark = 100000; +# } + +########## +# DOCKER # +########## + +ceph_docker_image: "ceph/daemon" +ceph_docker_image_tag: latest +ceph_nfs_docker_extra_env: +ceph_config_keys: [] # DON'T TOUCH ME diff --git a/roles/ceph-nfs/meta/main.yml b/roles/ceph-nfs/meta/main.yml new file mode 100644 index 000000000..53a674633 --- /dev/null +++ b/roles/ceph-nfs/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Daniel Gryniewicz + description: Installs Ceph NFS Gateway + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml new file mode 100644 index 000000000..587e3b299 --- /dev/null +++ b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml @@ -0,0 +1,23 @@ +--- +- name: Create rgw nfs user "{{ ceph_nfs_rgw_user }}" + radosgw_user: + name: "{{ ceph_nfs_rgw_user }}" + cluster: "{{ cluster }}" + display_name: "RGW NFS User" + access_key: "{{ ceph_nfs_rgw_access_key | default(omit) }}" + secret_key: "{{ ceph_nfs_rgw_secret_key | default(omit) }}" + run_once: true + register: rgw_nfs_user + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + when: nfs_obj_gw | bool + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + +- name: Set_fact ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key + ansible.builtin.set_fact: + ceph_nfs_rgw_access_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['access_key'] }}" + ceph_nfs_rgw_secret_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['secret_key'] }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + when: nfs_obj_gw | bool diff --git a/roles/ceph-nfs/tasks/main.yml b/roles/ceph-nfs/tasks/main.yml new file mode 100644 index 000000000..acec88561 --- /dev/null +++ b/roles/ceph-nfs/tasks/main.yml @@ -0,0 +1,96 @@ +--- +# global/common requirement +- name: Stop nfs server service + ansible.builtin.systemd: + name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}" + state: stopped + enabled: false + failed_when: false + +- name: Include pre_requisite_non_container.yml + ansible.builtin.include_tasks: pre_requisite_non_container.yml + when: not containerized_deployment | bool + +- name: Include pre_requisite_container.yml + ansible.builtin.include_tasks: pre_requisite_container.yml + when: containerized_deployment | bool + +- name: Set_fact _rgw_hostname + ansible.builtin.set_fact: + _rgw_hostname: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}" + +- name: Set rgw parameter (log file) + ceph_config: + action: set + who: "client.rgw.{{ _rgw_hostname }}" + option: "log file" + value: "/var/log/ceph/{{ 
cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}.log" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + loop: "{{ groups.get('nfss', []) }}" + +- name: Include create_rgw_nfs_user.yml + ansible.builtin.import_tasks: create_rgw_nfs_user.yml + when: groups.get(mon_group_name, []) | length > 0 + +- name: Install nfs-ganesha-selinux on RHEL 8 + ansible.builtin.package: + name: nfs-ganesha-selinux + state: present + register: result + until: result is succeeded + when: + - not containerized_deployment | bool + - inventory_hostname in groups.get(nfs_group_name, []) + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['distribution_major_version'] == '8' + +# NOTE (leseb): workaround for issues with ganesha and librgw +- name: Add ganesha_t to permissive domain + community.general.selinux_permissive: + name: ganesha_t + permissive: true + failed_when: false + when: + - not containerized_deployment | bool + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['selinux']['status'] == 'enabled' + +- name: Nfs with external ceph cluster task related + when: + - groups.get(mon_group_name, []) | length == 0 + - ceph_nfs_ceph_user is defined + block: + - name: Create keyring directory + ansible.builtin.file: + path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ item }}" + state: directory + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0755" + with_items: + - "{{ ceph_nfs_ceph_user }}" + - "{{ ansible_facts['hostname'] }}" + + - name: Set_fact rgw_client_name + ansible.builtin.set_fact: + rgw_client_name: "client.rgw.{{ ceph_nfs_ceph_user }}" + + - name: Get client cephx keys + ansible.builtin.copy: + dest: "{{ item.1 }}" + content: "{{ item.0.content | b64decode }}" + mode: "{{ item.0.item.get('mode', '0600') }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + with_nested: + - "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] | default([]) }}" + - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"] + when: + - not item.0.get('skipped', False) + - item.0.item.name == 'client.' 
+ ceph_nfs_ceph_user or item.0.item.name == rgw_client_name + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Include start_nfs.yml + ansible.builtin.import_tasks: start_nfs.yml diff --git a/roles/ceph-nfs/tasks/pre_requisite_container.yml b/roles/ceph-nfs/tasks/pre_requisite_container.yml new file mode 100644 index 000000000..023c8d079 --- /dev/null +++ b/roles/ceph-nfs/tasks/pre_requisite_container.yml @@ -0,0 +1,108 @@ +--- +- name: Keyring related tasks + when: groups.get(mon_group_name, []) | length > 0 + block: + - name: Set_fact container_exec_cmd + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}" + with_items: "{{ groups.get(mon_group_name, []) }}" + delegate_to: "{{ item }}" + delegate_facts: true + run_once: true + + - name: Create directories + ansible.builtin.file: + path: "{{ item.0 }}" + state: "directory" + owner: "{{ ceph_uid }}" + group: "{{ ceph_uid }}" + mode: "0755" + delegate_to: "{{ item.1 }}" + with_nested: + - ["/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", + "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}"] + - ["{{ groups.get(mon_group_name)[0] }}", "{{ inventory_hostname }}"] + + - name: Set_fact keyrings_list + ansible.builtin.set_fact: + keyrings_list: + - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" } + - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" } + - { name: "client.rgw.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "allow r", "osd": "allow rwx tag rgw *=*"} } + - { name: "client.nfs.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "r", "osd": "allow rw pool=.nfs"} } + + - name: Create keyrings from a monitor + ceph_key: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + dest: "{{ item.path }}" + caps: "{{ item.caps }}" + import_key: true + owner: "{{ ceph_uid }}" + group: "{{ ceph_uid }}" + mode: "0600" + no_log: "{{ no_log_on_ceph_key_tasks }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + loop: "{{ keyrings_list }}" + when: + - cephx | bool + - item.create | default(False) | bool + + - name: Get keys from monitors + ceph_key: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _rgw_keys + loop: "{{ keyrings_list }}" + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + when: + - cephx | bool + - item.copy_key | default(True) | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Debug + ansible.builtin.debug: + msg: "{{ _rgw_keys }}" + + - name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "{{ item.item.path }}" + content: "{{ item.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if 
containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + with_items: "{{ _rgw_keys.results }}" + when: + - cephx | bool + - item.item.copy_key | default(True) | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Dbus related tasks + when: ceph_nfs_dynamic_exports | bool + block: + - name: Get file + ansible.builtin.command: "{{ container_binary }} run --rm --entrypoint=cat {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} /etc/dbus-1/system.d/org.ganesha.nfsd.conf" + register: dbus_ganesha_file + run_once: true + changed_when: false + + - name: Create dbus service file + ansible.builtin.copy: + content: "{{ dbus_ganesha_file.stdout }}" + dest: /etc/dbus-1/system.d/org.ganesha.nfsd.conf + owner: "root" + group: "root" + mode: "0644" + + - name: Reload dbus configuration + ansible.builtin.command: "killall -SIGHUP dbus-daemon" + changed_when: false diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml new file mode 100644 index 000000000..a13654c15 --- /dev/null +++ b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml @@ -0,0 +1,96 @@ +--- +- name: Include red hat based system related tasks + ansible.builtin.include_tasks: pre_requisite_non_container_red_hat.yml + when: ansible_facts['os_family'] == 'RedHat' + +- name: Include debian based system related tasks + ansible.builtin.include_tasks: pre_requisite_non_container_debian.yml + when: ansible_facts['os_family'] == 'Debian' + +- name: Install nfs rgw/cephfs gateway - SUSE/openSUSE + community.general.zypper: + name: "{{ item.name }}" + disable_gpg_check: true + with_items: + - { name: 'nfs-ganesha-rgw', install: "{{ nfs_obj_gw }}" } + - { name: 'radosgw', install: "{{ nfs_obj_gw }}" } + - { name: 'nfs-ganesha-ceph', install: "{{ nfs_file_gw }}" } + when: + - (ceph_origin == 'repository' or ceph_origin == 'distro') + - ansible_facts['os_family'] == 'Suse' + - item.install | bool + register: result + until: result is succeeded + +# NOTE (leseb): we use root:ceph for permissions since ganesha +# does not have the right selinux context to read ceph directories. 
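
Package selection in these prerequisite files is driven entirely by the two gateway toggles, so a hedged group_vars sketch (placeholder values, purely illustrative) may help: nfs_file_gw pulls the CephFS FSAL packages (nfs-ganesha-ceph) and renders the CEPH export, while nfs_obj_gw pulls the RGW FSAL stack (nfs-ganesha-rgw, radosgw) and renders the RGW export; on a cluster with no managed monitors the RGW path additionally needs pre-created S3 credentials, as check_nfs.yml later in this patch enforces.

# Hypothetical group_vars/nfss toggles -- values are assumptions, shown for illustration
nfs_file_gw: true      # CephFS gateway: installs nfs-ganesha-ceph, enables the CEPH FSAL export block
nfs_obj_gw: false      # RGW gateway: installs nfs-ganesha-rgw/radosgw, enables the RGW FSAL export block
# When nfs_obj_gw is true and no monitors are managed by ceph-ansible,
# both RGW keys below must be supplied (check_nfs.yml fails otherwise):
# ceph_nfs_rgw_access_key: "EXAMPLEACCESSKEY"
# ceph_nfs_rgw_secret_key: "EXAMPLESECRETKEY"
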
+- name: Create rados gateway and ganesha directories + ansible.builtin.file: + path: "{{ item.name }}" + state: directory + owner: "{{ item.owner | default('ceph') }}" + group: "{{ item.group | default('ceph') }}" + mode: "{{ ceph_directories_mode }}" + with_items: + - { name: "/var/lib/ceph/bootstrap-rgw", create: "{{ nfs_obj_gw }}" } + - { name: "/var/lib/ceph/radosgw", create: "{{ nfs_obj_gw }}" } + - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", create: "{{ nfs_obj_gw }}" } + - { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" } + - { name: "/var/log/ceph", create: true } + - { name: "/var/log/ganesha", create: true, owner: root, group: root } + - { name: "/var/run/ceph", create: true } + when: item.create | bool + +- name: Cephx related tasks + when: + - cephx | bool + - groups.get(mon_group_name, []) | length > 0 + block: + - name: Get keys from monitors + ceph_key: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + output_format: plain + state: info + register: _rgw_keys + with_items: + - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" } + - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" } + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + when: + - cephx | bool + - item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "{{ item.item.path }}" + content: "{{ item.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + with_items: "{{ _rgw_keys.results }}" + when: + - cephx | bool + - item.item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Nfs object gateway related tasks + when: nfs_obj_gw | bool + block: + - name: Create rados gateway keyring + ceph_key: + name: "client.rgw.{{ ansible_facts['hostname'] }}" + cluster: "{{ cluster }}" + user: client.bootstrap-rgw + user_key: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring" + caps: + mon: "allow rw" + osd: "allow rwx" + dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring" + import_key: false + owner: ceph + group: ceph + mode: "{{ ceph_keyring_permissions }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml new file mode 100644 index 000000000..b0848f808 --- /dev/null +++ b/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml @@ -0,0 +1,80 @@ +--- +- name: Debian based systems - repo handling + when: ceph_origin == 'repository' + block: + - name: Stable repos specific tasks + when: + - nfs_ganesha_stable | bool + - ceph_repository == 'community' + block: + - name: Add nfs-ganesha stable repository + ansible.builtin.apt_repository: + repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main" + state: present + update_cache: false + register: add_ganesha_apt_repo + + - name: Add libntirpc stable repository + ansible.builtin.apt_repository: + repo: "deb {{ libntirpc_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main" + state: present + update_cache: false + register: 
add_libntirpc_apt_repo + when: libntirpc_stable_deb_repo is defined + + - name: Add nfs-ganesha ppa apt key + ansible.builtin.apt_key: + keyserver: "{{ nfs_ganesha_apt_keyserver }}" + id: "{{ nfs_ganesha_apt_key_id }}" + when: + - nfs_ganesha_apt_key_id is defined + - nfs_ganesha_apt_keyserver is defined + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + register: update_ganesha_apt_cache + retries: 5 + delay: 2 + until: update_ganesha_apt_cache is success + when: add_ganesha_apt_repo is changed or add_libntirpc_apt_repo is changed + + - name: Debian based systems - dev repos specific tasks + when: + - nfs_ganesha_dev | bool + - ceph_repository == 'dev' + block: + - name: Fetch nfs-ganesha development repository + ansible.builtin.uri: + url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}" + return_content: true + register: nfs_ganesha_dev_apt_repo + + - name: Add nfs-ganesha development repository + ansible.builtin.copy: + content: "{{ nfs_ganesha_dev_apt_repo.content }}" + dest: /etc/apt/sources.list.d/nfs-ganesha-dev.list + owner: root + group: root + backup: true + mode: "0644" + +- name: Debain based systems - install required packages + block: + - name: Debian based systems + when: ceph_origin == 'repository' or ceph_origin == 'distro' + block: + - name: Install nfs rgw/cephfs gateway - debian + ansible.builtin.apt: + name: ['nfs-ganesha-rgw', 'radosgw'] + allow_unauthenticated: true + register: result + until: result is succeeded + when: nfs_obj_gw | bool + - name: Install nfs rgw/cephfs gateway - debian + ansible.builtin.apt: + name: nfs-ganesha-ceph + allow_unauthenticated: true + register: result + until: result is succeeded + when: nfs_file_gw | bool diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml new file mode 100644 index 000000000..92a444822 --- /dev/null +++ b/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml @@ -0,0 +1,43 @@ +--- +- name: Red hat based systems - repo handling + when: ceph_origin == 'repository' + block: + - name: Red hat based systems - stable repo related tasks + when: + - nfs_ganesha_stable | bool + - ceph_repository == 'community' + block: + - name: Add nfs-ganesha stable repository + ansible.builtin.package: + name: "{{ centos_release_nfs }}" + state: present + + - name: Red hat based systems - dev repo related tasks + when: + - nfs_ganesha_dev | bool + - ceph_repository == 'dev' + block: + - name: Add nfs-ganesha dev repo + ansible.builtin.get_url: + url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}" + dest: /etc/yum.repos.d/nfs-ganesha-dev.repo + mode: "0644" + force: true + +- name: Red hat based systems - install nfs packages + block: + - name: Install nfs cephfs gateway + ansible.builtin.package: + name: ['nfs-ganesha-ceph', 'nfs-ganesha-rados-grace'] + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + register: result + until: result is succeeded + when: nfs_file_gw | bool + + - name: Install redhat nfs-ganesha-rgw and ceph-radosgw packages + ansible.builtin.package: + name: ['nfs-ganesha-rgw', 'nfs-ganesha-rados-grace', 
'nfs-ganesha-rados-urls', 'ceph-radosgw'] + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + register: result + until: result is succeeded + when: nfs_obj_gw | bool diff --git a/roles/ceph-nfs/tasks/start_nfs.yml b/roles/ceph-nfs/tasks/start_nfs.yml new file mode 100644 index 000000000..45e7a26fd --- /dev/null +++ b/roles/ceph-nfs/tasks/start_nfs.yml @@ -0,0 +1,105 @@ +--- +- name: Nfs various pre-requisites tasks + block: + - name: Set_fact exec_cmd_nfs - external + ansible.builtin.set_fact: + exec_cmd_nfs: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=rados ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rados' }} -n client.{{ ceph_nfs_ceph_user }} -k /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring" + delegate_node: "{{ inventory_hostname }}" + when: groups.get(mon_group_name, []) | length == 0 + + - name: Set_fact exec_cmd_nfs - internal + ansible.builtin.set_fact: + exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados" + delegate_node: "{{ groups[mon_group_name][0] }}" + when: groups.get(mon_group_name, []) | length > 0 + + - name: Check if rados index object exists + ansible.builtin.shell: "set -o pipefail && {{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls | grep {{ ceph_nfs_rados_export_index }}" + changed_when: false + failed_when: false + register: rados_index_exists + check_mode: false + when: ceph_nfs_rados_backend | bool + delegate_to: "{{ delegate_node }}" + run_once: true + + - name: Create an empty rados index object + ansible.builtin.command: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null" + when: + - ceph_nfs_rados_backend | bool + - rados_index_exists.rc != 0 + delegate_to: "{{ delegate_node }}" + changed_when: false + run_once: true + +- name: Create /etc/ganesha + ansible.builtin.file: + path: /etc/ganesha + state: directory + owner: root + group: root + mode: "0755" + +- name: Generate ganesha configuration file + ansible.builtin.template: + src: "ganesha.conf.j2" + dest: /etc/ganesha/ganesha.conf + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph nfss + +- name: Generate ganesha idmap.conf file + openstack.config_template.config_template: + src: "idmap.conf.j2" + dest: "{{ ceph_nfs_idmap_conf }}" + owner: "root" + group: "root" + mode: "0644" + config_overrides: "{{ idmap_conf_overrides }}" + config_type: ini + notify: Restart ceph nfss + +- name: Create exports directory + ansible.builtin.file: + path: /etc/ganesha/export.d + state: directory + owner: "root" + group: "root" + mode: "0755" + when: ceph_nfs_dynamic_exports | bool + +- name: Create exports dir index file + ansible.builtin.copy: + content: "" + force: false + dest: /etc/ganesha/export.d/INDEX.conf + owner: "root" + group: "root" + mode: "0644" + when: ceph_nfs_dynamic_exports | bool + +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + when: containerized_deployment | bool + +- name: Systemd start nfs container + ansible.builtin.systemd: + name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} + state: started + enabled: true + masked: false + 
daemon_reload: true + when: + - containerized_deployment | bool + - ceph_nfs_enable_service | bool + +- name: Start nfs gateway service + ansible.builtin.systemd: + name: nfs-ganesha + state: started + enabled: true + masked: false + when: + - not containerized_deployment | bool + - ceph_nfs_enable_service | bool diff --git a/roles/ceph-nfs/tasks/systemd.yml b/roles/ceph-nfs/tasks/systemd.yml new file mode 100644 index 000000000..1534cf4fd --- /dev/null +++ b/roles/ceph-nfs/tasks/systemd.yml @@ -0,0 +1,9 @@ +--- +- name: Generate systemd unit file + ansible.builtin.template: + src: "{{ role_path }}/templates/ceph-nfs.service.j2" + dest: /etc/systemd/system/ceph-nfs@.service + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph nfss diff --git a/roles/ceph-nfs/templates/ceph-nfs.service.j2 b/roles/ceph-nfs/templates/ceph-nfs.service.j2 new file mode 100644 index 000000000..663faedd4 --- /dev/null +++ b/roles/ceph-nfs/templates/ceph-nfs.service.j2 @@ -0,0 +1,56 @@ +[Unit] +Description=NFS-Ganesha file server +Documentation=http://github.com/nfs-ganesha/nfs-ganesha/wiki +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target + +[Service] +EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-nfs-%i +ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph /var/log/ganesha +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i +ExecStartPre={{ '/bin/mkdir' if ansible_facts['os_family'] == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha +ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} +--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ +-v /etc/ceph:/etc/ceph:z \ +-v /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring:z \ +-v /var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring:/etc/ceph/keyring:z \ +-v /etc/ganesha:/etc/ganesha:z \ +-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \ +-v /var/run/ceph:/var/run/ceph:z \ +-v /var/log/ceph:/var/log/ceph:z \ +-v /var/log/ganesha:/var/log/ganesha:z \ +-v /etc/localtime:/etc/localtime:ro \ +{{ ceph_nfs_docker_extra_env }} \ +--entrypoint=/usr/bin/ganesha.nfsd \ +--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \ +{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ +-F -L STDOUT +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-nfs-%i +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=15 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=multi-user.target diff --git a/roles/ceph-nfs/templates/ganesha.conf.j2 b/roles/ceph-nfs/templates/ganesha.conf.j2 new file mode 100644 index 000000000..7e6fab6c5 --- 
/dev/null +++ b/roles/ceph-nfs/templates/ganesha.conf.j2 @@ -0,0 +1,124 @@ +#jinja2: trim_blocks: "true", lstrip_blocks: "true" +# {{ ansible_managed }} + +{% if ceph_nfs_dynamic_exports | bool and not ceph_nfs_rados_backend | bool %} +%include /etc/ganesha/export.d/INDEX.conf +{% endif %} + +NFS_Core_Param +{ +{% if ceph_nfs_bind_addr is defined %} + Bind_Addr={{ ceph_nfs_bind_addr }}; +{% endif %} +{{ ganesha_core_param_overrides | default(None) }} +} + +{% if ceph_nfs_disable_caching | bool or nfs_file_gw | bool %} +EXPORT_DEFAULTS { + Attr_Expiration_Time = 0; +} + +CACHEINODE { + Dir_Chunk = 0; + + NParts = 1; + Cache_Size = 1; +} +{% endif %} + +{% if ceph_nfs_rados_backend | bool %} +RADOS_URLS { + ceph_conf = '/etc/ceph/{{ cluster }}.conf'; + userid = "{{ ceph_nfs_ceph_user }}"; +} +%url rados://{{ cephfs_data_pool.name }}/{{ ceph_nfs_rados_export_index }} + +NFSv4 { + RecoveryBackend = 'rados_kv'; + IdmapConf = "{{ ceph_nfs_idmap_conf }}"; +} +RADOS_KV { + ceph_conf = '/etc/ceph/{{ cluster }}.conf'; + userid = "{{ ceph_nfs_ceph_user }}"; + pool = "{{ cephfs_data_pool.name }}"; +} +{% endif %} + +{% if nfs_file_gw | bool %} +EXPORT +{ + Export_id={{ ceph_nfs_ceph_export_id }}; + + Path = "/"; + + Pseudo = {{ ceph_nfs_ceph_pseudo_path }}; + + Access_Type = {{ ceph_nfs_ceph_access_type }}; + + Protocols = {{ ceph_nfs_ceph_protocols }}; + + Transports = TCP; + + SecType = {{ ceph_nfs_ceph_sectype }}; + + Squash = {{ ceph_nfs_ceph_squash }}; + + Attr_Expiration_Time = 0; + + FSAL { + Name = CEPH; + User_Id = "{{ ceph_nfs_ceph_user }}"; + } + + {{ ganesha_ceph_export_overrides | default(None) }} +} +{% endif %} +{% if nfs_obj_gw | bool %} +EXPORT +{ + Export_id={{ ceph_nfs_rgw_export_id }}; + + Path = "/"; + + Pseudo = {{ ceph_nfs_rgw_pseudo_path }}; + + Access_Type = {{ ceph_nfs_rgw_access_type }}; + + Protocols = {{ ceph_nfs_rgw_protocols }}; + + Transports = TCP; + + SecType = {{ ceph_nfs_rgw_sectype }}; + + Squash = {{ ceph_nfs_rgw_squash }}; + + FSAL { + Name = RGW; + User_Id = "{{ ceph_nfs_rgw_user }}"; + Access_Key_Id ="{{ ceph_nfs_rgw_access_key }}"; + Secret_Access_Key = "{{ ceph_nfs_rgw_secret_key }}"; + } + + {{ ganesha_rgw_export_overrides | default(None) }} + +} + +RGW { + ceph_conf = "/etc/ceph/{{ cluster }}.conf"; + cluster = "{{ cluster }}"; + name = "{{ rgw_client_name }}"; + {{ ganesha_rgw_section_overrides | default(None) }} +} +{% endif %} + +LOG { + Facility { + name = FILE; + destination = "{{ ceph_nfs_log_file }}"; + enable = active; + } + + {{ ganesha_log_overrides | default(None) }} +} + +{{ ganesha_conf_overrides | default(None) }} diff --git a/roles/ceph-nfs/templates/idmap.conf.j2 b/roles/ceph-nfs/templates/idmap.conf.j2 new file mode 100644 index 000000000..d052232d2 --- /dev/null +++ b/roles/ceph-nfs/templates/idmap.conf.j2 @@ -0,0 +1,137 @@ +[General] +#Verbosity = 0 +# The following should be set to the local NFSv4 domain name +# The default is the host's DNS domain name. +#Domain = local.domain.edu + +# In multi-domain environments, some NFS servers will append the identity +# management domain to the owner and owner_group in lieu of a true NFSv4 +# domain. This option can facilitate lookups in such environments. If +# set to a value other than "none", the nsswitch plugin will first pass +# the name to the password/group lookup function without stripping the +# domain off. 
If that mapping fails then the plugin will try again using +# the old method (comparing the domain in the string to the Domain value, +# stripping it if it matches, and passing the resulting short name to the +# lookup function). Valid values are "user", "group", "both", and +# "none". The default is "none". +#No-Strip = none + +# Winbind has a quirk whereby doing a group lookup in UPN format +# (e.g. staff@americas.example.com) will cause the group to be +# displayed prefixed with the full domain in uppercase +# (e.g. AMERICAS.EXAMPLE.COM\staff) instead of in the familiar netbios +# name format (e.g. AMERICAS\staff). Setting this option to true +# causes the name to be reformatted before passing it to the group +# lookup function in order to work around this. This setting is +# ignored unless No-Strip is set to either "both" or "group". +# The default is "false". +#Reformat-Group = false + +# The following is a comma-separated list of Kerberos realm +# names that should be considered to be equivalent to the +# local realm, such that @REALM.A can be assumed to +# be the same user as @REALM.B +# If not specified, the default local realm is the domain name, +# which defaults to the host's DNS domain name, +# translated to upper-case. +# Note that if this value is specified, the local realm name +# must be included in the list! +#Local-Realms = + +[Mapping] + +#Nobody-User = nobody +#Nobody-Group = nobody + +[Translation] + +# Translation Method is an comma-separated, ordered list of +# translation methods that can be used. Distributed methods +# include "nsswitch", "umich_ldap", and "static". Each method +# is a dynamically loadable plugin library. +# New methods may be defined and inserted in the list. +# The default is "nsswitch". +#Method = nsswitch + +# Optional. This is a comma-separated, ordered list of +# translation methods to be used for translating GSS +# authenticated names to ids. +# If this option is omitted, the same methods as those +# specified in "Method" are used. +#GSS-Methods = + +#-------------------------------------------------------------------# +# The following are used only for the "static" Translation Method. +#-------------------------------------------------------------------# +[Static] + +# A "static" list of GSS-Authenticated names to +# local user name mappings + +#someuser@REALM = localuser + + +#-------------------------------------------------------------------# +# The following are used only for the "umich_ldap" Translation Method. +#-------------------------------------------------------------------# + +[UMICH_SCHEMA] + +# server information (REQUIRED) +LDAP_server = ldap-server.local.domain.edu + +# the default search base (REQUIRED) +LDAP_base = dc=local,dc=domain,dc=edu + +#-----------------------------------------------------------# +# The remaining options have defaults (as shown) +# and are therefore not required. 
+#-----------------------------------------------------------# + +# whether or not to perform canonicalization on the +# name given as LDAP_server +#LDAP_canonicalize_name = true + +# absolute search base for (people) accounts +#LDAP_people_base = + +# absolute search base for groups +#LDAP_group_base = + +# Set to true to enable SSL - anything else is not enabled +#LDAP_use_ssl = false + +# You must specify a CA certificate location if you enable SSL +#LDAP_ca_cert = /etc/ldapca.cert + +# Objectclass mapping information + +# Mapping for the person (account) object class +#NFSv4_person_objectclass = NFSv4RemotePerson + +# Mapping for the nfsv4name attribute the person object +#NFSv4_name_attr = NFSv4Name + +# Mapping for the UID number +#NFSv4_uid_attr = UIDNumber + +# Mapping for the GSSAPI Principal name +#GSS_principal_attr = GSSAuthName + +# Mapping for the account name attribute (usually uid) +# The value for this attribute must match the value of +# the group member attribute - NFSv4_member_attr +#NFSv4_acctname_attr = uid + +# Mapping for the group object class +#NFSv4_group_objectclass = NFSv4RemoteGroup + +# Mapping for the GID attribute +#NFSv4_gid_attr = GIDNumber + +# Mapping for the Group NFSv4 name +#NFSv4_group_attr = NFSv4Name + +# Mapping for the Group member attribute (usually memberUID) +# The value of this attribute must match the value of NFSv4_acctname_attr +#NFSv4_member_attr = memberUID \ No newline at end of file diff --git a/roles/ceph-nfs/templates/systemd-run.j2 b/roles/ceph-nfs/templates/systemd-run.j2 new file mode 100644 index 000000000..868cd19de --- /dev/null +++ b/roles/ceph-nfs/templates/systemd-run.j2 @@ -0,0 +1,27 @@ +#!/bin/sh +T=$1 +N=$2 + +# start nfs-ganesha +/usr/bin/{{ container_binary }} run --rm --net=host \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} +--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ +-v /var/lib/ceph:/var/lib/ceph:z \ +-v /etc/ceph:/etc/ceph:z \ +-v /var/lib/nfs/ganesha:/var/lib/nfs/ganesha:z \ +-v /etc/ganesha:/etc/ganesha:z \ +-v /var/run/ceph:/var/run/ceph:z \ +-v /var/log/ceph:/var/log/ceph:z \ +-v /var/log/ganesha:/var/log/ganesha:z \ +{% if ceph_nfs_dynamic_exports | bool %} +--privileged \ +-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \ +{% endif -%} +-v /etc/localtime:/etc/localtime:ro \ +{{ ceph_nfs_docker_extra_env }} \ +--entrypoint=/usr/bin/ganesha.nfsd \ +--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \ +{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} +-F -L STDOUT "${GANESHA_EPOCH}" diff --git a/roles/ceph-validate/tasks/check_nfs.yml b/roles/ceph-validate/tasks/check_nfs.yml new file mode 100644 index 000000000..2c26aa4be --- /dev/null +++ b/roles/ceph-validate/tasks/check_nfs.yml @@ -0,0 +1,15 @@ +--- +- name: Fail if ceph_nfs_rgw_access_key or ceph_nfs_rgw_secret_key are undefined (nfs standalone) + ansible.builtin.fail: + msg: "ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key must be set if nfs_obj_gw is True" + when: + - nfs_obj_gw | bool + - groups.get(mon_group_name, []) | length == 0 + - (ceph_nfs_rgw_access_key is undefined or ceph_nfs_rgw_secret_key is undefined) + +- name: Fail on openSUSE Leap 15.x using distro packages + ansible.builtin.fail: + msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')" + when: + - ceph_origin == 'distro' + - 
ansible_facts['distribution'] == 'openSUSE Leap' diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml index a050788a6..885ffb36a 100644 --- a/roles/ceph-validate/tasks/main.yml +++ b/roles/ceph-validate/tasks/main.yml @@ -112,6 +112,10 @@ - inventory_hostname in groups.get(rgw_group_name, []) - rgw_create_pools is defined +- name: Include check_nfs.yml + ansible.builtin.include_tasks: check_nfs.yml + when: inventory_hostname in groups.get(nfs_group_name, []) + - name: Include check_rbdmirror.yml ansible.builtin.include_tasks: check_rbdmirror.yml when: diff --git a/site-container.yml.sample b/site-container.yml.sample index 9facb5a5f..298709ddd 100644 --- a/site-container.yml.sample +++ b/site-container.yml.sample @@ -15,6 +15,7 @@ - osds - mdss - rgws + - nfss - rbdmirrors - clients - mgrs @@ -325,6 +326,44 @@ status: "Complete" end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" +- hosts: nfss + become: True + gather_facts: false + any_errors_fatal: true + tasks: + # pre-tasks for following imports - + - name: set ceph nfs install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_nfs: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-nfs + + # post-tasks for following imports - + - name: set ceph nfs install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_nfs: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + - hosts: rbdmirrors become: True gather_facts: false diff --git a/site.yml.sample b/site.yml.sample index 046eb64b7..8811d3cfc 100644 --- a/site.yml.sample +++ b/site.yml.sample @@ -16,6 +16,7 @@ - osds - mdss - rgws + - nfss - rbdmirrors - clients - mgrs @@ -317,6 +318,44 @@ status: "Complete" end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" +- hosts: nfss + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph nfs install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_nfs: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-nfs + + post_tasks: + - name: set ceph nfs install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_nfs: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + - hosts: rbdmirrors gather_facts: false become: True diff --git a/tests/conftest.py b/tests/conftest.py index 5c83246e5..69de7ac69 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -167,11 +167,11 @@ def node(host, request): if request.node.get_closest_marker('rbdmirror_secondary') and not ceph_rbd_mirror_remote_user: # noqa E501 pytest.skip('Not a valid test for a non-secondary rbd-mirror node') - if request.node.get_closest_marker('ceph_crash') and sanitized_group_names in [['clients'], ['monitoring']]: - pytest.skip('Not a valid test for client nodes') + if request.node.get_closest_marker('ceph_crash') and 
sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]: + pytest.skip('Not a valid test for nfs or client nodes') - if request.node.get_closest_marker('ceph_exporter') and sanitized_group_names in [['clients'], ['monitoring']]: - pytest.skip('Not a valid test for client nodes') + if request.node.get_closest_marker('ceph_exporter') and sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]: + pytest.skip('Not a valid test for nfs or client nodes') if request.node.get_closest_marker("no_docker") and docker: pytest.skip( @@ -220,6 +220,8 @@ def pytest_collection_modifyitems(session, config, items): item.add_marker(pytest.mark.rbdmirrors) elif "rgw" in test_path: item.add_marker(pytest.mark.rgws) + elif "nfs" in test_path: + item.add_marker(pytest.mark.nfss) elif "grafana" in test_path: item.add_marker(pytest.mark.grafanas) else: diff --git a/tests/functional/add-mdss/container/vagrant_variables.yml b/tests/functional/add-mdss/container/vagrant_variables.yml index d8a0037ad..3b0f83df6 100644 --- a/tests/functional/add-mdss/container/vagrant_variables.yml +++ b/tests/functional/add-mdss/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 1 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/add-mdss/vagrant_variables.yml b/tests/functional/add-mdss/vagrant_variables.yml index bfec446d8..ba9c21784 100644 --- a/tests/functional/add-mdss/vagrant_variables.yml +++ b/tests/functional/add-mdss/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 1 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/add-mgrs/container/vagrant_variables.yml b/tests/functional/add-mgrs/container/vagrant_variables.yml index fe4092bdb..7c5bfe8d0 100644 --- a/tests/functional/add-mgrs/container/vagrant_variables.yml +++ b/tests/functional/add-mgrs/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/add-mgrs/vagrant_variables.yml b/tests/functional/add-mgrs/vagrant_variables.yml index 4b47784b2..5c0cf696b 100644 --- a/tests/functional/add-mgrs/vagrant_variables.yml +++ b/tests/functional/add-mgrs/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/add-mons/container/vagrant_variables.yml b/tests/functional/add-mons/container/vagrant_variables.yml index 813a01e1a..9f97f3c78 100644 --- a/tests/functional/add-mons/container/vagrant_variables.yml +++ b/tests/functional/add-mons/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 2 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/add-mons/vagrant_variables.yml b/tests/functional/add-mons/vagrant_variables.yml index b71a30375..d9b258215 100644 --- a/tests/functional/add-mons/vagrant_variables.yml +++ b/tests/functional/add-mons/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 2 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/add-osds/container/vagrant_variables.yml b/tests/functional/add-osds/container/vagrant_variables.yml index abc9bcc4d..8321f852e 100644 --- a/tests/functional/add-osds/container/vagrant_variables.yml +++ b/tests/functional/add-osds/container/vagrant_variables.yml @@ -8,6 
+8,7 @@ mon_vms: 1 osd_vms: 2 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/add-osds/vagrant_variables.yml b/tests/functional/add-osds/vagrant_variables.yml index a730470a4..0a9b76f8e 100644 --- a/tests/functional/add-osds/vagrant_variables.yml +++ b/tests/functional/add-osds/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 2 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/add-rbdmirrors/container/vagrant_variables.yml b/tests/functional/add-rbdmirrors/container/vagrant_variables.yml index cbf4fa5de..4d6507aff 100644 --- a/tests/functional/add-rbdmirrors/container/vagrant_variables.yml +++ b/tests/functional/add-rbdmirrors/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 1 client_vms: 0 diff --git a/tests/functional/add-rbdmirrors/vagrant_variables.yml b/tests/functional/add-rbdmirrors/vagrant_variables.yml index 209d36135..8866b1a5d 100644 --- a/tests/functional/add-rbdmirrors/vagrant_variables.yml +++ b/tests/functional/add-rbdmirrors/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 1 client_vms: 0 diff --git a/tests/functional/add-rgws/container/vagrant_variables.yml b/tests/functional/add-rgws/container/vagrant_variables.yml index f9aa5172e..6592d0c03 100644 --- a/tests/functional/add-rgws/container/vagrant_variables.yml +++ b/tests/functional/add-rgws/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 1 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/add-rgws/vagrant_variables.yml b/tests/functional/add-rgws/vagrant_variables.yml index 4738fb700..8f4ce5460 100644 --- a/tests/functional/add-rgws/vagrant_variables.yml +++ b/tests/functional/add-rgws/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 1 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/all-in-one/container/vagrant_variables.yml b/tests/functional/all-in-one/container/vagrant_variables.yml index d6c0193f6..ee312ec6f 100644 --- a/tests/functional/all-in-one/container/vagrant_variables.yml +++ b/tests/functional/all-in-one/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 0 osd_vms: 3 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/all-in-one/vagrant_variables.yml b/tests/functional/all-in-one/vagrant_variables.yml index 45c4f0fd2..9f8c44749 100644 --- a/tests/functional/all-in-one/vagrant_variables.yml +++ b/tests/functional/all-in-one/vagrant_variables.yml @@ -4,6 +4,7 @@ mon_vms: 0 osd_vms: 3 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/all_daemons/container/hosts b/tests/functional/all_daemons/container/hosts index ec6d970a4..51d488ccb 100644 --- a/tests/functional/all_daemons/container/hosts +++ b/tests/functional/all_daemons/container/hosts @@ -19,6 +19,9 @@ mds2 [rgws] rgw0 +#[nfss] +#nfs0 + [clients] client0 client1 diff --git a/tests/functional/all_daemons/container/vagrant_variables.yml b/tests/functional/all_daemons/container/vagrant_variables.yml index aeb6859f1..8e08daa9e 100644 --- a/tests/functional/all_daemons/container/vagrant_variables.yml +++ 
b/tests/functional/all_daemons/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 3 mds_vms: 3 rgw_vms: 1 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 1 client_vms: 2 diff --git a/tests/functional/all_daemons/group_vars/nfss b/tests/functional/all_daemons/group_vars/nfss new file mode 100644 index 000000000..826bdfecd --- /dev/null +++ b/tests/functional/all_daemons/group_vars/nfss @@ -0,0 +1,10 @@ +copy_admin_key: true +nfs_file_gw: false +nfs_obj_gw: true +ganesha_conf_overrides: | + CACHEINODE { + Entries_HWMark = 100000; + } +nfs_ganesha_stable: false +nfs_ganesha_dev: true +nfs_ganesha_flavor: "ceph_main" diff --git a/tests/functional/all_daemons/hosts b/tests/functional/all_daemons/hosts index ff4b6f561..8e2019776 100644 --- a/tests/functional/all_daemons/hosts +++ b/tests/functional/all_daemons/hosts @@ -23,6 +23,9 @@ rgw0 client0 client1 +#[nfss] +#nfs0 + [rbdmirrors] rbd-mirror0 diff --git a/tests/functional/all_daemons/vagrant_variables.yml b/tests/functional/all_daemons/vagrant_variables.yml index e32be0b54..48653bbbb 100644 --- a/tests/functional/all_daemons/vagrant_variables.yml +++ b/tests/functional/all_daemons/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 3 mds_vms: 3 rgw_vms: 1 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 1 client_vms: 2 diff --git a/tests/functional/all_daemons_ipv6/container/hosts b/tests/functional/all_daemons_ipv6/container/hosts index ec6d970a4..51d488ccb 100644 --- a/tests/functional/all_daemons_ipv6/container/hosts +++ b/tests/functional/all_daemons_ipv6/container/hosts @@ -19,6 +19,9 @@ mds2 [rgws] rgw0 +#[nfss] +#nfs0 + [clients] client0 client1 diff --git a/tests/functional/all_daemons_ipv6/container/vagrant_variables.yml b/tests/functional/all_daemons_ipv6/container/vagrant_variables.yml index 1a67bd064..de2cccc3b 100644 --- a/tests/functional/all_daemons_ipv6/container/vagrant_variables.yml +++ b/tests/functional/all_daemons_ipv6/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 3 mds_vms: 3 rgw_vms: 1 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 1 client_vms: 2 diff --git a/tests/functional/all_daemons_ipv6/group_vars/nfss b/tests/functional/all_daemons_ipv6/group_vars/nfss new file mode 100644 index 000000000..fc280e251 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/group_vars/nfss @@ -0,0 +1,10 @@ +copy_admin_key: true +nfs_file_gw: false +nfs_obj_gw: true +ganesha_conf_overrides: | + CACHEINODE { + Entries_HWMark = 100000; + } +nfs_ganesha_stable: true +nfs_ganesha_dev: false +nfs_ganesha_flavor: "ceph_main" diff --git a/tests/functional/all_daemons_ipv6/hosts b/tests/functional/all_daemons_ipv6/hosts index ff4b6f561..8e2019776 100644 --- a/tests/functional/all_daemons_ipv6/hosts +++ b/tests/functional/all_daemons_ipv6/hosts @@ -23,6 +23,9 @@ rgw0 client0 client1 +#[nfss] +#nfs0 + [rbdmirrors] rbd-mirror0 diff --git a/tests/functional/all_daemons_ipv6/vagrant_variables.yml b/tests/functional/all_daemons_ipv6/vagrant_variables.yml index 5ef0dc8bc..b512776af 100644 --- a/tests/functional/all_daemons_ipv6/vagrant_variables.yml +++ b/tests/functional/all_daemons_ipv6/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 3 mds_vms: 3 rgw_vms: 1 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 1 client_vms: 2 diff --git a/tests/functional/cephadm/hosts b/tests/functional/cephadm/hosts index 39e1133e9..28a105b30 100644 --- a/tests/functional/cephadm/hosts +++ b/tests/functional/cephadm/hosts @@ -18,6 +18,9 @@ mds0 [rgws] rgw0 +[nfss] +nfs0 + [rbdmirrors] rbd-mirror0 diff 
--git a/tests/functional/cephadm/vagrant_variables.yml b/tests/functional/cephadm/vagrant_variables.yml index 19cfd396b..433e31965 100644 --- a/tests/functional/cephadm/vagrant_variables.yml +++ b/tests/functional/cephadm/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 2 mds_vms: 1 rgw_vms: 1 +nfs_vms: 1 grafana_server_vms: 0 rbd_mirror_vms: 1 client_vms: 0 diff --git a/tests/functional/collocation/container/hosts b/tests/functional/collocation/container/hosts index dad06a4c8..a699db833 100644 --- a/tests/functional/collocation/container/hosts +++ b/tests/functional/collocation/container/hosts @@ -19,5 +19,9 @@ mds0 rgw0 mds0 +#[nfss] +#rgw0 +#mds0 + [monitoring] -mon0 +mon0 \ No newline at end of file diff --git a/tests/functional/collocation/container/vagrant_variables.yml b/tests/functional/collocation/container/vagrant_variables.yml index 6539bc807..fb5059e96 100644 --- a/tests/functional/collocation/container/vagrant_variables.yml +++ b/tests/functional/collocation/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 2 mds_vms: 1 rgw_vms: 1 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/collocation/hosts b/tests/functional/collocation/hosts index 2aaf1d300..95b228ef8 100644 --- a/tests/functional/collocation/hosts +++ b/tests/functional/collocation/hosts @@ -20,5 +20,9 @@ mds0 rgw0 mds0 +#[nfss] +#rgw0 +#mds0 + [monitoring] -mon0 +mon0 \ No newline at end of file diff --git a/tests/functional/collocation/vagrant_variables.yml b/tests/functional/collocation/vagrant_variables.yml index a68c8359e..a53450e6f 100644 --- a/tests/functional/collocation/vagrant_variables.yml +++ b/tests/functional/collocation/vagrant_variables.yml @@ -4,6 +4,7 @@ mon_vms: 3 osd_vms: 2 mds_vms: 1 rgw_vms: 1 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/dev_setup.yml b/tests/functional/dev_setup.yml index af9682f89..fa85231c2 100644 --- a/tests/functional/dev_setup.yml +++ b/tests/functional/dev_setup.yml @@ -17,6 +17,20 @@ regexp: "ceph_repository:.*" replace: "ceph_repository: dev" dest: "{{ group_vars_path }}/all" + + - block: + - name: ensure nfs_ganesha_stable is set to False + replace: + regexp: "nfs_ganesha_stable:.*" + replace: "nfs_ganesha_stable: false" + dest: "{{ group_vars_path }}/nfss" + + - name: ensure nfs_ganesha_dev is set to True + replace: + regexp: "nfs_ganesha_dev:.*" + replace: "nfs_ganesha_dev: true" + dest: "{{ group_vars_path }}/nfss" + when: "'all_daemons' in group_vars_path.split('/')" when: change_dir is defined - name: print contents of {{ group_vars_path }}/all diff --git a/tests/functional/docker2podman/vagrant_variables.yml b/tests/functional/docker2podman/vagrant_variables.yml index f7e461a5b..921077308 100644 --- a/tests/functional/docker2podman/vagrant_variables.yml +++ b/tests/functional/docker2podman/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/external_clients/container/vagrant_variables.yml b/tests/functional/external_clients/container/vagrant_variables.yml index 15af9e6a9..4892f0d08 100644 --- a/tests/functional/external_clients/container/vagrant_variables.yml +++ b/tests/functional/external_clients/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 0 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 2 diff --git 
a/tests/functional/external_clients/vagrant_variables.yml b/tests/functional/external_clients/vagrant_variables.yml index 39e60ce64..827dcc928 100644 --- a/tests/functional/external_clients/vagrant_variables.yml +++ b/tests/functional/external_clients/vagrant_variables.yml @@ -4,6 +4,7 @@ mon_vms: 3 osd_vms: 0 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 2 diff --git a/tests/functional/infra_lv_create/vagrant_variables.yml b/tests/functional/infra_lv_create/vagrant_variables.yml index f3f4204ec..85e074d79 100644 --- a/tests/functional/infra_lv_create/vagrant_variables.yml +++ b/tests/functional/infra_lv_create/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 0 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/lvm-auto-discovery/container/vagrant_variables.yml b/tests/functional/lvm-auto-discovery/container/vagrant_variables.yml index 708876b30..516e14c0a 100644 --- a/tests/functional/lvm-auto-discovery/container/vagrant_variables.yml +++ b/tests/functional/lvm-auto-discovery/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/lvm-auto-discovery/vagrant_variables.yml b/tests/functional/lvm-auto-discovery/vagrant_variables.yml index 83226a731..5bdaadfa0 100644 --- a/tests/functional/lvm-auto-discovery/vagrant_variables.yml +++ b/tests/functional/lvm-auto-discovery/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/lvm-batch/container/vagrant_variables.yml b/tests/functional/lvm-batch/container/vagrant_variables.yml index 52cb2c983..f53ba8280 100644 --- a/tests/functional/lvm-batch/container/vagrant_variables.yml +++ b/tests/functional/lvm-batch/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/lvm-batch/vagrant_variables.yml b/tests/functional/lvm-batch/vagrant_variables.yml index 83226a731..5bdaadfa0 100644 --- a/tests/functional/lvm-batch/vagrant_variables.yml +++ b/tests/functional/lvm-batch/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/lvm-osds/container/vagrant_variables.yml b/tests/functional/lvm-osds/container/vagrant_variables.yml index 59d880159..d4418d89b 100644 --- a/tests/functional/lvm-osds/container/vagrant_variables.yml +++ b/tests/functional/lvm-osds/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 4 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/lvm-osds/vagrant_variables.yml b/tests/functional/lvm-osds/vagrant_variables.yml index 19ef7be88..b5d3089f2 100644 --- a/tests/functional/lvm-osds/vagrant_variables.yml +++ b/tests/functional/lvm-osds/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 4 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml b/tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml index b6b2a483f..2603e244f 100644 --- 
a/tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml +++ b/tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/podman/hosts b/tests/functional/podman/hosts index 8015ceaf3..dea6a9e55 100644 --- a/tests/functional/podman/hosts +++ b/tests/functional/podman/hosts @@ -13,7 +13,10 @@ mds0 [rgws] rgw0 -clients] +#[nfss] +#nfs0 + +[clients] client0 client1 @@ -24,4 +27,4 @@ rbd-mirror0 mon0 #[all:vars] -#ansible_python_interpreter=/usr/bin/python3 +#ansible_python_interpreter=/usr/bin/python3 \ No newline at end of file diff --git a/tests/functional/podman/vagrant_variables.yml b/tests/functional/podman/vagrant_variables.yml index eafecc2b2..a4ff599bd 100644 --- a/tests/functional/podman/vagrant_variables.yml +++ b/tests/functional/podman/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 2 mds_vms: 1 rgw_vms: 1 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 1 client_vms: 2 diff --git a/tests/functional/rbdmirror/container/secondary/vagrant_variables.yml b/tests/functional/rbdmirror/container/secondary/vagrant_variables.yml index a538d5a5b..2b0423c3b 100644 --- a/tests/functional/rbdmirror/container/secondary/vagrant_variables.yml +++ b/tests/functional/rbdmirror/container/secondary/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/rbdmirror/container/vagrant_variables.yml b/tests/functional/rbdmirror/container/vagrant_variables.yml index 123f03af3..eee5c310e 100644 --- a/tests/functional/rbdmirror/container/vagrant_variables.yml +++ b/tests/functional/rbdmirror/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/rbdmirror/secondary/vagrant_variables.yml b/tests/functional/rbdmirror/secondary/vagrant_variables.yml index 3fdfefe42..2b8351686 100644 --- a/tests/functional/rbdmirror/secondary/vagrant_variables.yml +++ b/tests/functional/rbdmirror/secondary/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/rbdmirror/vagrant_variables.yml b/tests/functional/rbdmirror/vagrant_variables.yml index ce204efc5..105cad593 100644 --- a/tests/functional/rbdmirror/vagrant_variables.yml +++ b/tests/functional/rbdmirror/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/shrink_mds/container/vagrant_variables.yml b/tests/functional/shrink_mds/container/vagrant_variables.yml index e076d1711..4bcf2c529 100644 --- a/tests/functional/shrink_mds/container/vagrant_variables.yml +++ b/tests/functional/shrink_mds/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 1 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/shrink_mds/vagrant_variables.yml b/tests/functional/shrink_mds/vagrant_variables.yml index 98068c12e..e0a7cf1a8 100644 --- a/tests/functional/shrink_mds/vagrant_variables.yml +++ b/tests/functional/shrink_mds/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 1 rgw_vms: 0 +nfs_vms: 0 
grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/shrink_mgr/container/vagrant_variables.yml b/tests/functional/shrink_mgr/container/vagrant_variables.yml index c4a6f37bf..a26e72669 100644 --- a/tests/functional/shrink_mgr/container/vagrant_variables.yml +++ b/tests/functional/shrink_mgr/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/shrink_mgr/vagrant_variables.yml b/tests/functional/shrink_mgr/vagrant_variables.yml index 13acb0700..90c50d27f 100644 --- a/tests/functional/shrink_mgr/vagrant_variables.yml +++ b/tests/functional/shrink_mgr/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/shrink_mon/container/vagrant_variables.yml b/tests/functional/shrink_mon/container/vagrant_variables.yml index 890fb0092..d63a95a8d 100644 --- a/tests/functional/shrink_mon/container/vagrant_variables.yml +++ b/tests/functional/shrink_mon/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/shrink_mon/vagrant_variables.yml b/tests/functional/shrink_mon/vagrant_variables.yml index 3b98de5be..bf0038ad0 100644 --- a/tests/functional/shrink_mon/vagrant_variables.yml +++ b/tests/functional/shrink_mon/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 3 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/shrink_osd/container/vagrant_variables.yml b/tests/functional/shrink_osd/container/vagrant_variables.yml index 30f35ecd6..b69ecb35e 100644 --- a/tests/functional/shrink_osd/container/vagrant_variables.yml +++ b/tests/functional/shrink_osd/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 2 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/shrink_osd/vagrant_variables.yml b/tests/functional/shrink_osd/vagrant_variables.yml index e3be118f1..7d7da7a99 100644 --- a/tests/functional/shrink_osd/vagrant_variables.yml +++ b/tests/functional/shrink_osd/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 2 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 0 client_vms: 0 diff --git a/tests/functional/shrink_rbdmirror/container/vagrant_variables.yml b/tests/functional/shrink_rbdmirror/container/vagrant_variables.yml index c91eb2731..2f16c370f 100644 --- a/tests/functional/shrink_rbdmirror/container/vagrant_variables.yml +++ b/tests/functional/shrink_rbdmirror/container/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 1 client_vms: 0 diff --git a/tests/functional/shrink_rbdmirror/vagrant_variables.yml b/tests/functional/shrink_rbdmirror/vagrant_variables.yml index 409c700ec..85f92045c 100644 --- a/tests/functional/shrink_rbdmirror/vagrant_variables.yml +++ b/tests/functional/shrink_rbdmirror/vagrant_variables.yml @@ -8,6 +8,7 @@ mon_vms: 1 osd_vms: 1 mds_vms: 0 rgw_vms: 0 +nfs_vms: 0 grafana_server_vms: 0 rbd_mirror_vms: 1 client_vms: 0 diff --git a/tests/functional/shrink_rgw/container/vagrant_variables.yml b/tests/functional/shrink_rgw/container/vagrant_variables.yml index 4927acc69..b701d05b4 100644 --- 
a/tests/functional/shrink_rgw/container/vagrant_variables.yml
+++ b/tests/functional/shrink_rgw/container/vagrant_variables.yml
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
diff --git a/tests/functional/shrink_rgw/vagrant_variables.yml b/tests/functional/shrink_rgw/vagrant_variables.yml
index 2a1a6fa17..41fb2f346 100644
--- a/tests/functional/shrink_rgw/vagrant_variables.yml
+++ b/tests/functional/shrink_rgw/vagrant_variables.yml
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
diff --git a/tests/functional/subset_update/container/vagrant_variables.yml b/tests/functional/subset_update/container/vagrant_variables.yml
index 535158bef..3419fc4e1 100644
--- a/tests/functional/subset_update/container/vagrant_variables.yml
+++ b/tests/functional/subset_update/container/vagrant_variables.yml
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 0
 rgw_vms: 2
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
diff --git a/tests/functional/subset_update/group_vars/nfss b/tests/functional/subset_update/group_vars/nfss
new file mode 100644
index 000000000..fc280e251
--- /dev/null
+++ b/tests/functional/subset_update/group_vars/nfss
@@ -0,0 +1,10 @@
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+  CACHEINODE {
+    Entries_HWMark = 100000;
+  }
+nfs_ganesha_stable: true
+nfs_ganesha_dev: false
+nfs_ganesha_flavor: "ceph_main"
diff --git a/tests/functional/subset_update/vagrant_variables.yml b/tests/functional/subset_update/vagrant_variables.yml
index a1073f23a..c9105ddf3 100644
--- a/tests/functional/subset_update/vagrant_variables.yml
+++ b/tests/functional/subset_update/vagrant_variables.yml
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 0
 rgw_vms: 2
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
diff --git a/tests/functional/tests/nfs/test_nfs_ganesha.py b/tests/functional/tests/nfs/test_nfs_ganesha.py
new file mode 100644
index 000000000..fda75adf7
--- /dev/null
+++ b/tests/functional/tests/nfs/test_nfs_ganesha.py
@@ -0,0 +1,48 @@
+import json
+import pytest
+
+
+class TestNFSs(object):
+
+    @pytest.mark.no_docker
+    @pytest.mark.parametrize('pkg', [
+        'nfs-ganesha',
+        'nfs-ganesha-rgw'
+    ])
+    def test_nfs_ganesha_package_is_installed(self, node, host, pkg):
+        assert host.package(pkg).is_installed
+
+    @pytest.mark.no_docker
+    def test_nfs_service_enabled_and_running(self, node, host):
+        s = host.service("nfs-ganesha")
+        assert s.is_enabled
+        assert s.is_running
+
+    @pytest.mark.no_docker
+    def test_nfs_config_override(self, node, host):
+        assert host.file(
+            "/etc/ganesha/ganesha.conf").contains("Entries_HWMark")
+
+    def test_nfs_is_up(self, node, setup, ceph_status):
+        hostname = node["vars"]["inventory_hostname"]
+        cluster = setup["cluster_name"]
+        name = f"client.rgw.{hostname}"
+        output = ceph_status(f'/var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring', name=name)
+        keys = list(json.loads(
+            output)["servicemap"]["services"]["rgw-nfs"]["daemons"].keys())
+        keys.remove('summary')
+        daemons = json.loads(output)["servicemap"]["services"]["rgw-nfs"]["daemons"]
+        hostnames = []
+        for key in keys:
+            hostnames.append(daemons[key]['metadata']['hostname'])
+
+
+# NOTE (guits): This check must be fixed. (Permission denied error)
+# @pytest.mark.no_docker
+# def test_nfs_rgw_fsal_export(self, node, host):
+#     if(host.mount_point("/mnt").exists):
+#         cmd = host.run("sudo umount /mnt")
+#         assert cmd.rc == 0
+#     cmd = host.run("sudo mount.nfs localhost:/ceph /mnt/")
+#     assert cmd.rc == 0
+#     assert host.mount_point("/mnt").exists
diff --git a/tests/pytest.ini b/tests/pytest.ini
index e3ac911f4..d4c15634b 100644
--- a/tests/pytest.ini
+++ b/tests/pytest.ini
@@ -11,6 +11,7 @@ markers =
     mdss: for mds nodes
     mgrs: for mgr nodes
     mons: for mon nodes
+    nfss: for nfs nodes
     osds: for osd nodes
     rbdmirrors: for rbdmirror nodes
     rgws: for rgw nodes
diff --git a/vagrant_variables.yml.sample b/vagrant_variables.yml.sample
index 49ee8fc46..376f3a582 100644
--- a/vagrant_variables.yml.sample
+++ b/vagrant_variables.yml.sample
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
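
Aside (not part of the patch): test_nfs_is_up in the new test_nfs_ganesha.py walks the servicemap section of the ceph status JSON returned by the ceph_status fixture to collect the hostnames of the rgw-nfs daemons. The standalone Python sketch below reproduces that traversal against a made-up payload so the expected JSON shape is easier to see; the daemon id "4567" and hostname "nfs0" are placeholders, not values taken from this patch.

import json

# Illustrative payload only; real data comes from the ceph_status fixture.
sample_status = json.dumps({
    "servicemap": {
        "services": {
            "rgw-nfs": {
                "daemons": {
                    "summary": "",
                    "4567": {"metadata": {"hostname": "nfs0"}},
                },
            },
        },
    },
})


def rgw_nfs_hostnames(status_json):
    # Same dictionary path the test walks: servicemap -> services -> rgw-nfs -> daemons.
    daemons = json.loads(status_json)["servicemap"]["services"]["rgw-nfs"]["daemons"]
    # "daemons" mixes a "summary" entry with per-daemon records, which is why the
    # test drops that key (keys.remove('summary')) before iterating.
    return [entry["metadata"]["hostname"]
            for key, entry in daemons.items() if key != "summary"]


assert rgw_nfs_hostnames(sample_status) == ["nfs0"]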
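Similarly, the dev_setup.yml hunk earlier in this patch uses Ansible's replace module to force nfs_ganesha_stable: false and nfs_ganesha_dev: true in {{ group_vars_path }}/nfss when the all_daemons scenarios run against dev repositories. The snippet below is only an illustration of that same pair of substitutions in plain Python; the example path is one of the group_vars files touched above and is used here purely for demonstration.

import re
from pathlib import Path


def switch_nfss_group_vars_to_dev(path):
    # Equivalent of the two replace tasks added to tests/functional/dev_setup.yml:
    # flip the nfss group_vars so nfs-ganesha is pulled from the dev builds.
    text = Path(path).read_text()
    text = re.sub(r"nfs_ganesha_stable:.*", "nfs_ganesha_stable: false", text)
    text = re.sub(r"nfs_ganesha_dev:.*", "nfs_ganesha_dev: true", text)
    Path(path).write_text(text)


# Example invocation (illustrative path):
# switch_nfss_group_vars_to_dev("tests/functional/all_daemons/group_vars/nfss")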