diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample
index 2a75fa6bb..6504ba5c5 100644
--- a/group_vars/all.yml.sample
+++ b/group_vars/all.yml.sample
@@ -497,9 +497,9 @@ dummy:
 # do not ever change this here
 #rolling_update: false
 
-
 #####################
 # Docker pull retry #
 #####################
 #docker_pull_retry: 3
 #docker_pull_timeout: "300s"
+
diff --git a/group_vars/nfss.yml.sample b/group_vars/nfss.yml.sample
index 4ec3a38a4..e0f3baeb4 100644
--- a/group_vars/nfss.yml.sample
+++ b/group_vars/nfss.yml.sample
@@ -44,6 +44,14 @@ dummy:
 ######################
 #ceph_nfs_log_file: "/var/log/ganesha/ganesha.log"
 #ceph_nfs_dynamic_exports: false
+# If set to true then rados is used to store ganesha exports
+# and client sessions information, this is useful if you
+# run multiple nfs-ganesha servers in active/passive mode and
+# want to do failover
+#ceph_nfs_rados_backend: false
+# Name of the rados object used to store a list of the export rados
+# object URLS
+#ceph_nfs_rados_export_index: "ganesha-export-index"
 # Address ganesha service should listen on, by default ganesha listens on all
 # addresses. (Note: ganesha ignores this parameter in current version due to
 # this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217)
diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample
index f95d195b2..8dfce845c 100644
--- a/group_vars/rhcs.yml.sample
+++ b/group_vars/rhcs.yml.sample
@@ -497,9 +497,9 @@ ceph_repository: rhcs
 # do not ever change this here
 #rolling_update: false
 
-
 #####################
 # Docker pull retry #
 #####################
 #docker_pull_retry: 3
 #docker_pull_timeout: "300s"
+
diff --git a/roles/ceph-nfs/defaults/main.yml b/roles/ceph-nfs/defaults/main.yml
index 6bbcf47e8..84723d1dc 100644
--- a/roles/ceph-nfs/defaults/main.yml
+++ b/roles/ceph-nfs/defaults/main.yml
@@ -36,6 +36,14 @@ nfs_obj_gw: true
 ######################
 ceph_nfs_log_file: "/var/log/ganesha/ganesha.log"
 ceph_nfs_dynamic_exports: false
+# If set to true then rados is used to store ganesha exports
+# and client sessions information, this is useful if you
+# run multiple nfs-ganesha servers in active/passive mode and
+# want to do failover
+ceph_nfs_rados_backend: false
+# Name of the rados object used to store a list of the export rados
+# object URLS
+ceph_nfs_rados_export_index: "ganesha-export-index"
 # Address ganesha service should listen on, by default ganesha listens on all
 # addresses. (Note: ganesha ignores this parameter in current version due to
 # this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217)
diff --git a/roles/ceph-nfs/tasks/start_nfs.yml b/roles/ceph-nfs/tasks/start_nfs.yml
index b1bed643e..aaf377d63 100644
--- a/roles/ceph-nfs/tasks/start_nfs.yml
+++ b/roles/ceph-nfs/tasks/start_nfs.yml
@@ -1,4 +1,19 @@
 ---
+- name: check if rados index object exists
+  shell: "rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
+  changed_when: false
+  failed_when: false
+  register: rados_index_exists
+  check_mode: no
+  when:
+    - ceph_nfs_rados_backend
+
+- name: create an empty rados index object
+  shell: "echo | rados -p {{ cephfs_data }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} -"
+  when:
+    - ceph_nfs_rados_backend
+    - rados_index_exists.rc != 0
+
 - name: create /etc/ganesha
   file:
     path: /etc/ganesha
diff --git a/roles/ceph-nfs/templates/ganesha.conf.j2 b/roles/ceph-nfs/templates/ganesha.conf.j2
index 7792de5db..8e05f793d 100644
--- a/roles/ceph-nfs/templates/ganesha.conf.j2
+++ b/roles/ceph-nfs/templates/ganesha.conf.j2
@@ -1,7 +1,7 @@
 #jinja2: trim_blocks: "true", lstrip_blocks: "true"
 # {{ ansible_managed }}
 
-{% if ceph_nfs_dynamic_exports %}
+{% if ceph_nfs_dynamic_exports and not ceph_nfs_rados_backend %}
 %include /etc/ganesha/export.d/INDEX.conf
 {% endif %}
 
@@ -12,6 +12,22 @@ NFS_Core_Param
 {% endif %}
 }
 
+{% if ceph_nfs_rados_backend %}
+RADOS_URLS {
+    ceph_conf = '/etc/ceph/{{ cluster }}.conf';
+    userid = "{{ ceph_nfs_ceph_user }}";
+}
+%url rados://{{ cephfs_data }}/{{ ceph_nfs_rados_export_index }}
+
+NFSv4 {
+    RecoveryBackend = 'rados_kv';
+}
+RADOS_KV {
+    ceph_conf = '/etc/ceph/{{ cluster }}.conf';
+    userid = "{{ ceph_nfs_ceph_user }}";
+    pool = "{{ cephfs_data }}";
+}
+{% endif %}
 {% if nfs_file_gw %}
 EXPORT