Merge pull request #276 from ceph/rework-groupvar

Add more flags to RBD clients
pull/277/head
Leseb 2015-06-09 17:22:54 +02:00
commit e558b0c296
5 changed files with 59 additions and 6 deletions

View File

@ -61,7 +61,13 @@ dummy:
## Client options
#
rbd_concurrent_management_ops: 20
#rbd_cache: "true"
#rbd_cache_writethrough_until_flush: "true"
#rbd_concurrent_management_ops: 20
#rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions, this WON'T work if libvirt and kvm are installed
#rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_client_log_path: /var/log/rbd-clients/
#rbd_client_admin_socket_path: /var/run/ceph/rbd-clients/$cluster-$type.$id.$pid.$cctid.asok
## Monitor options
#
@ -136,3 +142,23 @@ rbd_concurrent_management_ops: 20
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
#common_single_host_mode: true
#############
# OS TUNING #
#############
#disable_transparent_hugepage: true
#disable_swap: true
#os_tuning_params:
# - { name: kernel.pid_max, value: 4194303 }
# - { name: fs.file-max, value: 26234859 }
# - { name: vm.zone_reclaim_mode, value: 0 }
# - { name: vm.vfs_cache_pressure, value: 50 }
##########
# DOCKER #
##########
#docker: false

View File

@ -55,10 +55,7 @@ cephx_require_signatures: true # Kernel RBD does NOT support signatures for Kern
cephx_cluster_require_signatures: true
cephx_service_require_signatures: false
max_open_files: 131072
# Logging
disable_in_memory_logs: true # set this to false while enabling the options below
rbd_client_log_file: /var/log/qemu/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
# Debug logs
enable_debug_global: false
@ -72,7 +69,13 @@ debug_mds_level: 20
## Client options
#
rbd_cache: "true"
rbd_cache_writethrough_until_flush: "true"
rbd_concurrent_management_ops: 20
rbd_client_directories: false # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions, this WON'T work if libvirt and kvm are installed
rbd_client_log_file: /var/log/rbd-clients/qemu-guest-$pid.log # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_client_log_path: /var/log/rbd-clients/
rbd_client_admin_socket_path: /var/run/ceph/rbd-clients/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
## Monitor options
#

View File

@ -57,3 +57,15 @@
- ceph-fuse #|--> however while proceding to rolling upgrades and the 'ceph' package upgrade
- ceph-mds #|--> they don't get update so we need to force them
- libcephfs1 #|
# Create the log and admin-socket directories for RBD clients (QEMU/libvirt).
# NOTE: the with_items entries must be Jinja-templated variable references;
# bare names would create directories literally called
# "rbd_client_log_path" and "rbd_client_admin_socket_path".
- name: configure rbd clients directories
  file: >
    path={{ item }}
    state=directory
    owner=libvirt-qemu
    group=kvm
    mode=0755
  with_items:
    - "{{ rbd_client_log_path }}"
    - "{{ rbd_client_admin_socket_path }}"
  when: rbd_client_directories

View File

@ -63,3 +63,15 @@
- "{{ ceph_stable_ice_temp_path }}/kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm"
- "{{ ceph_stable_ice_temp_path }}/kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm"
when: ceph_stable_ice
# Create the log and admin-socket directories for RBD clients (QEMU/libvirt).
# NOTE: the with_items entries must be Jinja-templated variable references;
# bare names would create directories literally called
# "rbd_client_log_path" and "rbd_client_admin_socket_path".
- name: configure rbd clients directories
  file: >
    path={{ item }}
    state=directory
    owner=qemu
    group=libvirtd
    mode=0755
  with_items:
    - "{{ rbd_client_log_path }}"
    - "{{ rbd_client_admin_socket_path }}"
  when: rbd_client_directories

View File

@ -62,10 +62,10 @@
{% endif %}
[client]
rbd cache = true
rbd cache = {{ rbd_cache }}
rbd cache writethrough until flush = {{ rbd_cache_writethrough_until_flush }}
rbd concurrent management ops = {{ rbd_concurrent_management_ops }}
admin socket = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
admin socket = {{ rbd_client_admin_socket_path }} # must be writable by QEMU and allowed by SELinux or AppArmor
log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
[mon]