Merge pull request #694 from ceph/purge-ceph-conf-options

ceph-common: purge ceph.conf file
Leseb 2016-05-10 18:24:07 +02:00
commit 52b2f1cb85
4 changed files with 16 additions and 245 deletions


@@ -51,6 +51,21 @@ ceph_conf_overrides:
**Note:** we will no longer accept pull requests that modify the ceph.conf template unless the change is needed for the deployment itself. For simple configuration tweaks,
please use the `ceph_conf_overrides` variable.
## Special notes
If you are deploying a Ceph version older than Jewel,
it is highly recommended that you apply the following settings to the `ceph_conf_overrides` variable in your `group_vars/all` file:
```
ceph_conf_overrides:
osd:
osd recovery max active: 5
osd max backfills: 2
osd recovery op priority: 2
osd recovery threads: 1
```
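Each top-level key under `ceph_conf_overrides` corresponds to a section of the rendered ceph.conf (`global`, `mon`, `osd`, `client`, ...), and the options listed beneath it are written into that section. As a minimal sketch, the sections and values below are examples only, not required settings:
```
ceph_conf_overrides:
  # example sections and values only
  mon:
    mon osd full ratio: .95
    mon osd nearfull ratio: .85
  osd:
    osd recovery max active: 5
    osd max backfills: 2
  client:
    rbd cache: true
```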
## Setup with Vagrant using virtualbox provider
* Create vagrant_variables.yml


@@ -177,21 +177,7 @@ dummy:
#generate_fsid: true
#cephx: true
#cephx_require_signatures: true # Kernel RBD does NOT support signatures for Kernels < 3.18!
#cephx_cluster_require_signatures: true
#cephx_service_require_signatures: false
#max_open_files: 131072
#disable_in_memory_logs: true # set this to false while enabling the options below
# Debug logs
#enable_debug_global: false
#debug_global_level: 20
#enable_debug_mon: false
#debug_mon_level: 20
#enable_debug_osd: false
#debug_osd_level: 20
#enable_debug_mds: false
#debug_mds_level: 20
## Client options
#
@@ -234,9 +220,6 @@ dummy:
#rbd_client_log_path: /var/log/ceph
#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
#rbd_default_features: 3
#rbd_default_map_options: rw
#rbd_default_format: 2
## Monitor options
#
@@ -245,36 +228,15 @@ dummy:
#monitor_interface: interface
#monitor_address: 0.0.0.0
#mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf
#mon_osd_down_out_interval: 600
#mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
#mon_clock_drift_allowed: .15
#mon_clock_drift_warn_backoff: 30
#mon_osd_full_ratio: .95
#mon_osd_nearfull_ratio: .85
#mon_osd_report_timeout: 300
#mon_pg_warn_max_per_osd: 0 # disable complains about low pgs numbers per osd
#mon_osd_allow_primary_affinity: "true"
#mon_pg_warn_max_object_skew: 10 # set to 20 or higher to disable complaints about number of PGs being too low if some pools have very few objects bringing down the average number of objects per pool. This happens when running RadosGW. Ceph default is 10
## OSD options
#
#journal_size: 0
#pool_default_pg_num: 128
#pool_default_pgp_num: 128
#pool_default_size: 3
#pool_default_min_size: 0 # 0 means no specific default; ceph will use (pool_default_size)-(pool_default_size/2) so 2 if pool_default_size=3
#public_network: 0.0.0.0/0
#cluster_network: "{{ public_network }}"
#osd_mkfs_type: xfs
#osd_mkfs_options_xfs: -f -i size=2048
#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
#osd_mon_heartbeat_interval: 30
# CRUSH
#pool_default_crush_rule: 0
#osd_crush_update_on_start: "true"
# Object backend
#osd_objectstore: filestore
# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
@@ -284,33 +246,6 @@ dummy:
# type.
#filestore_xattr_use_omap: null
# Performance tuning
#filestore_merge_threshold: 40
#filestore_split_multiple: 8
#osd_op_threads: 8
#filestore_op_threads: 8
#filestore_max_sync_interval: 5
#osd_max_scrubs: 1
# The OSD scrub window can be configured starting hammer only!
# Default settings will define a 24h window for the scrubbing operation
# The window is predefined from 0am midnight to midnight the next day.
#osd_scrub_begin_hour: 0
#osd_scrub_end_hour: 24
# Recovery tuning
#osd_recovery_max_active: 5
#osd_max_backfills: 2
#osd_recovery_op_priority: 2
#osd_recovery_max_chunk: 1048576
#osd_recovery_threads: 1
# Deep scrub
#osd_scrub_sleep: .1
#osd_disk_thread_ioprio_class: idle
#osd_disk_thread_ioprio_priority: 0
#osd_scrub_chunk_max: 5
#osd_deep_scrub_stride: 1048576
## MDS options
#
#mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf
@@ -341,8 +276,6 @@ dummy:
#restapi_interface: "{{ monitor_interface }}"
#restapi_address: "{{ monitor_address }}"
#restapi_port: 5000
#restapi_base_url: /api/v0.1
#restapi_log_level: warning # available level are: critical, error, warning, info, debug
## Testing mode
# enable this mode _only_ when you have a single node


@@ -169,21 +169,7 @@ fsid: "{{ cluster_uuid.stdout }}"
generate_fsid: true
cephx: true
cephx_require_signatures: true # Kernel RBD does NOT support signatures for Kernels < 3.18!
cephx_cluster_require_signatures: true
cephx_service_require_signatures: false
max_open_files: 131072
disable_in_memory_logs: true # set this to false while enabling the options below
# Debug logs
enable_debug_global: false
debug_global_level: 20
enable_debug_mon: false
debug_mon_level: 20
enable_debug_osd: false
debug_osd_level: 20
enable_debug_mds: false
debug_mds_level: 20
## Client options
#
@@ -226,9 +212,6 @@ rbd_client_directory_mode: null
rbd_client_log_path: /var/log/ceph
rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
rbd_default_features: 3
rbd_default_map_options: rw
rbd_default_format: 2
## Monitor options
#
@@ -237,36 +220,15 @@ rbd_default_format: 2
monitor_interface: interface
monitor_address: 0.0.0.0
mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf
mon_osd_down_out_interval: 600
mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
mon_clock_drift_allowed: .15
mon_clock_drift_warn_backoff: 30
mon_osd_full_ratio: .95
mon_osd_nearfull_ratio: .85
mon_osd_report_timeout: 300
mon_pg_warn_max_per_osd: 0 # disable complains about low pgs numbers per osd
mon_osd_allow_primary_affinity: "true"
mon_pg_warn_max_object_skew: 10 # set to 20 or higher to disable complaints about number of PGs being too low if some pools have very few objects bringing down the average number of objects per pool. This happens when running RadosGW. Ceph default is 10
## OSD options
#
journal_size: 0
pool_default_pg_num: 128
pool_default_pgp_num: 128
pool_default_size: 3
pool_default_min_size: 0 # 0 means no specific default; ceph will use (pool_default_size)-(pool_default_size/2) so 2 if pool_default_size=3
public_network: 0.0.0.0/0
cluster_network: "{{ public_network }}"
osd_mkfs_type: xfs
osd_mkfs_options_xfs: -f -i size=2048
osd_mount_options_xfs: noatime,largeio,inode64,swalloc
osd_mon_heartbeat_interval: 30
# CRUSH
pool_default_crush_rule: 0
osd_crush_update_on_start: "true"
# Object backend
osd_objectstore: filestore
# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
@@ -276,33 +238,6 @@ osd_objectstore: filestore
# type.
filestore_xattr_use_omap: null
# Performance tuning
filestore_merge_threshold: 40
filestore_split_multiple: 8
osd_op_threads: 8
filestore_op_threads: 8
filestore_max_sync_interval: 5
osd_max_scrubs: 1
# The OSD scrub window can be configured starting hammer only!
# Default settings will define a 24h window for the scrubbing operation
# The window is predefined from 0am midnight to midnight the next day.
osd_scrub_begin_hour: 0
osd_scrub_end_hour: 24
# Recovery tuning
osd_recovery_max_active: 5
osd_max_backfills: 2
osd_recovery_op_priority: 2
osd_recovery_max_chunk: 1048576
osd_recovery_threads: 1
# Deep scrub
osd_scrub_sleep: .1
osd_disk_thread_ioprio_class: idle
osd_disk_thread_ioprio_priority: 0
osd_scrub_chunk_max: 5
osd_deep_scrub_stride: 1048576
## MDS options
#
mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf
@@ -333,8 +268,6 @@ email_address: foo@bar.com
restapi_interface: "{{ monitor_interface }}"
restapi_address: "{{ monitor_address }}"
restapi_port: 5000
restapi_base_url: /api/v0.1
restapi_log_level: warning # available level are: critical, error, warning, info, debug
## Testing mode
# enable this mode _only_ when you have a single node


@@ -2,14 +2,7 @@
# {{ ansible_managed }}
[global]
{% if cephx %}
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
cephx require signatures = {{ cephx_require_signatures }} # Kernel RBD does NOT support signatures!
cephx cluster require signatures = {{ cephx_cluster_require_signatures }}
cephx service require signatures = {{ cephx_service_require_signatures }}
{% else %}
{% if not cephx %}
auth cluster required = none
auth service required = none
auth client required = none
@@ -19,77 +12,14 @@ auth supported = none
fsid = {{ fsid }}
{% endif %}
max open files = {{ max_open_files }}
osd pool default pg num = {{ pool_default_pg_num }}
osd pool default pgp num = {{ pool_default_pgp_num }}
osd pool default size = {{ pool_default_size }}
osd pool default min size = {{ pool_default_min_size }}
osd pool default crush rule = {{ pool_default_crush_rule }}
{% if common_single_host_mode is defined %}
osd crush chooseleaf type = 0
{% endif %}
{% if disable_in_memory_logs %}
# Disable in-memory logs
debug_lockdep = 0/0
debug_context = 0/0
debug_crush = 0/0
debug_buffer = 0/0
debug_timer = 0/0
debug_filer = 0/0
debug_objecter = 0/0
debug_rados = 0/0
debug_rbd = 0/0
debug_journaler = 0/0
debug_objectcatcher = 0/0
debug_client = 0/0
debug_osd = 0/0
debug_optracker = 0/0
debug_objclass = 0/0
debug_filestore = 0/0
debug_journal = 0/0
debug_ms = 0/0
debug_monc = 0/0
debug_tp = 0/0
debug_auth = 0/0
debug_finisher = 0/0
debug_heartbeatmap = 0/0
debug_perfcounter = 0/0
debug_asok = 0/0
debug_throttle = 0/0
debug_mon = 0/0
debug_paxos = 0/0
debug_rgw = 0/0
{% endif %}
{% if enable_debug_global %}
debug ms = {{ debug_global_level }}
{% endif %}
[client]
rbd cache = {{ rbd_cache }}
rbd cache writethrough until flush = true
rbd concurrent management ops = {{ rbd_concurrent_management_ops }}
admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
rbd default map options = {{ rbd_default_map_options }}
rbd default features = {{ rbd_default_features }} # sum features digits
rbd default format = {{ rbd_default_format }}
[mon]
mon osd down out interval = {{ mon_osd_down_out_interval }}
mon osd min down reporters = {{ mon_osd_min_down_reporters }}
mon clock drift allowed = {{ mon_clock_drift_allowed }}
mon clock drift warn backoff = {{ mon_clock_drift_warn_backoff }}
mon osd full ratio = {{ mon_osd_full_ratio }}
mon osd nearfull ratio = {{ mon_osd_nearfull_ratio }}
mon osd report timeout = {{ mon_osd_report_timeout }}
mon pg warn max per osd = {{ mon_pg_warn_max_per_osd }}
mon osd allow primary affinity = {{ mon_osd_allow_primary_affinity }}
mon pg warn max object skew = {{ mon_pg_warn_max_object_skew }}
{% if enable_debug_mon %}
debug mon = {{ debug_mon_level }}
debug paxos = {{ debug_mon_level }}
debug auth = {{ debug_mon_level }}
{% endif %}
{% if not mon_containerized_deployment_with_kv %}
{% for host in groups[mon_group_name] %}
{% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn %}
@@ -119,44 +49,12 @@ cluster_network = {{ cluster_network }}
{% if public_network is defined %}
public_network = {{ public_network }}
{% endif %}
osd mon heartbeat interval = {{ osd_mon_heartbeat_interval }}
# Performance tuning
filestore merge threshold = {{ filestore_merge_threshold }}
filestore split multiple = {{ filestore_split_multiple }}
osd op threads = {{ osd_op_threads }}
filestore op threads = {{ filestore_op_threads }}
filestore max sync interval = {{ filestore_max_sync_interval }}
{% if filestore_xattr_use_omap != None %}
filestore xattr use omap = {{ filestore_xattr_use_omap }}
{% elif osd_mkfs_type == "ext4" %}
filestore xattr use omap = true
{# else, default is false #}
{% endif %}
osd max scrubs = {{ osd_max_scrubs }}
{% if ceph_stable_release not in ['argonaut','bobtail','cuttlefish','dumpling','emperor','firefly','giant'] %}
osd scrub begin hour = {{ osd_scrub_begin_hour }}
osd scrub end hour = {{ osd_scrub_end_hour }}
{% endif %}
# Recovery tuning
osd recovery max active = {{ osd_recovery_max_active }}
osd max backfills = {{ osd_max_backfills }}
osd recovery op priority = {{ osd_recovery_op_priority }}
osd recovery max chunk = {{ osd_recovery_max_chunk }}
osd recovery threads = {{ osd_recovery_threads }}
osd objectstore = {{ osd_objectstore }}
osd crush update on start = {{ osd_crush_update_on_start }}
{% if enable_debug_osd %}
debug osd = {{ debug_osd_level }}
debug filestore = {{ debug_osd_level }}
debug journal = {{ debug_osd_level }}
debug monc = {{ debug_osd_level }}
{% endif %}
# Deep scrub impact
osd scrub sleep = {{ osd_scrub_sleep }}
osd disk thread ioprio class = {{ osd_disk_thread_ioprio_class }}
osd disk thread ioprio priority = {{ osd_disk_thread_ioprio_priority }}
osd scrub chunk max = {{ osd_scrub_chunk_max }}
osd deep scrub stride = {{ osd_deep_scrub_stride }}
{% if groups[mds_group_name] is defined %}
{% for host in groups[mds_group_name] %}
@@ -169,12 +67,6 @@ host = {{ hostvars[host]['ansible_hostname'] }}
{% endif %}
{% endfor %}
{% endif %}
{% if enable_debug_mds %}
debug mds = {{ debug_mds_level }}
debug mds balancer = {{ debug_mds_level }}
debug mds log = {{ debug_mds_level }}
debug mds migrator = {{ debug_mds_level }}
{% endif %}
{% if groups[rgw_group_name] is defined %}
{% for host in groups[rgw_group_name] %}
@@ -211,8 +103,6 @@ nss db path = {{ radosgw_nss_db_path }}
{% else %}
{% include 'client_restapi_address.j2' %}
{% endif %}
restapi base url = {{ restapi_base_url }}
restapi log level = {{ restapi_log_level }}
keyring = /var/lib/ceph/restapi/ceph-restapi/keyring
log file = /var/log/ceph/ceph-restapi.log
{% endif %}