Merge pull request #1783 from ceph/remove-dup-var

resync group_vars files
pull/1784/head
Sébastien Han 2017-08-22 11:40:27 +02:00 committed by GitHub
commit 37ec3bc798
5 changed files with 4 additions and 5 deletions

View File

@@ -397,7 +397,6 @@ dummy:
 #kv_type: etcd
 #kv_endpoint: 127.0.0.1
 #kv_port: 2379
-#containerized_deployment_with_kv: false
 # this is only here for usage with the rolling_update.yml playbook
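A minimal sketch of overriding the remaining KV settings in an inventory file such as group_vars/all.yml (the file name and the etcd address are assumptions, not part of this change):

    kv_type: etcd              # store cluster bootstrap data in etcd
    kv_endpoint: 192.168.42.10 # hypothetical etcd host reachable from every node
    kv_port: 2379              # default etcd client port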

View File

@@ -22,6 +22,6 @@ dummy:
 # To have ansible setfacl the generated key for $user, set the acls var like so:
 # acls: ["u:$user:r--"]
 #keys:
-#  - { name: client.test, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test", acls: [] }
-#  - { name: client.test2, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test2", acls: [] }
+#  - { name: client.test, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test", mode: "0600", acls: [] }
+#  - { name: client.test2, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test2", mode: "0600", acls: [] }
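Uncommented, one of the entries above with the newly synced mode field could look like this block-style sketch (the acl user "backup" is hypothetical):

    keys:
      - name: client.test
        key: "$(ceph-authtool --gen-print-key)"   # let ceph-authtool generate the key at deploy time
        mon_cap: "allow r"
        osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test"
        mode: "0600"                              # permissions of the keyring file written for this client
        acls: ["u:backup:r--"]                    # setfacl read access for the hypothetical user "backup"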

View File

@@ -200,6 +200,7 @@ dummy:
 # Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
 # when using lvm, not collocated journals.
 # lvm_volumes is a dictionary whose key/value pair represent a data lv and a journal pair.
+# Any logical volume or logical group used must be a name and not a path.
 # Journals can be either a lv, device or partition. You can not use the same journal for many data lvs.
 # For example:
 # lvm_volumes:
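Following the dictionary format described above, a hedged example of lvm_volumes (the lv names and the partition are hypothetical) could be:

    lvm_volumes:
      data-lv1: journal-lv1   # data lv paired with a dedicated journal lv
      data-lv2: /dev/sdc1     # data lv with its journal on a raw partition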

View File

@@ -159,6 +159,7 @@ ceph_rhcs: true
 # version. The previous version was 1.3. The current version is 2.
 #ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(2) }}"
 #ceph_rhcs_cdn_install: "{{ ceph_stable_rh_storage_cdn_install | default(false) }}" # assumes all the nodes can connect to cdn.redhat.com
+#ceph_rhcs_cdn_debian_repo: https://customername:customerpasswd@rhcs.download.redhat.com
 #ceph_rhcs_iso_install: "{{ ceph_stable_rh_storage_iso_install | default(false) }}" # usually used when nodes don't have access to cdn.redhat.com
 #ceph_rhcs_iso_path: "{{ ceph_stable_rh_storage_iso_path | default('') }}"
 #ceph_rhcs_mount_path: "{{ ceph_stable_rh_storage_mount_path | default('/tmp/rh-storage-mount') }}"
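As a hedged illustration of these options, an ISO-based install for nodes without access to cdn.redhat.com might be configured like this (the ISO path is hypothetical; the mount path matches the sample default):

    ceph_rhcs: true
    ceph_rhcs_version: 2
    ceph_rhcs_iso_install: true
    ceph_rhcs_iso_path: /root/rhceph-2-rhel-7-x86_64.iso   # hypothetical location of the downloaded ISO
    ceph_rhcs_mount_path: /tmp/rh-storage-mount            # same as the sample default
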
@@ -396,7 +397,6 @@ ceph_rhcs: true
 #kv_type: etcd
 #kv_endpoint: 127.0.0.1
 #kv_port: 2379
-#containerized_deployment_with_kv: false
 # this is only here for usage with the rolling_update.yml playbook

View File

@@ -389,7 +389,6 @@ ceph_docker_on_openstack: false
 kv_type: etcd
 kv_endpoint: 127.0.0.1
 kv_port: 2379
-containerized_deployment_with_kv: false
 # this is only here for usage with the rolling_update.yml playbook