site: collocated mon and mgr by default

This speeds up the deployment and deploys mon and mgr
collocated, as recommended.
This does not prevent you from adding dedicated machines
for mgr if needed.

Signed-off-by: Sébastien Han <seb@redhat.com>
pull/3375/head
Sébastien Han 2018-10-16 15:40:35 +02:00 committed by Guillaume Abrioux
parent a502327e52
commit 1c760904b0
25 changed files with 47 additions and 61 deletions
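
The practical effect on inventories shows up throughout the test changes below: a minimal cluster no longer needs a [mgrs] section, because the mgr role now runs on the mon hosts by default. A sketch of the minimal layout the updated inventories converge on (hostnames illustrative):

[mons]
mon0

[osds]
osd0

A dedicated [mgrs] group can still be declared; the key-creation tasks below iterate over both the mgr and mon groups so either layout gets its keys.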


@@ -113,6 +113,15 @@
       when:
         - not containerized_deployment

+    - name: stop ceph mgr
+      systemd:
+        name: ceph-mgr@{{ ansible_hostname }}
+        state: stopped
+        enabled: yes
+      ignore_errors: True # if no mgr collocated with mons
+      when:
+        - not containerized_deployment
+
     - import_role:
         name: ceph-defaults
         private: false
@@ -133,6 +142,10 @@
     - import_role:
         name: ceph-mon
         private: false
+    - import_role:
+        name: ceph-mgr
+        private: false
+      when: groups.get(mgr_group_name, []) | length == 0

     - name: start ceph mon
       systemd:
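
The new when guard only piggybacks the ceph-mgr role on the mons when the inventory declares no dedicated mgr hosts, presumably because dedicated mgrs are handled by their own play. A minimal standalone sketch of the same test, assuming mgr_group_name keeps its default of 'mgrs':

    - debug:
        msg: "no dedicated mgrs in the inventory, mgr will run on the mons"
      when: groups.get('mgrs', []) | length == 0

groups is Ansible's built-in inventory mapping, so .get() with an empty-list default keeps the expression safe when the [mgrs] section is absent.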
@@ -142,6 +155,15 @@
       when:
         - not containerized_deployment

+    - name: start ceph mgr
+      systemd:
+        name: ceph-mgr@{{ ansible_hostname }}
+        state: started
+        enabled: yes
+      ignore_errors: True # if no mgr collocated with mons
+      when:
+        - not containerized_deployment
+
     - name: restart containerized ceph mon
       systemd:
         name: ceph-mon@{{ monitor_name }}
@@ -151,6 +173,16 @@
       when:
         - containerized_deployment

+    - name: restart containerized ceph mgr
+      systemd:
+        name: ceph-mgr@{{ monitor_name }}
+        state: restarted
+        enabled: yes
+        daemon_reload: yes
+      ignore_errors: True # if no mgr collocated with mons
+      when:
+        - containerized_deployment
+
     - name: set mon_host_count
       set_fact: mon_host_count={{ groups[mon_group_name] | length }}


@@ -60,9 +60,10 @@
     secret: "{{ (mgr_secret != 'mgr_secret') | ternary(mgr_secret, omit) }}"
   when:
     - cephx
-    - groups.get(mgr_group_name, []) | length > 0
     - inventory_hostname == groups[mon_group_name]|last
-  with_items: "{{ groups.get(mgr_group_name, []) }}"
+  with_items:
+    - "{{ groups.get(mgr_group_name, []) }}" # this honors the condition where mgrs run on separate machines
+    - "{{ groups.get(mon_group_name, []) }}" # this honors the new rule where mgrs are always collocated with mons

 # once this gets backported github.com/ceph/ceph/pull/20983
 # we will be able to remove these 2 tasks below
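
The reworked with_items relies on Ansible flattening one level of list nesting, so the task iterates over the union of both groups and the removed length check becomes unnecessary: an empty mgr group simply contributes no items. A standalone sketch of the behavior (group names written literally here rather than taken from mgr_group_name/mon_group_name):

    - debug:
        msg: "would create a mgr key for {{ item }}"
      with_items:
        - "{{ groups.get('mgrs', []) }}"
        - "{{ groups.get('mons', []) }}"

With [mgrs] empty this loops over the mons only; with dedicated mgr hosts it covers both sets.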


@@ -34,8 +34,9 @@
   when:
     - cephx
     - containerized_deployment
-    - groups.get(mgr_group_name, []) | length > 0
-  with_items: "{{ groups.get(mgr_group_name, []) }}"
+  with_items:
+    - "{{ groups.get(mgr_group_name, []) }}" # this honors the condition where mgrs run on separate machines
+    - "{{ groups.get(mon_group_name, []) }}" # this honors the new rule where mgrs are always collocated with mons

 - name: fetch ceph mgr key(s)
   fetch:


@@ -125,6 +125,10 @@
     - import_role:
         name: ceph-mon
         private: false
+    - import_role:
+        name: ceph-mgr
+        private: false

   serial: 1 # MUST be '1' WHEN DEPLOYING MONITORS ON DOCKER CONTAINERS

 - hosts: mons


@@ -113,6 +113,9 @@
     - import_role:
         name: ceph-mon
         private: false
+    - import_role:
+        name: ceph-mgr
+        private: false

   # post-tasks for preceding imports -
   - name: set ceph monitor install 'Complete'
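
Both site playbooks now give the mons play the same shape; a condensed sketch with surrounding tasks abbreviated (the ceph-defaults import is assumed to match the pattern used elsewhere in this changeset):

    - hosts: mons
      tasks:
        - import_role:
            name: ceph-defaults
            private: false
        - import_role:
            name: ceph-mon
            private: false
        - import_role:
            name: ceph-mgr
            private: false

Importing ceph-mgr immediately after ceph-mon in the same play is what makes the collocation the default without requiring any inventory changes.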


@@ -1,8 +1,5 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0


@@ -1,9 +1,6 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0
 osd1


@@ -1,8 +1,5 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0


@@ -1,9 +1,6 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0
 osd1


@@ -1,8 +1,5 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0


@@ -1,8 +1,5 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0


@@ -3,9 +3,6 @@ ceph-mon0 monitor_address=192.168.1.10
 ceph-mon1 monitor_interface=eth1
 ceph-mon2 monitor_address=192.168.1.12

-[mgrs]
-ceph-mgr0
-
 [osds]
 ceph-osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd0' }"
 ceph-osd1 osd_crush_location="{ 'root': 'default', 'host': 'ceph-osd1' }"


@@ -12,7 +12,7 @@ nfs_vms: 0
 rbd_mirror_vms: 1
 client_vms: 2
 iscsi_gw_vms: 1
-mgr_vms: 1
+mgr_vms: 0

 # INSTALL SOURCE OF CEPH
 # valid values are 'stable' and 'dev'


@@ -14,10 +14,6 @@ rgw0
 rgw0
 mds0

-[mgrs]
-mon0
-osd0
-
 [rbdmirrors]
 rgw0
 mds0


@@ -3,9 +3,6 @@ mon0
 mon1
 mon2

-[mgrs]
-mgr0
-
 [osds]
 osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
 osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"


@@ -12,7 +12,7 @@ nfs_vms: 1
 rbd_mirror_vms: 1
 client_vms: 2
 iscsi_gw_vms: 1
-mgr_vms: 1
+mgr_vms: 0

 # SUBNETS TO USE FOR THE VMS
 public_subnet: 192.168.17


@@ -1,8 +1,5 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0


@@ -1,8 +1,5 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0


@@ -1,8 +1,5 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0


@@ -1,8 +1,5 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0


@@ -1,9 +1,6 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0


@@ -1,9 +1,6 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0


@@ -1,9 +1,6 @@
 [mons]
 ceph-mon0 monitor_address=192.168.71.10

-[mgrs]
-ceph-mon0
-
 [osds]
 ceph-osd0
 ceph-osd1


@@ -1,9 +1,6 @@
 [mons]
 mon0

-[mgrs]
-mon0
-
 [osds]
 osd0
 osd1


@@ -3,9 +3,6 @@ mon0
 mon1
 mon2

-[mgrs]
-mgr0
-
 [osds]
 osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
 osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"