Merge pull request #662 from ceph/follow-up-cluster-name

ceph: implement cluster name support
pull/668/head v1.0.3
Leseb 2016-03-30 18:20:30 +02:00
commit 0784b8c5b1
22 changed files with 100 additions and 83 deletions
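This change threads a new `cluster` variable (default `ceph`) through the playbooks so that config files, keyrings, daemon directories and upstart jobs are all named after the cluster. A minimal usage sketch, assuming a site.yml copied from site.yml.sample and an inventory file named hosts (cluster name `mycluster` is illustrative):

    # group_vars/all -- uncomment the new sample line and pick a name:
    #   cluster: mycluster
    # then deploy as usual; the rendered config lands in /etc/ceph/mycluster.conf
    ansible-playbook -i hosts site.yml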

View File

@@ -14,6 +14,7 @@ dummy:
 ###########
 #fetch_directory: fetch/
+#cluster: ceph # cluster name
 ###########
 # INSTALL #

View File

@@ -21,7 +21,7 @@
     private: no
   tasks:
-  - name: exit playbook, if user didn't mean to purge cluster
+  - name: exit playbook, if user did not mean to purge cluster
     fail:
       msg: >
         "Exiting purge-cluster playbook, cluster was NOT purged.
@@ -70,6 +70,10 @@
       - python-rados
       - python-rbd
+    cluster: ceph # name of the cluster
+    monitor_name: "{{ ansible_hostname }}"
+    mds_name: "{{ ansible_hostname }}"
   handlers:
   - name: restart machine
@@ -183,7 +187,10 @@
   # Ubuntu 14.04
   - name: stop ceph osds on ubuntu
-    command: stop ceph-osd id={{ item }}
+    shell: |
+      for id in $(ls /var/lib/ceph/osd/ |grep -oh '[0-9]*'); do
+        initctl stop ceph-osd cluster={{ cluster }} id=$id
+      done
     failed_when: false
     when:
       ansible_distribution == 'Ubuntu' and
@@ -191,21 +198,21 @@
     with_items: "{{ osd_ids.stdout_lines }}"
   - name: stop ceph mons on ubuntu
-    command: stop ceph-mon id={{ ansible_hostname }}
+    command: initctl stop ceph-mon cluster={{ cluster }} id={{ monitor_name }}
     failed_when: false
     when:
       ansible_distribution == 'Ubuntu' and
       mon_group_name in group_names
   - name: stop ceph mdss on ubuntu
-    command: stop ceph-mds-all
+    command: initctl stop ceph-mds cluster={{ cluster }} id={{ mds_name }}
     failed_when: false
     when:
       ansible_distribution == 'Ubuntu' and
       mds_group_name in group_names
   - name: stop ceph rgws on ubuntu
-    command: stop ceph-radosgw id=rgw.{{ ansible_hostname }}
+    command: initctl stop radosgw cluster={{ cluster }} id={{ ansible_hostname }}
     failed_when: false
     when:
       ansible_distribution == 'Ubuntu' and
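On Ubuntu 14.04 the daemons run under upstart, whose per-instance ceph jobs take the cluster and the daemon id as parameters, so the purge playbook now addresses each daemon explicitly instead of using the ceph-*-all wrapper jobs. The equivalent manual commands (cluster name and ids illustrative):

    initctl stop ceph-mon cluster=mycluster id=$(hostname -s)
    for id in $(ls /var/lib/ceph/osd/ | grep -oh '[0-9]*'); do
      initctl stop ceph-osd cluster=mycluster id=$id
    done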

View File

@@ -6,6 +6,7 @@
 ###########
 fetch_directory: fetch/
+cluster: ceph # cluster name
 ###########
 # INSTALL #

View File

@@ -22,7 +22,7 @@
     is_ceph_infernalis
 - name: restart ceph mons on ubuntu
-  command: restart ceph-mon-all
+  command: initctl restart ceph-mon cluster={{ cluster }} id={{ monitor_name }}
   when:
     socket.rc == 0 and
     ansible_distribution == 'Ubuntu' and
@@ -50,14 +50,17 @@
     is_ceph_infernalis
 - name: restart ceph osds on ubuntu
-  command: restart ceph-osd-all
+  shell: |
+    for id in $(ls /var/lib/ceph/osd/ |grep -oh '[0-9]*'); do
+      initctl restart ceph-osd cluster={{ cluster }} id=$id
+    done
   when:
     socket.rc == 0 and
     ansible_distribution == 'Ubuntu' and
     osd_group_name in group_names
 - name: restart ceph mdss on ubuntu
-  command: restart ceph-mds-all
+  command: initctl restart ceph-mds cluster={{ cluster }} id={{ ansible_hostname }}
   when:
     socket.rc == 0 and
     ansible_distribution == 'Ubuntu' and
@@ -84,7 +87,7 @@
     ceph_stable_release not in ceph_stable_releases
 - name: restart ceph rgws on ubuntu
-  command: restart ceph-all
+  command: initctl restart radosgw cluster={{ cluster }} id=rgw.{{ ansible_hostname }}
   when:
     socketrgw.rc == 0 and
     ansible_distribution == 'Ubuntu' and

View File

@@ -179,7 +179,7 @@
   action: config_template
   args:
     src: ceph.conf.j2
-    dest: /etc/ceph/ceph.conf
+    dest: /etc/ceph/{{ cluster }}.conf
     owner: "{{ dir_owner }}"
     group: "{{ dir_group }}"
     mode: "{{ activate_file_mode }}"
@@ -207,3 +207,19 @@
     owner: "{{ rbd_client_dir_owner }}"
     group: "{{ rbd_client_dir_group }}"
     mode: "{{ rbd_client_dir_mode }}"
+
+- name: configure cluster name
+  lineinfile:
+    dest: /etc/sysconfig/ceph
+    insertafter: EOF
+    line: "CLUSTER={{ cluster }}"
+  when:
+    ansible_os_family == "RedHat"
+
+- name: configure cluster name
+  lineinfile:
+    dest: /etc/default/ceph/ceph
+    insertafter: EOF
+    line: "CLUSTER={{ cluster }}"
+  when:
+    ansible_os_family == "Debian"
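These two tasks export the chosen name to the init scripts, which read the CLUSTER variable from an environment file. A quick check of the expected result on a RedHat-family host (Debian-family hosts use the /etc/default path above; `mycluster` is illustrative):

    $ grep CLUSTER /etc/sysconfig/ceph
    CLUSTER=mycluster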

View File

@@ -183,10 +183,10 @@ debug mds migrator = {{ debug_mds_level }}
 rgw dns name = {{ radosgw_dns_name }}
 {% endif %}
 host = {{ hostvars[host]['ansible_hostname'] }}
-keyring = /var/lib/ceph/radosgw/ceph-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
+keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
 rgw socket path = /tmp/radosgw-{{ hostvars[host]['ansible_hostname'] }}.sock
-log file = /var/log/ceph/radosgw-{{ hostvars[host]['ansible_hostname'] }}.log
-rgw data = /var/lib/ceph/radosgw/ceph-rgw.{{ hostvars[host]['ansible_hostname'] }}
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
+rgw data = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}
 {% if radosgw_frontend == 'civetweb' %}
 rgw frontends = civetweb port={{ radosgw_civetweb_port }}
 {% endif %}
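For a cluster named `mycluster` on a gateway host `node1` (both illustrative), the template now renders the changed lines as:

    keyring = /var/lib/ceph/radosgw/mycluster-rgw.node1/keyring
    log file = /var/log/ceph/mycluster-rgw-node1.log
    rgw data = /var/lib/ceph/radosgw/mycluster-rgw.node1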

View File

@@ -23,30 +23,30 @@
     group: "{{ key_group }}"
     mode: "{{ key_mode }}"
   with_items:
-    - { name: /var/lib/ceph/bootstrap-mds/ceph.keyring, copy_key: true }
-    - { name: /etc/ceph/ceph.client.admin.keyring, copy_key: "{{ copy_admin_key }}" }
+    - { name: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring", copy_key: true }
+    - { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
   when:
     cephx and
     item.copy_key|bool
 - name: create mds directory
   file:
-    path: /var/lib/ceph/mds/ceph-{{ mds_name }}
+    path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}
     state: directory
     owner: "{{ dir_owner }}"
     group: "{{ dir_group }}"
     mode: "{{ dir_mode }}"
 - name: create mds keyring
-  command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.{{ mds_name }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-{{ mds_name }}/keyring
+  command: ceph --cluster {{ cluster }} --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring auth get-or-create mds.{{ mds_name }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/keyring
   args:
-    creates: /var/lib/ceph/mds/ceph-{{ mds_name }}/keyring
+    creates: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/keyring
   changed_when: false
   when: cephx
 - name: set mds key permissions
   file:
-    path: /var/lib/ceph/mds/ceph-{{ mds_name }}/keyring
+    path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/keyring
     mode: "{{ key_mode }}"
     owner: "{{ key_owner }}"
     group: "{{ key_group }}"
@@ -54,7 +54,7 @@
 - name: activate metadata server with upstart
   file:
-    path: /var/lib/ceph/mds/ceph-{{ mds_name }}/{{ item }}
+    path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/{{ item }}
     state: touch
     owner: "{{ activate_file_owner }}"
     group: "{{ activate_file_group }}"
@@ -67,7 +67,7 @@
 - name: activate metadata server with sysvinit
   file:
-    path: /var/lib/ceph/mds/ceph-{{ mds_name }}/{{ item }}
+    path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/{{ item }}
     state: touch
     owner: "{{ activate_file_owner }}"
     group: "{{ activate_file_group }}"
@@ -90,12 +90,9 @@
     is_ceph_infernalis
 - name: start and add that the metadata service to the init sequence (ubuntu)
-  service:
-    name: ceph-mds
-    state: started
-    enabled: yes
-    args: "id={{ mds_name }}"
+  command: initctl emit ceph-mds cluster={{ cluster }} id={{ mds_name }}
   changed_when: false
+  failed_when: false
   when: ansible_distribution == "Ubuntu"
 - name: start and add that the metadata service to the init sequence (before infernalis)

View File

@@ -3,12 +3,12 @@
 # the admin key is not instantanely created so we have to wait a bit
 - name: wait for client.admin key exists
   wait_for:
-    path: /etc/ceph/ceph.client.admin.keyring
+    path: /etc/ceph/{{ cluster }}.client.admin.keyring
 - name: create ceph rest api keyring when mon is not containerized
-  command: ceph auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.restapi.keyring
+  command: ceph --cluster {{ cluster }} auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/{{ cluster }}.client.restapi.keyring
   args:
-    creates: /etc/ceph/ceph.client.restapi.keyring
+    creates: /etc/ceph/{{ cluster }}.client.restapi.keyring
   changed_when: false
   when:
     cephx and
@@ -41,9 +41,9 @@
   run_once: true
   with_items:
     - "{{ ceph_keys.stdout_lines }}"
-    - /var/lib/ceph/bootstrap-osd/ceph.keyring
-    - /var/lib/ceph/bootstrap-rgw/ceph.keyring
-    - /var/lib/ceph/bootstrap-mds/ceph.keyring
+    - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
+    - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
+    - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
   when: cephx
 - name: drop in a motd script to report status when logging in

View File

@@ -4,7 +4,7 @@
 # the role 'ceph-common' doesn't get inherited so the condition can not be evaluate
 # since those check are performed by the ceph-common role
 - name: create filesystem pools
-  command: ceph osd pool create {{ item }} {{ pool_default_pg_num }}
+  command: ceph --cluster {{ cluster }} osd pool create {{ item }} {{ pool_default_pg_num }}
   with_items:
     - cephfs_data
     - cephfs_metadata
@@ -12,6 +12,6 @@
   when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
 - name: create ceph filesystem
-  command: ceph fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
+  command: ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
   changed_when: false
   when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
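The version guard exists because `ceph fs new` only appeared in Ceph 0.84; on newer releases the two tasks boil down to commands like these (cluster name, fs name, and pg count all illustrative):

    ceph --cluster mycluster osd pool create cephfs_data 128
    ceph --cluster mycluster osd pool create cephfs_metadata 128
    ceph --cluster mycluster fs new cephfs cephfs_metadata cephfs_data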

View File

@@ -38,16 +38,16 @@
 - name: create monitor directory
   file:
-    path: /var/lib/ceph/mon/ceph-{{ monitor_name }}
+    path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
     state: directory
     owner: "{{ dir_owner }}"
     group: "{{ dir_group }}"
     mode: "{{ dir_mode }}"
 - name: ceph monitor mkfs with keyring (for or after infernalis release)
-  command: ceph-mon --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
+  command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
   args:
-    creates: /var/lib/ceph/mon/ceph-{{ monitor_name }}/keyring
+    creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
   when:
     cephx and
     is_ceph_infernalis
@@ -55,7 +55,7 @@
 - name: ceph monitor mkfs without keyring (for or after infernalis release)
   command: ceph-mon --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
   args:
-    creates: /var/lib/ceph/mon/ceph-{{ monitor_name }}/store.db
+    creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
   when:
     not cephx and
     is_ceph_infernalis
@@ -63,7 +63,7 @@
 - name: ceph monitor mkfs with keyring (before infernalis release)
   command: ceph-mon --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
   args:
-    creates: /var/lib/ceph/mon/ceph-{{ monitor_name }}/keyring
+    creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
   when:
     cephx and
     not is_ceph_infernalis
@@ -71,7 +71,7 @@
 - name: ceph monitor mkfs without keyring (before infernalis release)
   command: ceph-mon --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
   args:
-    creates: /var/lib/ceph/mon/ceph-{{ monitor_name }}/store.db
+    creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
   when:
     not cephx and
     not is_ceph_infernalis

View File

@@ -1,6 +1,6 @@
 ---
 - name: create openstack pool
-  command: ceph osd pool create {{ item.name }} {{ item.pg_num }}
+  command: ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pg_num }}
   with_items:
     - "{{ openstack_glance_pool }}"
     - "{{ openstack_cinder_pool }}"
@@ -10,9 +10,9 @@
   failed_when: false
 - name: create openstack keys
-  command: ceph auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/ceph.{{ item.name }}.keyring
+  command: ceph --cluster {{ cluster }} auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
   args:
-    creates: /etc/ceph/ceph.{{ item.name }}.keyring
+    creates: /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
   with_items: openstack_keys
   changed_when: false
   when: cephx

View File

@@ -1,11 +1,11 @@
 ---
 - name: collect all the pools
-  command: rados lspools
+  command: rados --cluster {{ cluster }} lspools
   register: ceph_pools
   when: "{{ ceph_version.stdout | version_compare('0.94', '>=') }}"
 - name: secure the cluster
-  command: ceph osd pool set {{ item[0] }} {{ item[1] }} true
+  command: ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
   with_nested:
     - ceph_pools.stdout_lines
     - secure_cluster_flags

View File

@@ -1,7 +1,7 @@
 ---
 - name: activate monitor with upstart
   file:
-    path: /var/lib/ceph/mon/ceph-{{ monitor_name }}/{{ item }}
+    path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/{{ item }}
     state: touch
     owner: "{{ activate_file_owner }}"
     group: "{{ activate_file_group }}"
@@ -13,11 +13,9 @@
   when: ansible_distribution == "Ubuntu"
 - name: start and add that the monitor service to the init sequence (ubuntu)
-  service:
-    name: ceph-mon
-    state: started
-    enabled: yes
-    args: "id={{ monitor_name }}"
+  command: initctl emit ceph-mon cluster={{ cluster }} id={{ monitor_name }}
+  changed_when: false
+  failed_when: false
   when: ansible_distribution == "Ubuntu"
 # NOTE (leseb): somehow the service ansible module is messing things up
@@ -51,13 +49,13 @@
     is_ceph_infernalis
 - name: collect admin and bootstrap keys
-  command: ceph-create-keys --id {{ monitor_name }}
+  command: ceph-create-keys --cluster {{ cluster }} --id {{ monitor_name }}
   changed_when: false
   failed_when: false
   when: cephx
 - name: get ceph monitor version
-  shell: ceph daemon mon."{{ monitor_name }}" version | cut -d '"' -f 4 | cut -f 1,2 -d '.'
+  shell: ceph --cluster {{ cluster }} daemon mon."{{ monitor_name }}" version | cut -d '"' -f 4 | cut -f 1,2 -d '.'
   changed_when: false
   failed_when: "'No such file or directory' in ceph_version.stderr"
   register: ceph_version
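The `daemon` subcommand talks to the monitor's local admin socket, which Ceph also names per cluster (by default $run_dir/$cluster-$name.asok), so `--cluster` is what lets the version probe find the right socket. For an illustrative cluster and host:

    ceph --cluster mycluster daemon mon.node1 version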

View File

@@ -49,8 +49,8 @@
     mode: "{{ activate_file_mode }}"
   with_items: combined_osd_id.results
-- name: copy ceph.conf for assembling
-  command: cp /etc/ceph/ceph.conf /etc/ceph/ceph.d/
+- name: copy {{ cluster }}.conf for assembling
+  command: cp /etc/ceph/{{ cluster }}.conf /etc/ceph/ceph.d/
   changed_when: false
 - name: assemble osd sections
@@ -61,10 +61,10 @@
     group: "{{ dir_group }}"
     mode: "{{ activate_file_mode }}"
-- name: assemble ceph conf and osd fragments
+- name: assemble {{ cluster }}.conf and osd fragments
   assemble:
     src: /etc/ceph/ceph.d/
-    dest: /etc/ceph/ceph.conf
+    dest: /etc/ceph/{{ cluster }}.conf
     owner: "{{ dir_owner }}"
     group: "{{ dir_group }}"
     mode: "{{ activate_file_mode }}"
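Conceptually, the copy-then-assemble pair concatenates the per-OSD fragments back into the cluster-named config; a rough shell equivalent (illustrative cluster name, ignoring ownership and modes):

    cp /etc/ceph/mycluster.conf /etc/ceph/ceph.d/
    cat /etc/ceph/ceph.d/* > /etc/ceph/mycluster.conf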

View File

@@ -29,8 +29,8 @@
     group: "{{ key_group }}"
     mode: "{{ key_mode }}"
   with_items:
-    - { name: /var/lib/ceph/bootstrap-osd/ceph.keyring, copy_key: true }
-    - { name: /etc/ceph/ceph.client.admin.keyring, copy_key: "{{ copy_admin_key }}" }
+    - { name: "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring", copy_key: true }
+    - { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
   when:
     cephx and
     item.copy_key|bool

View File

@@ -11,7 +11,7 @@
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
 - name: automatic prepare osd disk(s) without partitions
-  command: ceph-disk prepare --bluestore "/dev/{{ item.key }}"
+  command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "/dev/{{ item.key }}"
   ignore_errors: true
   register: prepared_osds
   with_dict: ansible_devices
@@ -23,7 +23,7 @@
     osd_auto_discovery
 - name: manually prepare osd disk(s)
-  command: ceph-disk prepare --bluestore "{{ item.2 }}"
+  command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "{{ item.2 }}"
   ignore_errors: true
   with_together:
     - combined_parted_results.results

View File

@@ -10,7 +10,7 @@
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
 - name: automatic prepare osd disk(s) without partitions
-  command: ceph-disk prepare "/dev/{{ item.key }}"
+  command: ceph-disk prepare --cluster "{{ cluster }}" "/dev/{{ item.key }}"
   ignore_errors: true
   register: prepared_osds
   with_dict: ansible_devices
@@ -22,7 +22,7 @@
     osd_auto_discovery
 - name: manually prepare osd disk(s)
-  command: "ceph-disk prepare {{ item.2 }}"
+  command: "ceph-disk prepare --cluster {{ cluster }} {{ item.2 }}"
   ignore_errors: true
   with_together:
     - combined_parted_results.results

View File

@@ -16,7 +16,7 @@
 # if you have 64 disks with 4TB each, this will take a while
 # since Ansible will sequential process the loop
 - name: prepare OSD disk(s)
-  command: "ceph-disk prepare {{ item }}"
+  command: "ceph-disk prepare --cluster {{ cluster }} {{ item }}"
   with_items: osd_directories
   changed_when: false
   when: osd_directory

View File

@@ -10,7 +10,7 @@
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
 - name: prepare osd disk(s)
-  command: "ceph-disk prepare {{ item.2 }} {{ item.3 }}"
+  command: "ceph-disk prepare --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
   with_together:
     - combined_parted_results.results
     - combined_ispartition_results.results
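In all of these prepare tasks, `--cluster` tells ceph-disk to read /etc/ceph/<cluster>.conf (for the fsid and OSD settings) instead of the default ceph.conf. An illustrative invocation with a data and a journal device:

    ceph-disk prepare --cluster mycluster /dev/sdb /dev/sdc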

View File

@@ -9,7 +9,7 @@
 - name: copy ceph rest api keyring
   copy:
-    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/ceph.client.restapi.keyring"
+    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.restapi.keyring"
     dest: "/var/lib/ceph/restapi/ceph-restapi/keyring"
     owner: "{{ key_owner }}"
     group: "{{ key_group }}"

View File

@@ -9,7 +9,7 @@
   with_items:
     - /var/lib/ceph/bootstrap-rgw
     - /var/lib/ceph/radosgw
-    - /var/lib/ceph/radosgw/ceph-rgw.{{ ansible_hostname }}
+    - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}
     - "{{ rbd_client_admin_socket_path }}"
 - name: copy rados gateway bootstrap key
@@ -20,22 +20,22 @@
     group: "{{ key_group }}"
     mode: "{{ key_mode }}"
   with_items:
-    - { name: /var/lib/ceph/bootstrap-rgw/ceph.keyring, copy_key: true }
-    - { name: /etc/ceph/ceph.client.admin.keyring, copy_key: "{{ copy_admin_key }}" }
+    - { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
+    - { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
   when:
     cephx and
     item.copy_key|bool
 - name: create rados gateway keyring
-  command: ceph --cluster ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/ceph.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/ceph-rgw.{{ ansible_hostname }}/keyring
+  command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
   args:
-    creates: /var/lib/ceph/radosgw/ceph-rgw.{{ ansible_hostname }}/keyring
+    creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
   changed_when: false
   when: cephx
 - name: set rados gateway key permissions (for or after the infernalis release)
   file:
-    path: /var/lib/ceph/radosgw/ceph-rgw.{{ ansible_hostname }}/keyring
+    path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
     mode: "{{ key_mode }}"
     owner: "{{ key_owner }}"
     group: "{{ key_group }}"
@@ -43,7 +43,7 @@
 - name: activate rados gateway with upstart
   file:
-    path: /var/lib/ceph/radosgw/ceph-rgw.{{ ansible_hostname }}/{{ item }}
+    path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/{{ item }}
     state: touch
     owner: "{{ activate_file_owner }}"
     group: "{{ activate_file_group }}"
@@ -56,7 +56,7 @@
 - name: activate rados gateway with sysvinit
   file:
-    path: /var/lib/ceph/radosgw/ceph-rgw.{{ ansible_hostname }}/{{ item }}
+    path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/{{ item }}
     state: touch
     owner: "{{ activate_file_owner }}"
     group: "{{ activate_file_group }}"

View File

@@ -12,16 +12,10 @@
     ansible_distribution != "Ubuntu" and
     ansible_os_family != 'RedHat'
-- name: activate rgw on ubuntu
-  command: initctl emit radosgw cluster=ceph id=rgw.{{ ansible_hostname }}
+- name: start and add that the rados gateway service to the init sequence (ubuntu)
+  command: initctl emit radosgw cluster={{ cluster }} id=rgw.{{ ansible_hostname }}
   changed_when: false
-  when: ansible_distribution == 'Ubuntu'
-
-- name: start rgw on ubuntu
-  service:
-    name: radosgw
-    args: id=rgw.{{ ansible_hostname }}
-    state: started
+  failed_when: false
   when: ansible_distribution == 'Ubuntu'
 - name: start rgw on red hat (before or on infernalis)