Merge pull request #1243 from guits/refact_code

Remove support of releases prior to Jewel.
Sébastien Han 2017-01-31 12:07:43 +01:00 committed by GitHub
commit efc49e2347
32 changed files with 102 additions and 744 deletions
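In practice the cleanup converges on two patterns that repeat through every hunk below: daemons are managed only through their systemd instance units, and file ownership that used to be derived from ceph_release_num facts is written out as literal ceph:ceph with fixed modes. A minimal sketch of the ownership pattern, taken from the mon role further down:

- name: create monitor directory
  file:
    path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
    state: directory
    owner: "ceph"        # was "{{ dir_owner }}", selected by release checks
    group: "ceph"        # was "{{ dir_group }}"
    mode: "0755"         # was "{{ dir_mode }}"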

View File

@ -390,7 +390,7 @@
- is_systemd
- not mds_containerized_deployment
- name: restart ceph mdss with systemd
- name: restart ceph mdss
service:
name: ceph-mds@{{ ansible_hostname }}
state: restarted

View File

@ -99,7 +99,7 @@
when:
- item.rc != 0
- name: stop monitor service (systemd)
- name: stop monitor service
service:
name: ceph-mon@{{ item }}
state: stopped

View File

@ -3,7 +3,7 @@
copy:
src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
dest: "/etc/ceph/"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
mode: "{{ key_mode }}"
owner: "ceph"
group: "ceph"
mode: "0600"
when: cephx

View File

@ -4,123 +4,39 @@
update-cache: yes
- name: restart ceph mons
command: service ceph restart mon
when:
- socket.rc == 0
- ansible_distribution != 'Ubuntu'
- mon_group_name in group_names
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: restart ceph mons with systemd
service:
name: ceph-mon@{{ monitor_name }}
state: restarted
when:
- socket.rc == 0
- use_systemd
- mon_group_name in group_names
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: restart ceph mons on ubuntu
command: initctl restart ceph-mon cluster={{ cluster }} id={{ monitor_name }}
when:
- socket.rc == 0
- ansible_distribution == 'Ubuntu'
- not use_systemd
- mon_group_name in group_names
- name: restart ceph osds
command: service ceph restart osd
when:
- socket.rc == 0
- ansible_distribution != 'Ubuntu'
- osd_group_name in group_names
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
# This does not just restart OSDs but everything else too. Unfortunately
# at this time the ansible role does not have an OSD id list to use
# for restarting them specifically.
- name: restart ceph osds with systemd
- name: restart ceph osds
service:
name: ceph.target
state: restarted
when:
- socket.rc == 0
- use_systemd
- osd_group_name in group_names
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: restart ceph osds on ubuntu
shell: |
for id in $(ls /var/lib/ceph/osd/ |grep -oh '[0-9]*'); do
initctl restart ceph-osd cluster={{ cluster }} id=$id
done
when:
- socket.rc == 0
- ansible_distribution == 'Ubuntu'
- not use_systemd
- osd_group_name in group_names
- name: restart ceph mdss on ubuntu
command: initctl restart ceph-mds cluster={{ cluster }} id={{ ansible_hostname }}
when:
- socket.rc == 0
- ansible_distribution == 'Ubuntu'
- not use_systemd
- mds_group_name in group_names
- name: restart ceph mdss
command: service ceph restart mds
when:
- socket.rc == 0
- ansible_distribution != 'Ubuntu'
- use_systemd
- mds_group_name in group_names
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: restart ceph mdss with systemd
service:
name: ceph-mds@{{ mds_name }}
state: restarted
when:
- socket.rc == 0
- use_systemd
- mds_group_name in group_names
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: restart ceph rgws on ubuntu
command: initctl restart radosgw cluster={{ cluster }} id=rgw.{{ ansible_hostname }}
when:
- socketrgw.rc == 0
- ansible_distribution == 'Ubuntu'
- not use_systemd
- rgw_group_name in group_names
- name: restart ceph rgws
command: /etc/init.d/radosgw restart
when:
- socketrgw.rc == 0
- ansible_distribution != 'Ubuntu'
- rgw_group_name in group_names
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: restart ceph rgws on red hat
command: /etc/init.d/ceph-radosgw restart
when:
- socketrgw.rc == 0
- ansible_os_family == 'RedHat'
- rgw_group_name in group_names
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: restart ceph rgws with systemd
service:
name: ceph-rgw@{{ ansible_hostname }}
state: restarted
when:
- socketrgw.rc == 0
- use_systemd
- rgw_group_name in group_names
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: restart ceph nfss
service:
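After this hunk each daemon keeps exactly one restart handler. A sketch of the resulting shape, taking the mon handler shown above and dropping the use_systemd and ceph_release_num guards that no longer apply:

- name: restart ceph mons
  service:
    name: ceph-mon@{{ monitor_name }}
    state: restarted
  when:
    - socket.rc == 0
    - mon_group_name in group_names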

View File

@ -34,3 +34,8 @@
when:
- ansible_version.major|int == 1
- ansible_version.minor|int < 9
- name: fail if systemd is not present
fail:
msg: "Systemd must be present"
when: ansible_service_mgr != 'systemd'
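Because only systemd-based releases remain supported, the role can simply refuse to run anywhere else; ansible_service_mgr is the init-system fact that Ansible 2.x gathers (the NOTE about it appears in the next file). The same guard could also be written with the assert module, a sketch:

- name: fail if systemd is not present
  assert:
    that: ansible_service_mgr == 'systemd'
    msg: "Systemd must be present"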

View File

@ -49,19 +49,6 @@
- set_fact:
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
# NOTE(mattt): On ansible 2.x we can use ansible_service_mgr instead
- name: check init system
slurp:
src: /proc/1/comm
always_run: yes
register: init_system
- set_fact:
init_system={{ init_system.content | b64decode | trim }}
- set_fact:
use_systemd={{ init_system.strip() == 'systemd' }}
- set_fact:
mds_name: "{{ ansible_hostname }}"
when: not mds_use_fqdn
@ -70,81 +57,21 @@
mds_name: "{{ ansible_fqdn }}"
when: mds_use_fqdn
- set_fact:
dir_owner: ceph
dir_group: ceph
dir_mode: "0755"
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- set_fact:
dir_owner: root
dir_group: root
dir_mode: "0755"
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- set_fact:
key_owner: root
key_group: root
key_mode: "0600"
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- set_fact:
key_owner: ceph
key_group: ceph
key_mode: "0600"
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- set_fact:
activate_file_owner: ceph
activate_file_group: ceph
activate_file_mode: "0644"
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- set_fact:
activate_file_owner: root
activate_file_group: root
activate_file_mode: "0644"
when: ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- set_fact:
rbd_client_directory_owner: root
when:
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- rbd_client_directory_owner is not defined
or not rbd_client_directory_owner
- set_fact:
rbd_client_directory_owner: ceph
when:
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- rbd_client_directory_owner is not defined
or not rbd_client_directory_owner
- set_fact:
rbd_client_directory_group: root
when:
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- rbd_client_directory_group is not defined
or not rbd_client_directory_group
- set_fact:
rbd_client_directory_group: ceph
when:
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- rbd_client_directory_group is not defined
or not rbd_client_directory_group
- set_fact:
rbd_client_directory_mode: "1777"
when:
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- rbd_client_directory_mode is not defined
or not rbd_client_directory_mode
- set_fact:
rbd_client_directory_mode: "0770"
when:
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- rbd_client_directory_mode is not defined
or not rbd_client_directory_mode
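All of the release-gated ownership facts above disappear; only the post-Infernalis values survive, and the rest of the diff inlines most of them as literals. For reference, the values the playbooks now assume (whether the rbd_client_directory_* settings stay facts or become role defaults is not visible in this hunk):

dir_owner: ceph                     # directories: ceph:ceph, 0755
dir_group: ceph
dir_mode: "0755"
key_owner: ceph                     # keyrings: ceph:ceph, 0600
key_group: ceph
key_mode: "0600"
activate_file_owner: ceph           # done/upstart/sysvinit marker files
activate_file_group: ceph
activate_file_mode: "0644"
rbd_client_directory_owner: ceph
rbd_client_directory_group: ceph
rbd_client_directory_mode: "0770"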

View File

@ -3,32 +3,22 @@
file:
path: /etc/ceph
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
owner: "ceph"
group: "ceph"
mode: "0755"
- name: "generate ceph configuration file: {{ cluster }}.conf"
action: config_template
args:
src: ceph.conf.j2
dest: /etc/ceph/{{ cluster }}.conf
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ activate_file_mode }}"
owner: "ceph"
group: "ceph"
mode: "0644"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
notify:
- restart ceph mons
- restart ceph mons on ubuntu
- restart ceph mons with systemd
- restart ceph osds
- restart ceph osds on ubuntu
- restart ceph osds with systemd
- restart ceph mdss
- restart ceph mdss on ubuntu
- restart ceph mdss with systemd
- restart ceph rgws
- restart ceph rgws on ubuntu
- restart ceph rgws on red hat
- restart ceph rgws with systemd
- restart ceph nfss

View File

@ -49,15 +49,6 @@
- ceph_origin == 'local'
- use_installer
- name: install ceph
yum:
name: ceph
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
- ceph_release_num.{{ ceph_release }} <= ceph_release_num.infernalis
- ansible_pkg_mgr == "yum"
- ceph_origin != 'local'
- name: synchronize ceph install
synchronize:
src: "{{ceph_installation_dir}}/"
@ -83,7 +74,6 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
- mon_group_name in group_names
- ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
or ceph_origin == "distro"
or ceph_custom
@ -93,7 +83,6 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
- osd_group_name in group_names
- ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
or ceph_origin == "distro"
or ceph_custom
@ -103,7 +92,6 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
- mds_group_name in group_names
- ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
or ceph_origin == "distro"
or ceph_custom
@ -113,7 +101,6 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
- client_group_name in group_names
- ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
or ceph_origin == "distro"
or ceph_dev
or ceph_custom
@ -124,7 +111,6 @@
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
when:
- client_group_name in group_names
- ceph_release_num.{{ ceph_release }} > ceph_release_num.infernalis
or ceph_origin == "distro"
or ceph_custom
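The dedicated pre-Infernalis yum task goes away, while the per-daemon install tasks keep their state expression: upgrade_ceph_packages flips the package state between present and latest through the ternary filter. A sketch of one such task; the task and package names are assumptions, since the hunk hides the surrounding context lines:

- name: install ceph mon package              # illustrative name
  yum:
    name: ceph-mon                            # assumed package
    state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
  when:
    - mon_group_name in group_names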

View File

@ -1,31 +1,5 @@
---
# NOTE (leseb): we can not use docker inspect with 'format filed' because of
# https://github.com/ansible/ansible/issues/10156
- name: inspect ceph version
shell: docker inspect "{{ ceph_docker_registry }}/{{ ceph_mds_docker_username }}/{{ ceph_mds_docker_imagename }}:{{ ceph_mds_docker_image_tag }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
run_once: true
always_run: true
register: ceph_version
- set_fact:
after_hammer=True
when: ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: "0755"
with_items:
- /etc/ceph/
- /var/lib/ceph/bootstrap-mds
when: not after_hammer
- name: create bootstrap directories (after hammer)
- name: create bootstrap directories
file:
path: "{{ item }}"
state: directory
@ -35,4 +9,3 @@
with_items:
- /etc/ceph/
- /var/lib/ceph/bootstrap-mds
when: after_hammer
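The docker-inspect probe existed only to tell hammer-era images apart; with pre-Jewel support gone, the after_hammer fact and the duplicated bootstrap-directory task are dropped. The same probe-and-branch block is removed again below for the mon, nfs, osd, rbd-mirror, restapi and rgw container roles, leaving each with a single task along these lines (the surviving owner/group values sit in unchanged lines the hunk does not show):

- name: create bootstrap directories
  file:
    path: "{{ item }}"
    state: directory
    mode: "0755"
  with_items:
    - /etc/ceph/
    - /var/lib/ceph/bootstrap-mds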

View File

@ -3,17 +3,17 @@
file:
path: /var/lib/ceph/bootstrap-mds/
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
owner: "ceph"
group: "ceph"
mode: "0755"
- name: copy mds bootstrap key
copy:
src: "{{ fetch_directory }}/{{ fsid }}{{ item.name }}"
dest: "{{ item.name }}"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
mode: "{{ key_mode }}"
owner: "ceph"
group: "ceph"
mode: "0600"
with_items:
- { name: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
@ -25,9 +25,9 @@
file:
path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
owner: "ceph"
group: "ceph"
mode: "0755"
- name: create mds keyring
command: ceph --cluster {{ cluster }} --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring auth get-or-create mds.{{ mds_name }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/keyring
@ -39,68 +39,14 @@
- name: set mds key permissions
file:
path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/keyring
mode: "{{ key_mode }}"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
owner: "ceph"
group: "ceph"
mode: "0600"
when: cephx
- name: activate metadata server with upstart
file:
path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/{{ item }}
state: touch
owner: "{{ activate_file_owner }}"
group: "{{ activate_file_group }}"
mode: "{{ activate_file_mode }}"
with_items:
- done
- upstart
changed_when: false
when: not use_systemd
- name: activate metadata server with sysvinit
file:
path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/{{ item }}
state: touch
owner: "{{ activate_file_owner }}"
group: "{{ activate_file_group }}"
mode: "{{ activate_file_mode }}"
with_items:
- done
- sysvinit
changed_when: false
when: not use_systemd
- name: enable systemd unit file for mds instance (for or after infernalis)
command: systemctl enable ceph-mds@{{ mds_name }}
changed_when: false
failed_when: false
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: start and add that the metadata service to the init sequence (upstart)
command: initctl emit ceph-mds cluster={{ cluster }} id={{ mds_name }}
changed_when: false
failed_when: false
when: not use_systemd
- name: start and add that the metadata service to the init sequence (systemd before infernalis)
service:
name: ceph
state: started
enabled: yes
args: mds.{{ mds_name }}
changed_when: false
when:
- not use_systemd
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: start and add that the metadata service to the init sequence (systemd after hammer)
- name: start and add that the metadata service to the init sequence
service:
name: ceph-mds@{{ mds_name }}
state: started
enabled: yes
changed_when: false
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
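The upstart and sysvinit activation markers, the pre-Infernalis service task and their guards all go; the mds is now handled purely through its systemd instance unit. As a concrete example of the templating, with mds_use_fqdn: false on a host named mds01 the surviving task manages ceph-mds@mds01.service:

- name: start and add that the metadata service to the init sequence
  service:
    name: ceph-mds@{{ mds_name }}   # resolves to ceph-mds@mds01
    state: started
    enabled: yes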

View File

@ -8,7 +8,6 @@
always_run: true
when:
- cephx
- ceph_release_num.{{ ceph_release }} > ceph_release_num.jewel
# NOTE (leseb): wait for mon discovery and quorum resolution
# the admin key is not instantaneously created so we have to wait a bit
@ -112,9 +111,9 @@
- name: set keys permissions
file:
path: "{{ item }}"
mode: "{{ key_mode }}"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
owner: "ceph"
group: "ceph"
mode: "0600"
with_items:
- "{{ ceph_keys.get('stdout_lines') | default([]) }}"
when: cephx

View File

@ -9,24 +9,20 @@
- cephfs_data
- cephfs_metadata
changed_when: false
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.firefly
- name: create ceph filesystem
command: ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
changed_when: false
when: ceph_release_num.{{ ceph_release }} > ceph_release_num.firefly
- name: allow multimds
command: ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it
changed_when: false
when:
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.jewel
- mds_allow_multimds
- name: set max_mds
command: ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}
changed_when: false
when:
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.jewel
- mds_allow_multimds
- mds_max_mds > 1
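The firefly and jewel release guards are redundant once Jewel is the floor; the filesystem tasks now run unconditionally, and only the multi-mds tasks stay gated on their feature switches. A sketch of the variables driving those two tasks; the values are illustrative and the location of the defaults is not shown in this hunk:

mds_allow_multimds: true    # enables 'fs set {{ cephfs }} allow_multimds true'
mds_max_mds: 3              # applied only when mds_allow_multimds and > 1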

View File

@ -24,47 +24,29 @@
- name: set initial monitor key permissions
file:
path: /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
mode: "{{ key_mode }}"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
owner: "ceph"
group: "ceph"
mode: "0600"
when: cephx
- name: create monitor directory
file:
path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
owner: "ceph"
group: "ceph"
mode: "0755"
- name: ceph monitor mkfs with keyring (for or after infernalis release)
- name: ceph monitor mkfs with keyring
command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when:
- cephx
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: ceph monitor mkfs without keyring (for or after infernalis release)
- name: ceph monitor mkfs without keyring
command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when:
- not cephx
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: ceph monitor mkfs with keyring (before infernalis release)
command: ceph-mon --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when:
- cephx
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: ceph monitor mkfs without keyring (before infernalis release)
command: ceph-mon --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when:
- not cephx
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis

View File

@ -1,34 +1,5 @@
---
# NOTE (leseb): we can not use docker inspect with 'format filed' because of
# https://github.com/ansible/ansible/issues/10156
- name: inspect ceph version
shell: docker inspect "{{ ceph_docker_registry }}/{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}:{{ ceph_mon_docker_image_tag }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
run_once: true
register: ceph_version
always_run: true
- set_fact:
after_hamer=True
when:
ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: "0755"
with_items:
- /etc/ceph/
- /var/lib/ceph/bootstrap-osd
- /var/lib/ceph/bootstrap-mds
- /var/lib/ceph/bootstrap-rgw
when: not after_hamer
- name: create bootstrap directories (after hammer)
- name: create bootstrap directories
file:
path: "{{ item }}"
state: directory
@ -40,4 +11,3 @@
- /var/lib/ceph/bootstrap-osd
- /var/lib/ceph/bootstrap-mds
- /var/lib/ceph/bootstrap-rgw
when: after_hamer

View File

@ -3,11 +3,9 @@
command: rados --cluster {{ cluster }} lspools
register: ceph_pools
always_run: true
when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.hammer
- name: secure the cluster
command: ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
with_nested:
- "{{ ceph_pools.stdout_lines|default([]) }}"
- "{{ secure_cluster_flags }}"
when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.hammer
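Both tasks here already required hammer or newer, which every supported release satisfies, so the guards simply disappear. with_nested expands to one pool-set command per (pool, flag) combination; a sketch with illustrative flag values:

secure_cluster_flags:        # illustrative values
  - nopgchange
  - nodelete
  - nosizechange
# for a pool named rbd the task then runs, in turn:
#   ceph --cluster ceph osd pool set rbd nopgchange true
#   ceph --cluster ceph osd pool set rbd nodelete true
#   ceph --cluster ceph osd pool set rbd nosizechange true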

View File

@ -1,62 +1,7 @@
---
- name: activate monitor with upstart
file:
path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/{{ item }}
state: touch
owner: "{{ activate_file_owner }}"
group: "{{ activate_file_group }}"
mode: "{{ activate_file_mode }}"
changed_when: false
with_items:
- done
- upstart
when:
- not use_systemd
- name: start and add that the monitor service to the init sequence (ubuntu)
command: initctl emit ceph-mon cluster={{ cluster }} id={{ monitor_name }}
changed_when: false
failed_when: false
when:
- not use_systemd
# legacy ceph system v init scripts require a mon section in order to work
# Not Ubuntu so we can catch old debian systems that don't use systemd or upstart
- name: add mon section into ceph.conf for systemv init scripts
ini_file:
dest: /etc/ceph/{{ cluster }}.conf
section: mon.{{ monitor_name }}
option: host
value: "{{ monitor_name }}"
state: present
when:
- ansible_os_family != "Ubuntu"
- ceph_release_num.{{ ceph_stable_release }} < ceph_release_num.infernalis
# NOTE (jsaintrocc): can't use service module because we need to use the
# legacy systemv init for older ceph releases. Even when the os supports systemd
# Not Ubuntu so we can catch old debian systems that don't use systemd or upstart
- name: start and add that the monitor service to the init sequence
command: service ceph start mon
changed_when: false
when:
- ansible_os_family != "Ubuntu"
- ceph_release_num.{{ ceph_stable_release }} < ceph_release_num.infernalis
- name: start and add that the monitor service to the init sequence (for or after infernalis)
command: systemctl enable ceph-mon@{{ monitor_name }}
changed_when: false
failed_when: false
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: start the monitor service (for or after infernalis)
- name: start the monitor service
service:
name: ceph-mon@{{ monitor_name }}
state: started
enabled: yes
changed_when: false
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

View File

@ -1,32 +1,5 @@
---
# NOTE (leseb): we can not use docker inspect with 'format filed' because of
# https://github.com/ansible/ansible/issues/10156
- name: inspect ceph version
shell: docker inspect "{{ ceph_docker_registry }}/{{ ceph_nfs_docker_username }}/{{ ceph_nfs_docker_imagename }}:{{ ceph_nfs_docker_image_tag }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
run_once: true
always_run: true
register: ceph_version
- set_fact:
after_hammer=True
when: ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: "0755"
with_items:
- /etc/ceph/
- /var/lib/ceph/
- /var/lib/ceph/radosgw
when: not after_hammer
- name: create bootstrap directories (after hammer)
- name: create bootstrap directories
file:
path: "{{ item }}"
state: directory
@ -37,7 +10,6 @@
- /etc/ceph/
- /var/lib/ceph/
- /var/lib/ceph/radosgw
when: after_hammer
- name: create ganesha directories
file:

View File

@ -3,9 +3,9 @@
file:
path: "{{ item }}"
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
owner: "ceph"
group: "ceph"
mode: "0755"
with_items:
- /var/lib/nfs/ganesha
- /var/run/ganesha

View File

@ -81,51 +81,17 @@
- include: osd_fragment.yml
when: crush_location
- name: set selinux to permissive and make it persistent
selinux:
policy: targeted
state: permissive
when:
- ansible_selinux != false
- ansible_selinux['status'] == 'enabled'
- ansible_selinux['config_mode'] != 'disabled'
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: start and add that the osd service(s) to the init sequence (before infernalis)
service:
name: ceph
state: started
enabled: yes
when:
- ansible_distribution != "Ubuntu"
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: get osd id (for or after infernalis)
- name: get osd id
shell: 'ls /var/lib/ceph/osd/ | grep -oP "\d+$"'
changed_when: false
failed_when: false
always_run: true
register: osd_id
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: enable the osd service (for or after infernalis)
command: systemctl enable ceph-osd@{{ item }}
changed_when: false
failed_when: false
with_items: "{{ (osd_id|default({})).stdout_lines|default([]) }}"
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: start and add that the osd service(s) to the init sequence (for or after infernalis)
- name: start and add that the osd service(s) to the init sequence
service:
name: ceph-osd@{{ item }}
state: started
enabled: yes
with_items: "{{ (osd_id|default({})).stdout_lines|default([]) }}"
changed_when: false
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
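The selinux and sysvinit branches disappear, leaving the systemd path: list the OSD ids, enable each ceph-osd@<id> unit, then start them. /var/lib/ceph/osd/ holds one directory per OSD named <cluster>-<id>, so the grep -oP "\d+$" reduces e.g. ceph-0 and ceph-3 to the bare ids used as unit instances; a sketch of what the final loop expands to on such a host:

- name: start and add that the osd service(s) to the init sequence
  service:
    name: "ceph-osd@{{ item }}"     # ceph-osd@0, ceph-osd@3
    state: started
    enabled: yes
  with_items:
    - "0"
    - "3"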

View File

@ -1,31 +1,5 @@
---
# NOTE (leseb): we can not use docker inspect with 'format filed' because of
# https://github.com/ansible/ansible/issues/10156
- name: inspect ceph version
shell: docker inspect "{{ ceph_docker_registry }}/{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}:{{ ceph_osd_docker_image_tag }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
run_once: true
always_run: true
register: ceph_version
- set_fact:
after_hamer=True
when: ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: "0755"
with_items:
- /etc/ceph/
- /var/lib/ceph/bootstrap-osd
when: not after_hamer
- name: create bootstrap directories (after hammer)
- name: create bootstrap directories
file:
path: "{{ item }}"
state: directory
@ -35,4 +9,3 @@
with_items:
- /etc/ceph/
- /var/lib/ceph/bootstrap-osd
when: after_hamer

View File

@ -36,9 +36,9 @@
file:
path: "{{ item }}"
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
owner: "ceph"
group: "ceph"
mode: "0755"
with_items:
- /etc/ceph/ceph.d/
- /etc/ceph/ceph.d/osd_fragments
@ -47,9 +47,9 @@
template:
src: osd.conf.j2
dest: /etc/ceph/ceph.d/osd_fragments/osd.{{ item.stdout }}.conf
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ activate_file_mode }}"
owner: "ceph"
group: "ceph"
mode: "0644"
with_items: "{{ combined_osd_id.results }}"
- name: copy {{ cluster }}.conf for assembling
@ -60,14 +60,14 @@
assemble:
src: /etc/ceph/ceph.d/osd_fragments/
dest: /etc/ceph/ceph.d/osd.conf
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ activate_file_mode }}"
owner: "ceph"
group: "ceph"
mode: "0644"
- name: assemble {{ cluster }}.conf and osd fragments
assemble:
src: /etc/ceph/ceph.d/
dest: /etc/ceph/{{ cluster }}.conf
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ activate_file_mode }}"
owner: "ceph"
group: "ceph"
mode: "0644"

View File

@ -16,10 +16,11 @@
file:
path: "{{ item }}"
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
when: cephx
owner: "ceph"
group: "ceph"
mode: "0755"
when:
cephx
with_items:
- /var/lib/ceph/bootstrap-osd/
- /var/lib/ceph/osd/
@ -34,9 +35,9 @@
copy:
src: "{{ fetch_directory }}/{{ fsid }}{{ item.name }}"
dest: "{{ item.name }}"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
mode: "{{ key_mode }}"
owner: "ceph"
group: "ceph"
mode: "0600"
with_items:
- { name: "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }

View File

@ -8,8 +8,8 @@
file:
path: "{{ item }}"
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
owner: "ceph"
group: "ceph"
with_items: "{{ osd_directories }}"
# NOTE (leseb): the prepare process must be parallelized somehow...
@ -26,16 +26,8 @@
with_items: "{{ osd_directories }}"
changed_when: false
- name: start and add that the OSD service to the init sequence
service:
name: ceph
state: started
enabled: yes
when: not use_systemd
- name: start and add the OSD target to the systemd sequence
service:
name: ceph.target
state: started
enabled: yes
when: use_systemd

View File

@ -1,30 +1,5 @@
---
# NOTE (leseb): we can not use docker inspect with 'format filed' because of
# https://github.com/ansible/ansible/issues/10156
- name: inspect ceph version
shell: docker inspect "{{ ceph_docker_registry}}/{{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}:{{ ceph_rbd_mirror_docker_image_tag }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
always_run: true
run_once: true
register: ceph_version
- set_fact:
after_hammer=True
when: ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: "0755"
with_items:
- /etc/ceph/
when: not after_hammer
- name: create bootstrap directories (after hammer)
- name: create bootstrap directories
file:
path: "{{ item }}"
state: directory
@ -33,4 +8,3 @@
mode: "0755"
with_items:
- /etc/ceph/
when: after_hammer

View File

@ -12,7 +12,7 @@
copy:
src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
mode: "{{ key_mode }}"
owner: "ceph"
group: "ceph"
mode: "0600"
when: cephx

View File

@ -1,33 +1,7 @@
---
- name: start and add that the rbd mirror service to the init sequence (upstart)
command: initctl emit ceph-rbd-mirror cluster={{ cluster }} id={{ ansible_hostname }}
changed_when: false
failed_when: false
when: not use_systemd
# NOTE (leseb): somehow the service ansible module is messing things up
# as a safety measure we run the raw command
- name: start and add that the rbd mirror service to the init sequence (systemd before infernalis)
command: service ceph start ceph-rbd-mirror
changed_when: false
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: enable systemd unit file for the rbd mirror service (systemd after hammer)
command: systemctl enable ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}
changed_when: false
failed_when: false
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: start and add that the rbd mirror service to the init sequence (systemd after hammer)
- name: start and add that the rbd mirror service to the init sequence
service:
name: "ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}"
state: started
enabled: yes
changed_when: false
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer

View File

@ -1,31 +1,5 @@
---
- name: inspect ceph version
shell: docker inspect "{{ ceph_docker_registry}}/{{ ceph_restapi_docker_username }}/{{ ceph_restapi_docker_imagename }}:{{ ceph_restapi_docker_image_tag }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
run_once: true
always_run: true
register: ceph_version
- set_fact:
after_hammer=True
when: ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: "0755"
with_items:
- /etc/ceph/
- /var/lib/ceph/bootstrap-osd
- /var/lib/ceph/bootstrap-mds
- /var/lib/ceph/bootstrap-rgw
when: not after_hammer
- name: create bootstrap directories (after hammer)
- name: create bootstrap directories
file:
path: "{{ item }}"
state: directory
@ -37,4 +11,3 @@
- /var/lib/ceph/bootstrap-osd
- /var/lib/ceph/bootstrap-mds
- /var/lib/ceph/bootstrap-rgw
when: after_hammer

View File

@ -3,26 +3,26 @@
file:
path: /var/lib/ceph/restapi/ceph-restapi
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
owner: "ceph"
group: "ceph"
mode: "0755"
- name: copy ceph rest api keyring
copy:
src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.restapi.keyring"
dest: "/var/lib/ceph/restapi/ceph-restapi/keyring"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
mode: "{{ key_mode }}"
owner: "ceph"
group: "ceph"
mode: "0600"
when: cephx
- name: activate ceph rest api with upstart
file:
path: /var/lib/ceph/restapi/{{ item }}
state: touch
owner: "{{ activate_file_owner }}"
group: "{{ activate_file_group }}"
mode: "{{ activate_file_mode }}"
owner: "ceph"
group: "ceph"
mode: "0644"
with_items:
- done
- upstart
@ -33,9 +33,9 @@
file:
path: /var/lib/ceph/restapi/{{ item }}
state: touch
owner: "{{ activate_file_owner }}"
group: "{{ activate_file_group }}"
mode: "{{ activate_file_mode }}"
owner: "ceph"
group: "ceph"
mode: "0644"
with_items:
- done
- sysvinit

View File

@ -1,31 +1,5 @@
---
# NOTE (leseb): we can not use docker inspect with 'format filed' because of
# https://github.com/ansible/ansible/issues/10156
- name: inspect ceph version
shell: docker inspect "{{ ceph_docker_registry }}/{{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}:{{ ceph_rgw_docker_image_tag }}" | awk -F '=' '/CEPH_VERSION/ { gsub ("\",", "", $2); print $2 }' | uniq
changed_when: false
failed_when: false
always_run: true
run_once: true
register: ceph_version
- set_fact:
after_hammer=True
when: ceph_version.stdout not in ['firefly','giant', 'hammer']
- name: create bootstrap directories (for or before hammer)
file:
path: "{{ item }}"
state: directory
owner: root
group: root
mode: "0755"
with_items:
- /etc/ceph/
- /var/lib/ceph/bootstrap-rgw
when: not after_hammer
- name: create bootstrap directories (after hammer)
- name: create bootstrap directories
file:
path: "{{ item }}"
state: directory
@ -35,4 +9,3 @@
with_items:
- /etc/ceph/
- /var/lib/ceph/bootstrap-rgw
when: after_hammer

View File

@ -21,7 +21,6 @@
when:
- rgw_zone is defined
- rgw_multisite
- ( ceph_release_num.{{ ceph_release }} >= ceph_release_num.jewel )
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False

View File

@ -3,9 +3,9 @@
file:
path: "{{ item }}"
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
owner: "ceph"
group: "ceph"
mode: "0755"
with_items:
- /var/lib/ceph/bootstrap-rgw
- /var/lib/ceph/radosgw
@ -16,9 +16,9 @@
copy:
src: "{{ fetch_directory }}/{{ fsid }}{{ item.name }}"
dest: "{{ item.name }}"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
mode: "{{ key_mode }}"
owner: "ceph"
group: "ceph"
mode: "0600"
with_items:
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
@ -33,52 +33,14 @@
changed_when: false
when: cephx
- name: set rados gateway key permissions (for or after the infernalis release)
- name: set rados gateway key permissions
file:
path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
mode: "{{ key_mode }}"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
owner: "ceph"
group: "ceph"
mode: "0600"
when: cephx
- name: ensure ceph-radosgw systemd unit file is present
command: chkconfig --add ceph-radosgw
args:
creates: /var/run/systemd/generator.late/ceph-radosgw.service
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: activate rados gateway with upstart
file:
path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/{{ item }}
state: touch
owner: "{{ activate_file_owner }}"
group: "{{ activate_file_group }}"
mode: "{{ activate_file_mode }}"
with_items:
- done
- upstart
changed_when: false
when:
- ansible_distribution == "Ubuntu"
- not use_systemd
- name: activate rados gateway with sysvinit
file:
path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/{{ item }}
state: touch
owner: "{{ activate_file_owner }}"
group: "{{ activate_file_group }}"
mode: "{{ activate_file_mode }}"
with_items:
- done
- sysvinit
changed_when: false
when:
- ansible_distribution != "Ubuntu"
- not use_systemd
- name: generate rados gateway sudoers file
template:
src: ceph.j2

View File

@ -7,41 +7,7 @@
always_run: true
- name: start rgw
command: /etc/init.d/radosgw start
when:
- rgwstatus.rc != 0
- ansible_distribution != "Ubuntu"
- ansible_os_family != 'RedHat'
- not use_systemd
- name: start and add that the rados gateway service to the init sequence (ubuntu)
command: initctl emit radosgw cluster={{ cluster }} id=rgw.{{ ansible_hostname }}
changed_when: false
failed_when: false
when: not use_systemd
- name: start rgw on red hat (before or on infernalis)
service:
name: ceph-radosgw
state: started
enabled: yes
when:
- ansible_os_family == 'RedHat'
- ceph_release_num.{{ ceph_release }} < ceph_release_num.infernalis
- name: enable systemd unit file for rgw instance (for or after infernalis)
command: systemctl enable ceph-radosgw@rgw.{{ ansible_hostname }}
changed_when: false
failed_when: false
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
- name: start rgw with systemd (for or after infernalis)
service:
name: ceph-radosgw@rgw.{{ ansible_hostname }}
state: started
enabled: yes
when:
- use_systemd
- ceph_release_num.{{ ceph_release }} > ceph_release_num.hammer
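start_radosgw.yml keeps only the systemd path: enable the ceph-radosgw@rgw.<hostname> instance unit and start it. The hunk does not show the surviving lines or any renamed task titles, so this is only a plausible sketch of what remains:

- name: start rgw
  service:
    name: ceph-radosgw@rgw.{{ ansible_hostname }}
    state: started
    enabled: yes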