Merge pull request #2055 from ceph/update-mirror-nfs

upgrade: support for rbd mirror and nfs
pull/2067/head
Guillaume Abrioux 2017-10-17 14:51:39 +02:00 committed by GitHub
commit 2aa53fb0f5
3 changed files with 202 additions and 199 deletions

View File

@@ -42,6 +42,8 @@
     - "{{ mds_group_name|default('mdss') }}"
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ mgr_group_name|default('mgrs') }}"
+    - "{{ rbd_mirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
     - "{{ client_group_name|default('clients') }}"
   become: True
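The two new host groups reuse the same default-name pattern as the groups already listed. Purely as an illustration (the group names below are the defaults from the diff; the host names are placeholders and not part of the change), a matching inventory in Ansible's YAML format could look like this:

all:
  children:
    rbdmirrors:
      hosts:
        rbdmirror0:    # placeholder rbd-mirror host
    nfss:
      hosts:
        nfs0:          # placeholder nfs-ganesha gateway host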
@@ -70,26 +72,19 @@
     - debug: msg="WARNING - upgrading a ceph cluster with only one monitor node ({{ inventory_hostname }})"
       when: mon_host_count | int == 1

-    - name: stop ceph mons with upstart
-      service:
-        name: ceph-mon
-        state: stopped
-        args: id={{ ansible_hostname }}
-      when: ansible_service_mgr == 'upstart'
-
-    - name: stop ceph mons with sysvinit
-      service:
-        name: ceph
-        state: stopped
-      when: ansible_service_mgr == 'sysvinit'
-
-    - name: stop ceph mons with systemd
-      service:
+    - name: fail when single containerized monitor
+      fail:
+        msg: "Upgrades of a single monitor are not supported, also running 1 monitor is not recommended always use 3."
+      when:
+        - containerized_deployment
+        - mon_host_count | int == 1
+
+    - name: stop ceph mon
+      systemd:
         name: ceph-mon@{{ ansible_hostname }}
         state: stopped
         enabled: yes
       when:
-        - ansible_service_mgr == 'systemd'
         - not containerized_deployment

   roles:
@@ -100,35 +95,21 @@
     - ceph-mon

   post_tasks:
-    - name: start ceph mons with upstart
-      service:
-        name: ceph-mon
-        state: started
-        args: id={{ ansible_hostname }}
-      when: ansible_service_mgr == 'upstart'
-
-    - name: start ceph mons with sysvinit
-      service:
-        name: ceph
-        state: started
-      when: ansible_service_mgr == 'sysvinit'
-
-    - name: start ceph mons with systemd
-      service:
+    - name: start ceph mon
+      systemd:
         name: ceph-mon@{{ ansible_hostname }}
         state: started
         enabled: yes
       when:
-        - ansible_service_mgr == 'systemd'
         - not containerized_deployment

-    - name: restart containerized ceph mons with systemd
-      service:
+    - name: restart containerized ceph mon
+      systemd:
         name: ceph-mon@{{ ansible_hostname }}
         state: restarted
         enabled: yes
+        daemon_reload: yes
       when:
-        - ansible_service_mgr == 'systemd'
         - containerized_deployment

     - name: set mon_host_count
@@ -169,8 +150,56 @@
       when:
         - containerized_deployment

+- name: upgrade ceph mgr node
+  vars:
+    upgrade_ceph_packages: True
+
+  hosts:
+    - "{{ mgr_group_name|default('mgrs') }}"
+
+  serial: 1
+  become: True
+
+  pre_tasks:
+    # this task has a failed_when: false to handle the scenario where no mgr existed before the upgrade
+    - name: stop ceph mgr
+      systemd:
+        name: ceph-mgr@{{ ansible_hostname }}
+        state: stopped
+        enabled: yes
+      failed_when: false
+      when:
+        - not containerized_deployment
+
+  roles:
+    - ceph-defaults
+    - ceph-config
+    - { role: ceph-common, when: not containerized_deployment }
+    - { role: ceph-docker-common, when: containerized_deployment }
+    - { role: ceph-mgr, when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" }
+
+  post_tasks:
+    - name: start ceph mgr
+      systemd:
+        name: ceph-mgr@{{ ansible_hostname }}
+        state: started
+        enabled: yes
+      when:
+        - not containerized_deployment
+
+    - name: restart containerized ceph mgr
+      systemd:
+        name: ceph-mgr@{{ ansible_hostname }}
+        state: restarted
+        enabled: yes
+        daemon_reload: yes
+      when:
+        - containerized_deployment
+
     - name: set osd flags
-      command: ceph osd set {{ item }} --cluster {{ cluster }}
+      command: ceph --cluster {{ cluster }} osd set {{ item }}
       with_items:
         - noout
         - noscrub
@@ -180,7 +209,7 @@
     - name: set containerized osd flags
       command: |
-        docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph osd set {{ item }} --cluster {{ cluster }}
+        docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd set {{ item }}
       with_items:
         - noout
         - noscrub
@@ -209,26 +238,13 @@
       changed_when: false
       when: not containerized_deployment

-    - name: stop ceph osds with upstart
-      service:
-        name: ceph-osd-all
-        state: stopped
-      when: ansible_service_mgr == 'upstart'
-
-    - name: stop ceph osds with sysvinit
-      service:
-        name: ceph
-        state: stopped
-      when: ansible_service_mgr == 'sysvinit'
-
-    - name: stop ceph osds with systemd
-      service:
+    - name: stop ceph osd
+      systemd:
         name: ceph-osd@{{ item }}
         state: stopped
         enabled: yes
       with_items: "{{ osd_ids.stdout_lines }}"
       when:
-        - ansible_service_mgr == 'systemd'
         - not containerized_deployment

   roles:
@@ -245,36 +261,23 @@
       changed_when: false
       when: not containerized_deployment

-    - name: start ceph osds with upstart
-      service:
-        name: ceph-osd-all
-        state: started
-      when: ansible_service_mgr == 'upstart'
-
-    - name: start ceph osds with sysvinit
-      service:
-        name: ceph
-        state: started
-      when: ansible_service_mgr == 'sysvinit'
-
-    - name: start ceph osds with systemd
-      service:
+    - name: start ceph osd
+      systemd:
         name: ceph-osd@{{ item }}
         state: started
         enabled: yes
       with_items: "{{ osd_ids.stdout_lines }}"
       when:
-        - ansible_service_mgr == 'systemd'
         - not containerized_deployment

-    - name: restart containerized ceph osds with systemd
-      service:
+    - name: restart containerized ceph osd
+      systemd:
         name: ceph-osd@{{ item | basename }}
         state: restarted
         enabled: yes
+        daemon_reload: yes
       with_items: "{{ devices }}"
       when:
-        - ansible_service_mgr == 'systemd'
         - containerized_deployment

     - name: set_fact docker_exec_cmd_osd
@ -359,27 +362,12 @@
become: True become: True
pre_tasks: pre_tasks:
- name: stop ceph mdss with upstart - name: stop ceph mds
service: systemd:
name: ceph-mds
state: stopped
args: id={{ ansible_hostname }}
when: ansible_service_mgr == 'upstart'
- name: stop ceph mdss with sysvinit
service:
name: ceph
state: stopped
args: mds
when: ansible_service_mgr == 'sysvinit'
- name: stop ceph mdss with systemd
service:
name: ceph-mds@{{ ansible_hostname }} name: ceph-mds@{{ ansible_hostname }}
state: stopped state: stopped
enabled: yes enabled: yes
when: when:
- ansible_service_mgr == 'systemd'
- not containerized_deployment - not containerized_deployment
roles: roles:
@@ -390,36 +378,21 @@
     - ceph-mds

   post_tasks:
-    - name: start ceph mdss with upstart
-      service:
-        name: ceph-mds
-        state: started
-        args: id={{ ansible_hostname }}
-      when: ansible_service_mgr == 'upstart'
-
-    - name: start ceph mdss with sysvinit
-      service:
-        name: ceph
-        state: started
-        args: mds
-      when: ansible_service_mgr == 'sysvinit'
-
-    - name: start ceph mdss with systemd
-      service:
+    - name: start ceph mds
+      systemd:
         name: ceph-mds@{{ ansible_hostname }}
         state: started
         enabled: yes
       when:
-        - ansible_service_mgr == 'systemd'
         - not containerized_deployment

-    - name: restart ceph mdss
-      service:
+    - name: restart ceph mds
+      systemd:
         name: ceph-mds@{{ ansible_hostname }}
         state: restarted
         enabled: yes
+        daemon_reload: yes
       when:
-        - ansible_service_mgr == 'systemd'
         - containerized_deployment
@@ -435,25 +408,12 @@
   become: True

   pre_tasks:
-    - name: stop ceph rgws with upstart
-      service:
-        name: ceph-radosgw
-        state: stopped
-      when: ansible_service_mgr == 'upstart'
-
-    - name: stop ceph rgws with sysvinit
-      service:
-        name: radosgw
-        state: stopped
-      when: ansible_service_mgr == 'sysvinit'
-
-    - name: stop ceph rgws with systemd
-      service:
+    - name: stop ceph rgw
+      systemd:
         name: ceph-radosgw@rgw.{{ ansible_hostname }}
         state: stopped
         enabled: yes
       when:
-        - ansible_service_mgr == 'systemd'
         - not containerized_deployment

   roles:
@@ -464,34 +424,123 @@
     - ceph-rgw

   post_tasks:
-    - name: start ceph rgws with upstart
-      service:
-        name: ceph-radosgw
-        state: started
-      when: ansible_service_mgr == 'upstart'
-
-    - name: start ceph rgws with sysvinit
-      service:
-        name: radosgw
-        state: started
-      when: ansible_service_mgr == 'sysvinit'
-
-    - name: start ceph rgws with systemd
-      service:
+    - name: start ceph rgw
+      systemd:
         name: ceph-radosgw@rgw.{{ ansible_hostname }}
         state: started
         enabled: yes
       when:
-        - ansible_service_mgr == 'systemd'
         - not containerized_deployment

-    - name: restart containerized ceph rgws with systemd
-      service:
+    - name: restart containerized ceph rgw
+      systemd:
         name: ceph-radosgw@rgw.{{ ansible_hostname }}
         state: restarted
         enabled: yes
+        daemon_reload: yes
       when:
-        - ansible_service_mgr == 'systemd'
+        - containerized_deployment
+
+- name: upgrade ceph rbd mirror node
+  vars:
+    upgrade_ceph_packages: True
+
+  hosts:
+    - "{{ rbd_mirror_group_name|default('rbdmirrors') }}"
+
+  serial: 1
+  become: True
+
+  pre_tasks:
+    # NOTE(leseb): these tasks have a 'failed_when: false'
+    # in case we run before luminous or after
+    - name: stop ceph rbd mirror before luminous
+      systemd:
+        name: "ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}"
+        state: stopped
+        enabled: no
+      failed_when: false
+
+    - name: stop ceph rbd mirror for and after luminous
+      systemd:
+        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+        state: stopped
+        enabled: yes
+      failed_when: false
+
+  roles:
+    - ceph-defaults
+    - ceph-config
+    - { role: ceph-common, when: not containerized_deployment }
+    - { role: ceph-docker-common, when: containerized_deployment }
+    - ceph-rbd-mirror
+
+  post_tasks:
+    - name: start ceph rbd mirror
+      systemd:
+        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+        state: started
+        enabled: yes
+      when:
+        - not containerized_deployment
+
+    - name: restart containerized ceph rbd mirror
+      systemd:
+        name: ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}
+        state: restarted
+        enabled: yes
+        daemon_reload: yes
+      when:
+        - containerized_deployment
+
+- name: upgrade ceph nfs node
+  vars:
+    upgrade_ceph_packages: True
+
+  hosts:
+    - "{{ nfs_group_name|default('nfss') }}"
+
+  serial: 1
+  become: True
+
+  pre_tasks:
+    - name: stop ceph nfs
+      systemd:
+        name: nfs-ganesha
+        state: stopped
+        enabled: yes
+      when:
+        - not containerized_deployment
+
+  roles:
+    - ceph-defaults
+    - ceph-config
+    - { role: ceph-common, when: not containerized_deployment }
+    - { role: ceph-docker-common, when: containerized_deployment }
+    - ceph-nfs
+
+  post_tasks:
+    - name: start nfs gateway
+      systemd:
+        name: nfs-ganesha
+        state: started
+        enabled: yes
+      when:
+        - not containerized_deployment
+        - ceph_nfs_enable_service
+
+    - name: systemd restart nfs container
+      systemd:
+        name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
+        state: restarted
+        enabled: yes
+        daemon_reload: yes
+      when:
+        - ceph_nfs_enable_service
         - containerized_deployment
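To make the unit-name templating in the two new plays concrete: for a hypothetical host named node1, with ceph_rbd_mirror_local_user set to admin and ceph_nfs_service_suffix left unset (so it falls back to ansible_hostname), the tasks above would manage roughly the following systemd units. This listing is illustrative only and is not part of the diff:

# ceph-rbd-mirror@admin.service              # pre-luminous unit name, stopped and disabled
# ceph-rbd-mirror@rbd-mirror.node1.service   # luminous-and-later unit name
# nfs-ganesha.service                        # non-containerized nfs gateway
# ceph-nfs@node1.service                     # containerized nfs, suffix defaults to ansible_hostname

Because only one of the two rbd-mirror unit names can exist on a given release, both stop tasks carry failed_when: false, as the NOTE in the play explains.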
@@ -514,53 +563,6 @@
     - ceph-client

-- name: upgrade ceph mgr node
-  vars:
-    upgrade_ceph_packages: True
-
-  hosts:
-    - "{{ mgr_group_name|default('mgrs') }}"
-
-  serial: 1
-  become: True
-
-  pre_tasks:
-    # this task has a failed_when: false to handle the scenario where no mgr existed before the upgrade
-    - name: stop ceph mgrs
-      service:
-        name: ceph-mgr@{{ ansible_hostname }}
-        state: stopped
-        enabled: yes
-      failed_when: false
-      when:
-        - not containerized_deployment
-
-  roles:
-    - ceph-defaults
-    - ceph-config
-    - { role: ceph-common, when: not containerized_deployment }
-    - { role: ceph-docker-common, when: containerized_deployment }
-    - { role: ceph-mgr, when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" }
-
-  post_tasks:
-    - name: start ceph mgrs
-      service:
-        name: ceph-mgr@{{ ansible_hostname }}
-        state: started
-        enabled: yes
-      when:
-        - not containerized_deployment
-
-    - name: restart containerized ceph mgrs
-      service:
-        name: ceph-mgr@{{ ansible_hostname }}
-        state: restarted
-        enabled: yes
-      when:
-        - containerized_deployment
-
 - name: show ceph status
   hosts:

View File

@@ -126,16 +126,15 @@
   post_tasks:
     # We don't do a container test by running 'docker exec ...' since not all the monitors have switched to containers yet.
     # Thus, we continue to use the 'ceph' binary from the host, there is no issue with that.
-    - name: waiting for the containerized monitor to join the quorum...
-      shell: |
-        ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["quorum_names"])'
-      register: result
-      until: "{{ ansible_hostname in result.stdout }}"
+    - name: non container | waiting for the monitor to join the quorum...
+      command: ceph --cluster "{{ cluster }}" -s --format json
+      register: ceph_health_raw
+      until: >
+        hostvars[mon_host]['ansible_hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]
       retries: "{{ health_mon_check_retries }}"
       delay: "{{ health_mon_check_delay }}"
       delegate_to: "{{ mon_host }}"

 - name: switching from non-containerized to containerized ceph mgr
   hosts:
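The rewritten task registers the raw status output and lets Jinja parse it with from_json, so the until condition only ever inspects the quorum_names field. A trimmed, purely illustrative example of what the registered "ceph -s --format json" output might contain (monitor names are placeholders):

{
    "fsid": "...",
    "quorum_names": ["mon0", "mon1", "mon2"]
}

The loop succeeds once the delegated monitor's ansible_hostname (say, mon0) appears in that list, retrying up to health_mon_check_retries times with health_mon_check_delay seconds between attempts.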

View File

@@ -26,7 +26,8 @@
     owner: "root"
     group: "root"
     mode: "0755"
-  when: ceph_nfs_dynamic_exports
+  when:
+    - ceph_nfs_dynamic_exports

 - name: create exports dir index file
   copy:
@@ -36,7 +37,8 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  when: ceph_nfs_dynamic_exports
+  when:
+    - ceph_nfs_dynamic_exports

 - name: generate systemd unit file
   become: true
@@ -56,14 +58,14 @@
     enabled: yes
     daemon_reload: yes
   when:
+    - ceph_nfs_enable_service
     - containerized_deployment
-    - ceph_nfs_enable_service

 - name: start nfs gateway service
-  service:
+  systemd:
     name: nfs-ganesha
     state: started
     enabled: yes
   when:
+    - ceph_nfs_enable_service
     - not containerized_deployment
-    - ceph_nfs_enable_service