infra-plays: add an OS (rhel) upgrade playbook

This adds a playbook for upgrading a Ceph cluster's underlying OS from RHEL 7 to RHEL 8, driven by the Leapp upgrade tool.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1772012

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
guits-upgrade-rhel7-to-rhel8
Guillaume Abrioux 2020-08-18 14:43:48 +02:00
parent 5b73af9c34
commit 84d9dd3c41
8 changed files with 330 additions and 0 deletions

View File

@@ -869,7 +869,12 @@ dummy:
#client_connections: {}
#########
# LEAPP #
#########
#leapp_data_filename: leapp-data8.tar.gz
#leapp_rhel_release: 7.8
###############
# DEPRECATION #

View File

@@ -869,7 +869,12 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert
#client_connections: {}
#########
# LEAPP #
#########
#leapp_data_filename: leapp-data8.tar.gz
#leapp_rhel_release: 7.8
###############
# DEPRECATION #
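Both sample hunks above ship the new LEAPP variables commented out. To drive the upgrade playbook, one would uncomment them (and override if needed) in the effective group_vars; the values shown here are the shipped defaults:

leapp_data_filename: leapp-data8.tar.gz
leapp_rhel_release: 7.8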

View File

@@ -0,0 +1,17 @@
---
# compare the number of active+clean pgs against the total reported at the
# start, retrying until the cluster has fully recovered
- name: get num_pgs
  command: "{{ container_exec_cmd_update_osd | default('') }} ceph --cluster {{ cluster }} -s --format json"
  register: ceph_pgs
  changed_when: false
  delegate_to: "{{ groups[mon_group_name][0] }}"

- name: wait for clean pgs
  command: "{{ container_exec_cmd_update_osd | default('') }} ceph --cluster {{ cluster }} -s --format json"
  register: ceph_health_post
  changed_when: false
  until: >
    (((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) > 0)
    and
    (((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | selectattr('state_name', 'search', '^active\\+clean') | map(attribute='count') | list | sum) == (ceph_pgs.stdout | from_json).pgmap.num_pgs)
  delegate_to: "{{ groups[mon_group_name][0] }}"
  retries: "{{ health_osd_check_retries }}"
  delay: "{{ health_osd_check_delay }}"
  when: (ceph_pgs.stdout | from_json).pgmap.num_pgs != 0
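For reference, the until condition above parses ceph -s --format json output shaped like this trimmed excerpt (values are illustrative): it sums the counts of every state matching ^active\+clean and compares that sum to pgmap.num_pgs.

{
  "pgmap": {
    "num_pgs": 128,
    "pgs_by_state": [
      {"state_name": "active+clean", "count": 126},
      {"state_name": "active+clean+scrubbing", "count": 2}
    ]
  }
}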

View File

@@ -0,0 +1,5 @@
---
- name: reboot
  reboot:
    reboot_timeout: 600
    test_command: uptime

View File

@@ -0,0 +1,6 @@
---
# noout keeps osds from being marked out while their node reboots;
# nodeep-scrub avoids deep-scrub load during the upgrade
- name: set osd flags
  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set {{ item }}"
  with_items:
    - noout
    - nodeep-scrub
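A verification task one could append after setting the flags (a sketch, not part of this commit); ceph osd dump exposes the active flags as a comma-separated string:

- name: verify osd flags are set
  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd dump --format json"
  register: osd_dump
  changed_when: false
  failed_when: "'noout' not in (osd_dump.stdout | from_json).flags"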

View File

@@ -0,0 +1,6 @@
---
- name: unset osd flags
  command: "{{ container_exec_cmd_update_osd | default('') }} ceph --cluster {{ cluster }} osd unset {{ item }}"
  with_items:
    - noout
    - nodeep-scrub

View File

@@ -0,0 +1,281 @@
---
- name: check prerequisites
  hosts: localhost
  connection: local
  tasks:
    - name: check the leapp data archive is present on the ansible controller
      # lookup('file', ...) makes the play fail early when the archive is missing
      debug:
        msg: "{{ lookup('file', leapp_data_filename) }}"
- name: update rhel7 to latest packages
  hosts:
    - "{{ mon_group_name | default('mons') }}"
    - "{{ mgr_group_name | default('mgrs') }}"
    - "{{ osd_group_name | default('osds') }}"
    - "{{ mds_group_name | default('mdss') }}"
    - "{{ rgw_group_name | default('rgws') }}"
    - "{{ nfs_group_name | default('nfss') }}"
    - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
    - "{{ iscsi_gw_group_name | default('iscsigws') }}"
    - "{{ client_group_name | default('clients') }}"
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - name: enable repos
      rhsm_repository:
        name: ['rhel-7-server-rpms', 'rhel-7-server-extras-rpms']
        purge: true
    - name: set release
      command: subscription-manager release --set {{ leapp_rhel_release }}
      changed_when: false
    - name: update system
      command: yum update -y
      changed_when: false
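The update task shells out to yum with changed_when: false, so a run never reports whether packages actually moved. A module-based alternative sketch that reports changes accurately (a swap-in, not what the commit does):

- name: update system
  yum:
    name: '*'
    state: latest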
- name: reboot mon nodes
  hosts: "{{ mon_group_name | default('mons') }}"
  serial: 1
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - name: import_tasks reboot_nodes.yml
      import_tasks: reboot_nodes.yml
- name: reboot mgr nodes
  hosts: "{{ mgr_group_name | default('mgrs') }}"
  serial: 1
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - name: import_tasks reboot_nodes.yml
      import_tasks: reboot_nodes.yml
- name: set osd flags
  hosts: "{{ mon_group_name | default('mons') }}[0]"
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - import_role:
        name: ceph-facts
        tasks_from: container_binary.yml
    - name: set_fact container_exec_cmd  # consumed by set_osd_flags.yml, otherwise undefined in containerized deployments
      set_fact:
        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
      when: containerized_deployment | bool
    - name: import_tasks set_osd_flags.yml
      import_tasks: set_osd_flags.yml
- name: reboot osd nodes
  hosts: "{{ osd_group_name | default('osds') }}"
  serial: 1
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - import_role:
        name: ceph-facts
        tasks_from: container_binary.yml
    - name: set_fact container_exec_cmd_update_osd
      set_fact:
        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
      when: containerized_deployment | bool
    - name: import_tasks reboot_nodes.yml
      import_tasks: reboot_nodes.yml
    - name: import_tasks pgs_check.yml
      import_tasks: pgs_check.yml
- name: unset osd flags
  hosts: "{{ mon_group_name | default('mons') }}[0]"
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - import_role:
        name: ceph-facts
        tasks_from: container_binary.yml
    - name: set_fact container_exec_cmd_update_osd
      set_fact:
        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
      when: containerized_deployment | bool
    - name: import_tasks unset_osd_flags.yml
      import_tasks: unset_osd_flags.yml
- name: deactivate all mds rank > 0
  hosts: "{{ groups[mon_group_name | default('mons')][0] }}"
  become: true
  tasks:
    - name: deactivate all mds rank > 0
      when: groups.get(mds_group_name, []) | length > 0
      block:
        - import_role:
            name: ceph-defaults
        - import_role:
            name: ceph-facts
        - name: deactivate all mds rank > 0 if any
          when: groups.get(mds_group_name, []) | length > 1
          block:
            - name: set max_mds 1 on ceph fs
              command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds 1"
              changed_when: false
            - name: wait until only rank 0 is up
              command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }} -f json"
              changed_when: false
              register: wait_rank_zero
              retries: 720
              delay: 5
              until: (wait_rank_zero.stdout | from_json).mdsmap['in'] | length == 1 and (wait_rank_zero.stdout | from_json).mdsmap['in'][0] == 0
            - name: get name of remaining active mds
              command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
              changed_when: false
              register: _mds_active_name
            - name: set_fact mds_active_name
              set_fact:
                mds_active_name: "{{ (_mds_active_name.stdout | from_json)['filesystems'][0]['mdsmap']['info'][item.key]['name'] }}"
              with_dict: "{{ (_mds_active_name.stdout | default('{}') | from_json).filesystems[0]['mdsmap']['info'] | default({}) }}"
            - name: set_fact mds_active_host
              set_fact:
                mds_active_host: "{{ [hostvars[item]['inventory_hostname']] }}"
              with_items: "{{ groups[mds_group_name] }}"
              when: hostvars[item]['ansible_hostname'] == mds_active_name
            - name: create standby_mdss group
              add_host:
                name: "{{ item }}"
                groups: standby_mdss
                ansible_host: "{{ hostvars[item]['ansible_host'] | default(omit) }}"
                ansible_port: "{{ hostvars[item]['ansible_port'] | default(omit) }}"
              with_items: "{{ groups[mds_group_name] | difference(mds_active_host) }}"
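Nothing later in the diff consumes the standby_mdss group built above; presumably those nodes are meant to be rebooted while the remaining active mds keeps serving. A sketch of such a play, reusing reboot_nodes.yml (illustrative, not part of this commit):

- name: reboot standby mds nodes
  hosts: standby_mdss
  serial: 1
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - name: import_tasks reboot_nodes.yml
      import_tasks: reboot_nodes.yml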
- name: reboot client nodes
  hosts: "{{ client_group_name | default('clients') }}"
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - name: import_tasks reboot_nodes.yml
      import_tasks: reboot_nodes.yml
- name: upgrade to rhel8 with leapp
  hosts:
    - "{{ mon_group_name | default('mons') }}"
    - "{{ mgr_group_name | default('mgrs') }}"
    - "{{ osd_group_name | default('osds') }}"
    - "{{ mds_group_name | default('mdss') }}"
    - "{{ rgw_group_name | default('rgws') }}"
    - "{{ nfs_group_name | default('nfss') }}"
    - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
    - "{{ iscsi_gw_group_name | default('iscsigws') }}"
    - "{{ client_group_name | default('clients') }}"
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - name: install leapp
      package:
        name: leapp
        state: present
    - name: untar leapp tarball
      unarchive:
        src: "{{ leapp_data_filename }}"
        dest: /etc/leapp/files
      changed_when: false
    - name: run leapp preupgrade
      command: leapp preupgrade
      register: leapp_preupgrade_result
      changed_when: false
    - name: run leapp upgrade
      command: leapp upgrade
      changed_when: false
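leapp writes its findings to /var/log/leapp/leapp-report.txt on each host; a sketch of tasks that could surface that report after the preupgrade step, assuming the default report path (not part of this commit):

- name: read leapp report
  command: cat /var/log/leapp/leapp-report.txt
  register: leapp_report
  changed_when: false
- name: display leapp report
  debug:
    msg: "{{ leapp_report.stdout_lines }}"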
- name: reboot ceph nodes after leapp upgrade
  hosts:
    - "{{ mon_group_name | default('mons') }}"
    - "{{ mgr_group_name | default('mgrs') }}"
    - "{{ osd_group_name | default('osds') }}"
    - "{{ mds_group_name | default('mdss') }}"
    - "{{ rgw_group_name | default('rgws') }}"
    - "{{ nfs_group_name | default('nfss') }}"
    - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
    - "{{ iscsi_gw_group_name | default('iscsigws') }}"
  serial: 1
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - name: reboot
      reboot:
        reboot_timeout: 3600
        test_command: uptime
- name: reboot client nodes after leapp upgrade
  hosts: "{{ client_group_name | default('clients') }}"
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - name: reboot
      reboot:
        reboot_timeout: 3600
        test_command: uptime
- name: post-upgrade tasks
  hosts:
    - "{{ mon_group_name | default('mons') }}"
    - "{{ mgr_group_name | default('mgrs') }}"
    - "{{ osd_group_name | default('osds') }}"
    - "{{ mds_group_name | default('mdss') }}"
    - "{{ rgw_group_name | default('rgws') }}"
    - "{{ nfs_group_name | default('nfss') }}"
    - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
    - "{{ iscsi_gw_group_name | default('iscsigws') }}"
    - "{{ client_group_name | default('clients') }}"
  become: true
  tasks:
    - import_role:
        name: ceph-defaults
    - name: set_fact ansible_python_interpreter
      set_fact:
        # rhel8 ships no /usr/bin/python; point ansible at python3
        ansible_python_interpreter: /usr/bin/python3
    - name: Enable SELinux
      selinux:
        policy: targeted
        state: enforcing
# enable RHCS4-EL8 repo?
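One way the trailing TODO might be addressed, sketched with the rhsm_repository module already used earlier in the playbook; the repo ids below are assumptions and would differ per daemon role (not part of this commit):

- name: enable rhcs 4 el8 repos
  rhsm_repository:
    name:
      - rhceph-4-tools-for-rhel-8-x86_64-rpms  # assumed repo id
      - rhceph-4-mon-for-rhel-8-x86_64-rpms    # assumed repo id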

View File

@@ -861,7 +861,12 @@ rbd_devices: {}
client_connections: {}
#########
# LEAPP #
#########
leapp_data_filename: leapp-data8.tar.gz
leapp_rhel_release: 7.8
###############
# DEPRECATION #