mirror of https://github.com/ceph/ceph-ansible.git
pull/1386/head
commit ac1498b0d7

Makefile
@@ -2,6 +2,19 @@
 # Try "make" (for SRPMS) or "make rpm"

 NAME = ceph-ansible
+
+# Set the RPM package NVR from "git describe".
+# Examples:
+#
+# A "git describe" value of "v2.2.0rc1" would create an NVR
+# "ceph-ansible-2.2.0-0.rc1.1.el7"
+#
+# A "git describe" value of "v2.2.0rc1-1-gc465f85" would create an NVR
+# "ceph-ansible-2.2.0-0.rc1.1.gc465f85.el7"
+#
+# A "git describe" value of "v2.2.0" creates an NVR
+# "ceph-ansible-2.2.0-1.el7"
+
 VERSION := $(shell git describe --tags --abbrev=0 --match 'v*' | sed 's/^v//')
 COMMIT := $(shell git rev-parse HEAD)
 SHORTCOMMIT := $(shell echo $(COMMIT) | cut -c1-7)
@@ -10,7 +23,12 @@ RELEASE := $(shell git describe --tags --match 'v*' \
             | sed 's/^[^-]*-//' \
             | sed 's/-/./')
 ifeq ($(VERSION),$(RELEASE))
-RELEASE = 0
+RELEASE = 1
+endif
+ifneq (,$(findstring rc,$(VERSION)))
+RC := $(shell echo $(VERSION) | sed 's/.*rc/rc/')
+RELEASE := 0.$(RC).$(RELEASE)
+VERSION := $(subst $(RC),,$(VERSION))
 endif
 NVR := $(NAME)-$(VERSION)-$(RELEASE).el7
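For reference, a minimal shell walk-through of the version logic above, assuming a hypothetical "git describe" output of "v2.2.0rc1-1-gc465f85" (values below are illustrative, not part of the commit):

    # VERSION from the nearest tag, "v" prefix stripped
    VERSION=$(echo "v2.2.0rc1" | sed 's/^v//')                                 # -> 2.2.0rc1
    # RELEASE: drop everything up to the first dash, turn the next dash into a dot
    RELEASE=$(echo "v2.2.0rc1-1-gc465f85" | sed 's/^[^-]*-//' | sed 's/-/./')  # -> 1.gc465f85
    # rc handling: pull the rc suffix out of VERSION, prepend "0.<rc>." to RELEASE
    RC=$(echo "$VERSION" | sed 's/.*rc/rc/')                                   # -> rc1
    RELEASE="0.$RC.$RELEASE"                                                   # -> 0.rc1.1.gc465f85
    VERSION="${VERSION%$RC}"                                                   # -> 2.2.0
    echo "ceph-ansible-$VERSION-$RELEASE.el7"  # -> ceph-ansible-2.2.0-0.rc1.1.gc465f85.el7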
@@ -293,8 +293,8 @@ dummy:
 #
 #radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
 #radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
-#radosgw_civetweb_port: 8080 # on Infernalis we get: "set_ports_option: cannot bind to 80: 13 (Permission denied)"
-#radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}"
+#radosgw_civetweb_port: 8080
+#radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
 #radosgw_civetweb_num_threads: 50
 # For additional civetweb configuration options available such as SSL, logging,
 # keepalive, and timeout settings, please see the civetweb docs at
@@ -383,7 +383,6 @@ dummy:
 # - { name: kernel.pid_max, value: 4194303 }
 # - { name: fs.file-max, value: 26234859 }
 # - { name: vm.zone_reclaim_mode, value: 0 }
-# - { name: vm.vfs_cache_pressure, value: 50 }
 # - { name: vm.swappiness, value: 10 }
 # - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }

@@ -410,3 +409,7 @@ dummy:
 # Set this to true to enable Object access via NFS. Requires an RGW role.
 #nfs_obj_gw: false

+# this is only here for usage with the rolling_update.yml playbook
+# do not ever change this here
+#rolling_update: false
@@ -0,0 +1,18 @@
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+#ceph_docker_registry: docker.io
+#ceph_docker_enable_centos_extra_repo: false
+
+# Set uid/gid to default '64045' for bootstrap directories.
+# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
+# These values have to be set according to the base OS used by the container image, NOT the host.
+#bootstrap_dirs_owner: "64045"
+#bootstrap_dirs_group: "64045"
@@ -42,6 +42,9 @@ dummy:
 # Enable the Calamari-backed REST API on a Monitor
 #calamari: false

+# Enable debugging for Calamari
+#calamari_debug: false
+
 #############
 # OPENSTACK #
 #############
@@ -74,7 +77,7 @@ dummy:
 ##########
 # DOCKER #
 ##########
-
+#docker_exec_cmd:
 #mon_containerized_deployment: false
 #mon_containerized_deployment_with_kv: false
 # This is currently in ceph-common defaults because it is shared with ceph-nfs
@@ -59,7 +59,7 @@ dummy:
 # osd0 ceph_crush_root=foo ceph_crush_rack=bar

 #crush_location: false
-#osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
+#osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""


 ##############
@@ -119,7 +119,7 @@ dummy:

 # II. Second scenario: N journal devices for N OSDs
 # Use 'true' for 'raw_multi_journal' to enable this scenario
 # List devices under 'devices' variable above and
 # write journal devices for those under 'raw_journal_devices'
 # In the following example:
 # * sdb and sdc will get sdf as a journal
@@ -134,6 +134,11 @@ dummy:
 # - /dev/sdf
 # - /dev/sdg
 # - /dev/sdg
+#
+# NOTE(leseb):
+# On a containerized scenario we only support A SINGLE journal
+# for all the OSDs on a given machine. If you don't, bad things will happen
+# This is a limitation we plan to fix at some point.
 #raw_journal_devices: []

@@ -176,11 +181,31 @@ dummy:
 #kv_type: etcd
 #kv_endpoint: 127.0.0.1
 #kv_port: 4001
-#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
 #ceph_docker_image: "ceph/daemon"
 #ceph_docker_image_tag: latest
-#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
-#ceph_osd_docker_devices: "{{ devices }}"
-#ceph_docker_on_openstack: false
 #ceph_config_keys: [] # DON'T TOUCH ME
+#ceph_docker_on_openstack: false
+
+# PREPARE DEVICE
+# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly.
+# This is why we use [0] in the example.
+#
+# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
+#
+# Examples:
+# Journal collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+# Dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
+# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
+# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
+#
+#ceph_osd_docker_devices: "{{ devices }}"
+#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+
+# ACTIVATE DEVICE
+# Examples:
+# Journal collocated or Dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
+# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+#
+#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
+#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
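As a worked illustration of the commented examples above, a hypothetical group_vars/osds.yml for a containerized deployment with one dedicated journal device could look like this (device paths and values are assumptions, not defaults shipped by the commit):

    # hypothetical osds group_vars -- dedicated-journal scenario, sketch only
    devices:
      - /dev/sdb
      - /dev/sdc
    raw_journal_devices:
      - /dev/sdf   # a single journal device; see the NOTE(leseb) limitation above
    ceph_osd_docker_devices: "{{ devices }}"
    ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
    ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}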
@@ -25,11 +25,6 @@ dummy:
 #
 #cephx: true

-# Used for the sudo exception while starting the radosgw process
-# a new entry /etc/sudoers.d/ceph will be created
-# allowing root to not require tty
-#radosgw_user: root
-
 # Multi-site remote pull URL variables
 #rgw_pull_port: "{{ radosgw_civetweb_port }}"
 #rgw_pull_proto: "http"
@@ -31,15 +31,15 @@
   - "{{ osd_group_name }}"

   tasks:

   - name: load a variable file for devices partition
     include_vars: "{{ item }}"
     with_first_found:
       - files:
           - "host_vars/{{ ansible_hostname }}.yml"
           - "host_vars/default.yml"
         skip: true

   - name: exit playbook, if devices not defined
     fail:
       msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_hostname }}.yml"
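The with_first_found lookup above prefers a per-host file and falls back to host_vars/default.yml; a minimal sketch of such a fallback file (device names are illustrative assumptions):

    # host_vars/default.yml -- hypothetical fallback consumed by the play above
    devices:
      - /dev/sdb
      - /dev/sdc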
@@ -33,23 +33,14 @@

 - name: gather facts on all hosts

-  vars:
-    mon_group_name: mons
-    osd_group_name: osds
-    mds_group_name: mdss
-    rgw_group_name: rgws
-    rbdmirror_group_name: rbd-mirrors
-    nfs_group_name: nfss
-    client_group_name: clients
-
   hosts:
-  - "{{ mon_group_name }}"
-  - "{{ osd_group_name }}"
-  - "{{ mds_group_name }}"
-  - "{{ rgw_group_name }}"
-  - "{{ rbdmirror_group_name }}"
-  - "{{ nfs_group_name }}"
-  - "{{ client_group_name }}"
+  - "{{ mon_group_name|default('mons') }}"
+  - "{{ osd_group_name|default('osds') }}"
+  - "{{ mds_group_name|default('mdss') }}"
+  - "{{ rgw_group_name|default('rgws') }}"
+  - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+  - "{{ nfs_group_name|default('nfss') }}"
+  - "{{ client_group_name|default('clients') }}"

   become: true
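The pattern introduced throughout this PR replaces hardcoded play vars with Jinja2's default() filter: out of the box each play still targets the conventional group names, but an operator can now override them from the inventory or the command line. A sketch of the behaviour (not new code from the commit):

    # with no override this resolves to group "mons";
    # `ansible-playbook purge-cluster.yml -e mon_group_name=my_mons` now takes effect
    hosts:
      - "{{ mon_group_name|default('mons') }}"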
@@ -62,7 +53,7 @@
     mds_group_name: mdss

   hosts:
-  - "{{ mds_group_name }}"
+  - "{{ mds_group_name|default('mdss') }}"

   gather_facts: false # Already gathered previously
@@ -93,7 +84,7 @@
     rgw_group_name: rgws

   hosts:
-  - "{{ rgw_group_name }}"
+  - "{{ rgw_group_name|default('rgws') }}"

   gather_facts: false # Already gathered previously
@@ -124,7 +115,7 @@
     rbdmirror_group_name: rbd-mirrors

   hosts:
-  - "{{ rbdmirror_group_name }}"
+  - "{{ rbdmirror_group_name|default('rbdmirrors') }}"

   gather_facts: false # Already gathered previously
@@ -150,7 +141,7 @@
     nfs_group_name: nfss

   hosts:
-  - "{{ nfs_group_name }}"
+  - "{{ nfs_group_name|default('nfss') }}"

   gather_facts: false # Already gathered previously
@@ -180,7 +171,7 @@
     osd_group_name: osds

   hosts:
-  - "{{ osd_group_name }}"
+  - "{{ osd_group_name|default('osds') }}"

   gather_facts: false # Already gathered previously
@@ -358,7 +349,7 @@
     restapi_group_name: restapis

   hosts:
-  - "{{ mon_group_name }}"
+  - "{{ mon_group_name|default('mons') }}"

   gather_facts: false # Already gathered previously
@@ -390,14 +381,6 @@
 - name: final cleanup - check any running ceph, purge ceph packages, purge config and remove data

   vars:
-    mon_group_name: mons
-    osd_group_name: osds
-    mds_group_name: mdss
-    rgw_group_name: rgws
-    rbdmirror_group_name: rbd-mirrors
-    nfs_group_name: nfss
-    client_group_name: clients
-
     # When set to true both groups of packages are purged.
     # This can cause problem with qemu-kvm
     purge_all_packages: true
@@ -423,13 +406,13 @@
     - python-rbd

   hosts:
-  - "{{ mon_group_name }}"
-  - "{{ osd_group_name }}"
-  - "{{ mds_group_name }}"
-  - "{{ rgw_group_name }}"
-  - "{{ rbdmirror_group_name }}"
-  - "{{ nfs_group_name }}"
-  - "{{ client_group_name }}"
+  - "{{ mon_group_name|default('mons') }}"
+  - "{{ osd_group_name|default('osds') }}"
+  - "{{ mds_group_name|default('mdss') }}"
+  - "{{ rgw_group_name|default('rgws') }}"
+  - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+  - "{{ nfs_group_name|default('nfss') }}"
+  - "{{ client_group_name|default('clients') }}"

   gather_facts: false # Already gathered previously
@@ -554,15 +537,6 @@

 - name: purge fetch directory

-  vars:
-    mon_group_name: mons
-    osd_group_name: osds
-    mds_group_name: mdss
-    rgw_group_name: rgws
-    rbdmirror_group_name: rbdmirrors
-    nfs_group_name: nfss
-    restapi_group_name: restapis
-
   hosts:
   - localhost
@@ -46,21 +46,12 @@

 - name: purge ceph mds cluster

-  vars:
-    mds_group_name: mdss
-
   hosts:
-  - "{{ mds_group_name }}"
+  - "{{ mds_group_name|default('mdss') }}"

   become: true

   tasks:
-  - include_vars: roles/ceph-common/defaults/main.yml
-  - include_vars: roles/ceph-mds/defaults/main.yml
-  - include_vars: group_vars/all.yml
-    failed_when: false
-  - include_vars: group_vars/{{ mds_group_name }}.yml
-    failed_when: false

   - name: disable ceph mds service
     service:
@@ -72,7 +63,7 @@
   - name: remove ceph mds container
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}"
+      name: "ceph-mds-{{ ansible_hostname }}"
       state: absent
     ignore_errors: true
@@ -93,21 +84,12 @@

 - name: purge ceph rgw cluster

-  vars:
-    rgw_group_name: rgws
-
   hosts:
-  - "{{ rgw_group_name }}"
+  - "{{ rgw_group_name|default('rgws') }}"

   become: true

   tasks:
-  - include_vars: roles/ceph-common/defaults/main.yml
-  - include_vars: roles/ceph-rgw/defaults/main.yml
-  - include_vars: group_vars/all.yml
-    failed_when: false
-  - include_vars: group_vars/{{ rgw_group_name }}.yml
-    failed_when: false

   - name: disable ceph rgw service
     service:
@@ -119,7 +101,7 @@
   - name: remove ceph rgw container
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}"
+      name: "ceph-rgw-{{ ansible_hostname }}"
       state: absent
     ignore_errors: true
@@ -140,21 +122,12 @@

 - name: purge ceph rbd-mirror cluster

-  vars:
-    rbdmirror_group_name: rbd-mirrors
-
   hosts:
-  - "{{ rbdmirror_group_name }}"
+  - "{{ rbdmirror_group_name|default('rbdmirrors') }}"

   become: true

   tasks:
-  - include_vars: roles/ceph-common/defaults/main.yml
-  - include_vars: roles/ceph-rbd-mirror/defaults/main.yml
-  - include_vars: group_vars/all.yml
-    failed_when: false
-  - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
-    failed_when: false

   - name: disable ceph rbd-mirror service
     service:
@@ -166,7 +139,7 @@
   - name: remove ceph rbd-mirror container
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}"
+      name: "ceph-rbd-mirror-{{ ansible_hostname }}"
       state: absent
     ignore_errors: true
@@ -187,21 +160,12 @@

 - name: purge ceph nfs cluster

-  vars:
-    nfs_group_name: nfss
-
   hosts:
-  - "{{ nfs_group_name }}"
+  - "{{ nfs_group_name|default('nfss') }}"

   become: true

   tasks:
-  - include_vars: roles/ceph-common/defaults/main.yml
-  - include_vars: roles/ceph-nfs/defaults/main.yml
-  - include_vars: group_vars/all.yml
-    failed_when: false
-  - include_vars: group_vars/{{ nfs_group_name }}.yml
-    failed_when: false

   - name: disable ceph nfs service
     service:
@@ -213,7 +177,7 @@
   - name: remove ceph nfs container
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}"
+      name: "ceph-nfs-{{ ansible_hostname }}"
       state: absent
     ignore_errors: true
@@ -243,21 +207,12 @@

 - name: purge ceph osd cluster

-  vars:
-    osd_group_name: osds
-
   hosts:
-  - "{{ osd_group_name }}"
+  - "{{ osd_group_name|default('osds') }}"

   become: true

   tasks:
-  - include_vars: roles/ceph-common/defaults/main.yml
-  - include_vars: roles/ceph-osd/defaults/main.yml
-  - include_vars: group_vars/all.yml
-    failed_when: false
-  - include_vars: group_vars/{{ osd_group_name }}.yml
-    failed_when: false

   - name: disable ceph osd service
     service:
@@ -270,7 +225,7 @@
   - name: remove ceph osd prepare container
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}-osd-prepare-{{ item | regex_replace('/', '') }}"
+      name: "ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
       state: absent
     with_items: "{{ ceph_osd_docker_devices }}"
     ignore_errors: true
@@ -278,49 +233,41 @@
   - name: remove ceph osd container
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}-osd-{{ item | regex_replace('/', '') }}"
+      name: "ceph-osd-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
       state: absent
     with_items: "{{ ceph_osd_docker_devices }}"
     ignore_errors: true

-  - name: zap ceph osd disk
-    docker:
-      image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}-osd-zap-{{ item | regex_replace('/', '') }}"
-      net: host
-      pid: host
-      state: started
-      privileged: yes
-      env: "CEPH_DAEMON=zap_device,OSD_DEVICE={{ item }}"
-      volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev:/dev,/run:/run"
-    with_items: "{{ ceph_osd_docker_devices }}"
+  - name: zap ceph osd disks
+    shell: |
+      docker run \
+      --privileged=true \
+      --name ceph-osd-zap-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }} \
+      -v /dev/:/dev/ \
+      -e OSD_DEVICE={{ item }} \
+      {{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+      zap_device
+    with_items:
+    - "{{ ceph_osd_docker_devices }}"
+    - "{{ raw_journal_devices }}"
+
+  - name: wait until the zap containers die
+    shell: |
+      docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}-dev
+    register: zap_alive
+    failed_when: false
+    until: zap_alive.rc != 0
+    retries: 5
+    delay: 10

   - name: remove ceph osd zap disk container
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}-osd-zap-{{ item | regex_replace('/', '') }}"
+      name: "ceph-osd-zap-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
       state: absent
-    with_items: "{{ ceph_osd_docker_devices }}"
-
-# zap twice
-  - name: zap ceph osd disk
-    docker:
-      image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}-osd-zap-{{ item | regex_replace('/', '') }}"
-      net: host
-      pid: host
-      state: started
-      privileged: yes
-      env: "CEPH_DAEMON=zap_device,OSD_DEVICE={{ item }}"
-      volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev:/dev,/run:/run"
-    with_items: "{{ ceph_osd_docker_devices }}"
-
-  - name: remove ceph osd zap disk container
-    docker:
-      image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}-osd-zap-{{ item | regex_replace('/', '') }}"
-      state: absent
-    with_items: "{{ ceph_osd_docker_devices }}"
+    with_items:
+    - "{{ ceph_osd_docker_devices }}"
+    - "{{ raw_journal_devices }}"

   - name: remove ceph osd service
     file:
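With the Jinja2 templates substituted, the rewritten zap task boils down to one privileged container run per device; a rendered sketch assuming hostname osd0, device /dev/sdb, and image ceph/daemon:latest (all hypothetical values):

    docker run \
      --privileged=true \
      --name ceph-osd-zap-osd0-devsdb \
      -v /dev/:/dev/ \
      -e OSD_DEVICE=/dev/sdb \
      ceph/daemon:latest \
      zap_device

The follow-up task then polls `docker ps` until no container named ceph-osd-zap-osd0-dev* is still running, before the finished zap containers are removed.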
@@ -339,25 +286,12 @@

 - name: purge ceph mon cluster

-  vars:
-    mon_group_name: mons
-    restapi_group_name: restapis
-
   hosts:
-  - "{{ mon_group_name }}"
+  - "{{ mon_group_name|default('mons') }}"

   become: true

   tasks:
-  - include_vars: roles/ceph-common/defaults/main.yml
-  - include_vars: roles/ceph-mon/defaults/main.yml
-  - include_vars: roles/ceph-restapi/defaults/main.yml
-  - include_vars: group_vars/all.yml
-    failed_when: false
-  - include_vars: group_vars/{{ mon_group_name }}.yml
-    failed_when: false
-  - include_vars: group_vars/{{ restapi_group_name }}.yml
-    failed_when: false

   - name: disable ceph mon service
     service:
@@ -369,14 +303,14 @@
   - name: remove ceph mon container
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}"
+      name: "ceph-mon-{{ ansible_hostname }}"
       state: absent
     ignore_errors: true

   - name: remove restapi container
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}-ceph-restapi"
+      name: "ceph-restapi-{{ ansible_hostname }}"
       state: absent
     ignore_errors: true
@@ -397,21 +331,13 @@

 - name: remove installed packages

-  vars:
-    mon_group_name: mons
-    osd_group_name: osds
-    mds_group_name: mdss
-    rgw_group_name: rgws
-    rbdmirror_group_name: rbd-mirrors
-    nfs_group_name: nfss
-
   hosts:
-  - "{{ mon_group_name }}"
-  - "{{ osd_group_name }}"
-  - "{{ mds_group_name }}"
-  - "{{ rgw_group_name }}"
-  - "{{ rbdmirror_group_name }}"
-  - "{{ nfs_group_name }}"
+  - "{{ mon_group_name|default('mons') }}"
+  - "{{ osd_group_name|default('osds') }}"
+  - "{{ mds_group_name|default('mdss') }}"
+  - "{{ rgw_group_name|default('rgws') }}"
+  - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+  - "{{ nfs_group_name|default('nfss') }}"

   become: true
@@ -567,21 +493,13 @@

 - name: purge ceph directories

-  vars:
-    mon_group_name: mons
-    osd_group_name: osds
-    mds_group_name: mdss
-    rgw_group_name: rgws
-    rbdmirror_group_name: rbd-mirrors
-    nfs_group_name: nfss
-
   hosts:
-  - "{{ mon_group_name }}"
-  - "{{ osd_group_name }}"
-  - "{{ mds_group_name }}"
-  - "{{ rgw_group_name }}"
-  - "{{ rbdmirror_group_name }}"
-  - "{{ nfs_group_name }}"
+  - "{{ mon_group_name|default('mons') }}"
+  - "{{ osd_group_name|default('osds') }}"
+  - "{{ mds_group_name|default('mdss') }}"
+  - "{{ rgw_group_name|default('rgws') }}"
+  - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+  - "{{ nfs_group_name|default('nfss') }}"

   gather_facts: false # Already gathered previously
@@ -600,38 +518,17 @@

 - name: purge fetch directory

-  vars:
-    mon_group_name: mons
-    osd_group_name: osds
-    mds_group_name: mdss
-    rgw_group_name: rgws
-    rbdmirror_group_name: rbdmirrors
-    nfs_group_name: nfss
-    restapi_group_name: restapis
-
   hosts:
   - localhost

   gather_facts: false

   tasks:
-  - include_vars: roles/ceph-common/defaults/main.yml
-  - include_vars: group_vars/all.yml
-    failed_when: false
-  - include_vars: group_vars/{{ mds_group_name }}.yml
-    failed_when: false
-  - include_vars: group_vars/{{ rgw_group_name }}.yml
-    failed_when: false
-  - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
-    failed_when: false
-  - include_vars: group_vars/{{ nfs_group_name }}.yml
-    failed_when: false
-  - include_vars: group_vars/{{ osd_group_name }}.yml
-    failed_when: false
-  - include_vars: group_vars/{{ mon_group_name }}.yml
-    failed_when: false
-  - include_vars: group_vars/{{ restapi_group_name }}.yml
-    failed_when: false
+  - name: set fetch_directory value if not set
+    set_fact:
+      fetch_directory: "fetch/"
+    when: fetch_directory is not defined

   - name: purge fetch directory for localhost
     file:
@@ -34,17 +34,12 @@

 - name: gather facts and check the init system
-  vars:
-    mon_group_name: mons
-    osd_group_name: osds
-    mds_group_name: mdss
-    rgw_group_name: rgws

   hosts:
-  - "{{ mon_group_name }}"
-  - "{{ osd_group_name }}"
-  - "{{ mds_group_name }}"
-  - "{{ rgw_group_name }}"
+  - "{{ mon_group_name|default('mons') }}"
+  - "{{ osd_group_name|default('osds') }}"
+  - "{{ mds_group_name|default('mdss') }}"
+  - "{{ rgw_group_name|default('rgws') }}"

   become: True
   tasks:
@@ -55,13 +50,12 @@
 - name: upgrade ceph mon cluster

   vars:
-    mon_group_name: mons
     health_mon_check_retries: 5
     health_mon_check_delay: 10
     upgrade_ceph_packages: True

   hosts:
-  - "{{ mon_group_name }}"
+  - "{{ mon_group_name|default('mons') }}"

   serial: 1
   become: True
@@ -78,7 +72,7 @@
       name: ceph-mon
       state: stopped
       args: id={{ ansible_hostname }}
-    when: ansible_service_mgr == 'systemd'
+    when: ansible_service_mgr == 'upstart'

   - name: stop ceph mons with sysvinit
     service:
@@ -148,7 +142,7 @@

   - name: waiting for the monitor to join the quorum...
     shell: |
-      ceph -s --cluster {{ cluster }} | grep monmap | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
+      ceph -s --cluster {{ cluster }} | grep election | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
     register: result
     until: result.rc == 0
     retries: "{{ health_mon_check_retries }}"
@@ -158,7 +152,7 @@

   - name: waiting for the containerized monitor to join the quorum...
     shell: |
-      docker exec {{ hostvars[mon_host]['ansible_hostname'] }} ceph -s --cluster {{ cluster }} | grep quorum | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
+      docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph -s --cluster {{ cluster }} | grep quorum | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
     register: result
     until: result.rc == 0
     retries: "{{ health_mon_check_retries }}"
@@ -170,13 +164,12 @@
 - name: upgrade ceph osds cluster

   vars:
-    osd_group_name: osds
     health_osd_check_retries: 40
     health_osd_check_delay: 30
     upgrade_ceph_packages: True

   hosts:
-  - "{{ osd_group_name }}"
+  - "{{ osd_group_name|default('osds') }}"

   serial: 1
   become: True
@@ -193,7 +186,7 @@

   - name: set containerized osd flags
     command: |
-      docker exec {{ hostvars[groups.mons[0]]['ansible_hostname'] }} ceph osd set {{ item }} --cluster {{ cluster }}
+      docker exec ceph-osd-{{ hostvars[groups.mons[0]]['ansible_hostname'] }} ceph osd set {{ item }} --cluster {{ cluster }}
     with_items:
     - noout
     - noscrub
@@ -273,7 +266,7 @@

   - name: waiting for clean pgs...
     shell: |
-      test "$(ceph pg stat --cluster {{ cluster }} | sed 's/^.*pgs://;s/active+clean.*//;s/ //')" -eq "$(ceph pg stat --cluster {{ cluster }} | sed 's/pgs.*//;s/^.*://;s/ //')" && ceph health --cluster {{ cluster }} | egrep -sq "HEALTH_OK|HEALTH_WARN"
+      test "$(ceph pg stat --cluster {{ cluster }} | tr , '\n' | sed 's/^.*pgs: //' | awk '/active.clean/ { SUM += $1 } END {print SUM}')" -eq "$(ceph pg stat --cluster {{ cluster }} | sed 's/pgs.*//;s/^.*://;s/ //')" && ceph health --cluster {{ cluster }} | egrep -sq "HEALTH_OK|HEALTH_WARN"
     register: result
     until: result.rc == 0
     retries: "{{ health_osd_check_retries }}"
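A worked sketch of the new parsing, assuming `ceph pg stat` prints a line like the one below (the exact output format varies across Ceph releases, so treat this as illustrative):

    line='v123: 64 pgs: 32 active+clean, 32 active+clean+scrubbing; 100 GB data'
    # new left-hand side: split on commas, strip the prefix, sum every bucket
    # whose state matches active.clean  -> 64
    echo "$line" | tr , '\n' | sed 's/^.*pgs: //' | awk '/active.clean/ { SUM += $1 } END {print SUM}'
    # right-hand side (unchanged): total pg count  -> 64
    echo "$line" | sed 's/pgs.*//;s/^.*://;s/ //'

The old sed-only expression kept only the count in front of the first "active+clean" match, so it undercounted whenever pgs were spread over several active+clean variants; summing the per-bucket counts fixes that.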
@@ -283,7 +276,7 @@

   - name: container - waiting for clean pgs...
     shell: |
-      test "$(docker exec {{ hostvars[groups.mons[0]]['ansible_hostname'] }} ceph pg stat --cluster {{ cluster }} | sed 's/^.*pgs://;s/active+clean.*//;s/ //')" -eq "$(docker exec {{ hostvars[groups.mons[0]]['ansible_hostname'] }} ceph pg stat --cluster {{ cluster }} | sed 's/pgs.*//;s/^.*://;s/ //')" && docker exec {{ hostvars[groups.mons[0]]['ansible_hostname'] }} ceph health --cluster {{ cluster }} | egrep -sq "HEALTH_OK|HEALTH_WARN"
+      test "$(docker exec ceph-osd-{{ hostvars[groups.mons[0]]['ansible_hostname'] }} ceph pg stat --cluster {{ cluster }} | sed 's/^.*pgs://;s/active+clean.*//;s/ //')" -eq "$(docker exec {{ hostvars[groups.mons[0]]['ansible_hostname'] }} ceph pg stat --cluster {{ cluster }} | sed 's/pgs.*//;s/^.*://;s/ //')" && docker exec {{ hostvars[groups.mons[0]]['ansible_hostname'] }} ceph health --cluster {{ cluster }} | egrep -sq "HEALTH_OK|HEALTH_WARN"
     register: result
     until: result.rc == 0
     retries: "{{ health_osd_check_retries }}"
@@ -302,7 +295,7 @@

   - name: unset containerized osd flags
     command: |
-      docker exec {{ hostvars[groups.mons[0]]['ansible_hostname'] }} ceph osd unset {{ item }} --cluster {{ cluster }}
+      docker exec ceph-osd-{{ hostvars[groups.mons[0]]['ansible_hostname'] }} ceph osd unset {{ item }} --cluster {{ cluster }}
     with_items:
     - noout
     - noscrub
|
||||||
- name: upgrade ceph mdss cluster
|
- name: upgrade ceph mdss cluster
|
||||||
|
|
||||||
vars:
|
vars:
|
||||||
mds_group_name: mdss
|
|
||||||
upgrade_ceph_packages: True
|
upgrade_ceph_packages: True
|
||||||
|
|
||||||
hosts:
|
hosts:
|
||||||
- "{{ mds_group_name }}"
|
- "{{ mds_group_name|default('mdss') }}"
|
||||||
|
|
||||||
serial: 1
|
serial: 1
|
||||||
become: True
|
become: True
|
||||||
|
@@ -387,11 +379,10 @@
 - name: upgrade ceph rgws cluster

   vars:
-    rgw_group_name: rgws
     upgrade_ceph_packages: True

   hosts:
-  - "{{ rgw_group_name }}"
+  - "{{ rgw_group_name|default('rgws') }}"

   serial: 1
   become: True
@@ -28,21 +28,13 @@

 - name: make sure docker is present and started

-  vars:
-    mon_group_name: mons
-    osd_group_name: osds
-    mds_group_name: mdss
-    rgw_group_name: rgws
-    rbdmirror_group_name: rbd-mirrors
-    nfs_group_name: nfss
-
   hosts:
-  - "{{ mon_group_name }}"
-  - "{{ osd_group_name }}"
-  - "{{ mds_group_name }}"
-  - "{{ rgw_group_name }}"
-  - "{{ rbdmirror_group_name }}"
-  - "{{ nfs_group_name }}"
+  - "{{ mon_group_name|default('mons') }}"
+  - "{{ osd_group_name|default('osds') }}"
+  - "{{ mds_group_name|default('mdss') }}"
+  - "{{ rgw_group_name|default('rgws') }}"
+  - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+  - "{{ nfs_group_name|default('nfss') }}"

   become: true
@@ -97,21 +89,12 @@
     restapi_group_name: restapis

   hosts:
-  - "{{ mon_group_name }}"
+  - "{{ mon_group_name|default('mons') }}"

   serial: 1
   become: true

   tasks:
-  - include_vars: roles/ceph-common/defaults/main.yml
-  - include_vars: roles/ceph-mon/defaults/main.yml
-  - include_vars: roles/ceph-restapi/defaults/main.yml
-  - include_vars: group_vars/all.yml
-    failed_when: false
-  - include_vars: group_vars/{{ mon_group_name }}.yml
-    failed_when: false
-  - include_vars: group_vars/{{ restapi_group_name }}.yml
-    failed_when: false

   - name: select a running monitor
     set_fact: mon_host={{ item }}
@@ -163,7 +146,7 @@
   - name: start ceph mon container image
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}"
+      name: "ceph-mon-{{ ansible_hostname }}"
       net: "host"
       state: "running"
       privileged: "{{ mon_docker_privileged }}"
@@ -187,18 +170,12 @@
     osd_group_name: osds

   hosts:
-  - "{{ osd_group_name }}"
+  - "{{ osd_group_name|default('osds') }}"

   serial: 1
   become: true

   tasks:
-  - include_vars: roles/ceph-common/defaults/main.yml
-  - include_vars: roles/ceph-osd/defaults/main.yml
-  - include_vars: group_vars/all.yml
-    failed_when: false
-  - include_vars: group_vars/{{ osd_group_name }}.yml
-    failed_when: false

   - name: collect osd ids
     shell: |
|
||||||
- name: start ceph osd container image(s)
|
- name: start ceph osd container image(s)
|
||||||
docker:
|
docker:
|
||||||
image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
|
image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
|
||||||
name: "{{ ansible_hostname }}-osd-{{ item | regex_replace('/', '') }}"
|
name: "ceph-osd-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
|
||||||
net: host
|
net: host
|
||||||
pid: host
|
pid: host
|
||||||
state: started
|
state: started
|
||||||
|
@ -288,22 +265,13 @@
|
||||||
|
|
||||||
- name: switching from non-containerized to containerized ceph mds
|
- name: switching from non-containerized to containerized ceph mds
|
||||||
|
|
||||||
vars:
|
|
||||||
mds_group_name: mdss
|
|
||||||
|
|
||||||
hosts:
|
hosts:
|
||||||
- "{{ mds_group_name }}"
|
- "{{ mds_group_name|default('mdss') }}"
|
||||||
|
|
||||||
serial: 1
|
serial: 1
|
||||||
become: true
|
become: true
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
- include_vars: roles/ceph-common/defaults/main.yml
|
|
||||||
- include_vars: roles/ceph-mds/defaults/main.yml
|
|
||||||
- include_vars: group_vars/all.yml
|
|
||||||
failed_when: false
|
|
||||||
- include_vars: group_vars/{{ mds_group_name }}.yml
|
|
||||||
failed_when: false
|
|
||||||
|
|
||||||
- name: stop ceph mds service
|
- name: stop ceph mds service
|
||||||
service:
|
service:
|
||||||
|
@@ -339,7 +307,7 @@
   - name: start ceph metadata container image
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: ceph-{{ ansible_hostname }}-mds
+      name: ceph-mds-{{ ansible_hostname }}
       net: host
       state: running
       env: "CEPH_DAEMON=MDS,CEPHFS_CREATE=1,{{ ceph_mds_docker_extra_env }}"
@@ -348,22 +316,13 @@

 - name: switching from non-containerized to containerized ceph rgw

-  vars:
-    rgw_group_name: rgws
-
   hosts:
-  - "{{ rgw_group_name }}"
+  - "{{ rgw_group_name|default('rgws') }}"

   serial: 1
   become: true

   tasks:
-  - include_vars: roles/ceph-common/defaults/main.yml
-  - include_vars: roles/ceph-rgw/defaults/main.yml
-  - include_vars: group_vars/all.yml
-    failed_when: false
-  - include_vars: group_vars/{{ rgw_group_name }}.yml
-    failed_when: false

   - name: stop ceph rgw service
     service:
@@ -399,7 +358,7 @@
   - name: start ceph rados gateway container image
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: ceph-{{ ansible_hostname }}-rgw
+      name: ceph-rgw-{{ ansible_hostname }}
       expose: "{{ ceph_rgw_civetweb_port }}"
       ports: "{{ ceph_rgw_civetweb_port }}:{{ ceph_rgw_civetweb_port }}"
       state: running
|
||||||
|
|
||||||
- name: switching from non-containerized to containerized ceph rbd-mirror
|
- name: switching from non-containerized to containerized ceph rbd-mirror
|
||||||
|
|
||||||
vars:
|
|
||||||
rbdmirror_group_name: rbd-mirrors
|
|
||||||
|
|
||||||
hosts:
|
hosts:
|
||||||
- "{{ rbdmirror_group_name }}"
|
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
|
||||||
|
|
||||||
serial: 1
|
serial: 1
|
||||||
become: true
|
become: true
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
- include_vars: roles/ceph-common/defaults/main.yml
|
|
||||||
- include_vars: roles/ceph-rbd-mirror/defaults/main.yml
|
|
||||||
- include_vars: group_vars/all.yml
|
|
||||||
failed_when: false
|
|
||||||
- include_vars: group_vars/{{ rbdmirror_group_name }}.yml
|
|
||||||
failed_when: false
|
|
||||||
|
|
||||||
- name: stop ceph rbd mirror service
|
- name: stop ceph rbd mirror service
|
||||||
service:
|
service:
|
||||||
|
@@ -460,7 +410,7 @@
   - name: start ceph rbd mirror container image
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}"
+      name: "ceph-rbd-mirror-{{ ansible_hostname }}"
       net: host
       state: running
       volumes: "/etc/ceph:/etc/ceph,/etc/localtime:/etc/localtime:ro"
|
||||||
|
|
||||||
- name: switching from non-containerized to containerized ceph nfs
|
- name: switching from non-containerized to containerized ceph nfs
|
||||||
|
|
||||||
vars:
|
|
||||||
nfs_group_name: nfss
|
|
||||||
|
|
||||||
hosts:
|
hosts:
|
||||||
- "{{ nfs_group_name }}"
|
- "{{ nfs_group_name|default('nfss') }}"
|
||||||
|
|
||||||
serial: 1
|
serial: 1
|
||||||
become: true
|
become: true
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
- include_vars: roles/ceph-common/defaults/main.yml
|
|
||||||
- include_vars: roles/ceph-nfs/defaults/main.yml
|
|
||||||
- include_vars: group_vars/all.yml
|
|
||||||
failed_when: false
|
|
||||||
- include_vars: group_vars/{{ nfs_group_name }}.yml
|
|
||||||
failed_when: false
|
|
||||||
|
|
||||||
- name: stop ceph nfs service
|
- name: stop ceph nfs service
|
||||||
service:
|
service:
|
||||||
|
@@ -519,7 +460,7 @@
   - name: start ceph nfs container image
     docker:
       image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "{{ ansible_hostname }}"
+      name: "ceph-nfs-{{ ansible_hostname }}"
       net: "host"
       state: "running"
       privileged: true
@@ -28,7 +28,7 @@

 - name: get the name of the existing ceph cluster
   shell: |
-    grep -lE '\[global\]|fsid' /etc/ceph/*.conf
+    basename $(grep -R fsid /etc/ceph/ | egrep -o '^[^.]*')
   changed_when: false
   register: ceph_conf
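Assuming a single configuration file /etc/ceph/mycluster.conf that contains an fsid line (hypothetical name), the new command resolves the cluster name like this:

    $ grep -R fsid /etc/ceph/
    /etc/ceph/mycluster.conf:fsid = ...
    $ grep -R fsid /etc/ceph/ | egrep -o '^[^.]*'
    /etc/ceph/mycluster
    $ basename $(grep -R fsid /etc/ceph/ | egrep -o '^[^.]*')
    mycluster

Unlike the old grep -lE probe, which returned a file path, this yields the bare cluster name directly.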
@@ -285,8 +285,8 @@ mds_max_mds: 3
 #
 #radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
 radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
-radosgw_civetweb_port: 8080 # on Infernalis we get: "set_ports_option: cannot bind to 80: 13 (Permission denied)"
-radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}"
+radosgw_civetweb_port: 8080
+radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]"
 radosgw_civetweb_num_threads: 50
 # For additional civetweb configuration options available such as SSL, logging,
 # keepalive, and timeout settings, please see the civetweb docs at
|
||||||
- { name: kernel.pid_max, value: 4194303 }
|
- { name: kernel.pid_max, value: 4194303 }
|
||||||
- { name: fs.file-max, value: 26234859 }
|
- { name: fs.file-max, value: 26234859 }
|
||||||
- { name: vm.zone_reclaim_mode, value: 0 }
|
- { name: vm.zone_reclaim_mode, value: 0 }
|
||||||
- { name: vm.vfs_cache_pressure, value: 50 }
|
|
||||||
- { name: vm.swappiness, value: 10 }
|
- { name: vm.swappiness, value: 10 }
|
||||||
- { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
|
- { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
|
||||||
|
|
||||||
|
@@ -401,3 +400,7 @@ mon_containerized_default_ceph_conf_with_kv: false
 nfs_file_gw: true
 # Set this to true to enable Object access via NFS. Requires an RGW role.
 nfs_obj_gw: false
+
+# this is only here for usage with the rolling_update.yml playbook
+# do not ever change this here
+rolling_update: false
@@ -14,12 +14,12 @@
     msg: "Distribution not supported {{ ansible_os_family }}"
   when: "'{{ ansible_os_family }}' not in ['Debian', 'RedHat']"

-- name: fail on unsupported distribution for red hat storage
+- name: fail on unsupported distribution for red hat ceph storage
   fail:
-    msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Ceph Storage, only RHEL 7.1"
+    msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Ceph Storage, only RHEL 7"
   when:
     - ceph_rhcs
-    - ansible_distribution_version | version_compare('7.1', '<')
+    - ansible_distribution_version | version_compare('7.3', '<')

 - name: fail on unsupported distribution for ubuntu cloud archive
   fail:
|
@ -5,6 +5,9 @@
|
||||||
always_run: yes
|
always_run: yes
|
||||||
register: ceph_version
|
register: ceph_version
|
||||||
|
|
||||||
|
# this task shouldn't run in a rolling_update situation
|
||||||
|
# because it blindly picks a mon, which may be down because
|
||||||
|
# of the rolling update
|
||||||
- name: is ceph running already?
|
- name: is ceph running already?
|
||||||
command: ceph --connect-timeout 3 --cluster {{ cluster }} fsid
|
command: ceph --connect-timeout 3 --cluster {{ cluster }} fsid
|
||||||
changed_when: false
|
changed_when: false
|
||||||
|
@ -12,6 +15,14 @@
|
||||||
always_run: yes
|
always_run: yes
|
||||||
register: ceph_current_fsid
|
register: ceph_current_fsid
|
||||||
delegate_to: "{{ groups[mon_group_name][0] }}"
|
delegate_to: "{{ groups[mon_group_name][0] }}"
|
||||||
|
when: not rolling_update
|
||||||
|
|
||||||
|
# set this as a default when performing a rolling_update
|
||||||
|
# so the rest of the tasks here will succeed
|
||||||
|
- set_fact:
|
||||||
|
ceph_current_fsid:
|
||||||
|
rc: 1
|
||||||
|
when: rolling_update
|
||||||
|
|
||||||
- name: create a local fetch directory if it does not exist
|
- name: create a local fetch directory if it does not exist
|
||||||
local_action: file path={{ fetch_directory }} state=directory
|
local_action: file path={{ fetch_directory }} state=directory
|
||||||
|
@ -22,7 +33,8 @@
|
||||||
|
|
||||||
- set_fact:
|
- set_fact:
|
||||||
fsid: "{{ ceph_current_fsid.stdout }}"
|
fsid: "{{ ceph_current_fsid.stdout }}"
|
||||||
when: ceph_current_fsid.rc == 0
|
when:
|
||||||
|
- ceph_current_fsid.rc == 0
|
||||||
|
|
||||||
- set_fact:
|
- set_fact:
|
||||||
monitor_name: "{{ ansible_hostname }}"
|
monitor_name: "{{ ansible_hostname }}"
|
||||||
|
|
|
@@ -1,22 +1,33 @@
 ---
-- name: create ceph conf directory
+- name: create ceph conf directory and assemble directory
   file:
-    path: /etc/ceph
+    path: "{{ item }}"
     state: directory
     owner: "ceph"
     group: "ceph"
     mode: "0755"
+  with_items:
+    - /etc/ceph/
+    - /etc/ceph/ceph.d/

 - name: "generate ceph configuration file: {{ cluster }}.conf"
   action: config_template
   args:
     src: ceph.conf.j2
-    dest: /etc/ceph/{{ cluster }}.conf
+    dest: /etc/ceph/ceph.d/{{ cluster }}.conf
     owner: "ceph"
     group: "ceph"
     mode: "0644"
     config_overrides: "{{ ceph_conf_overrides }}"
     config_type: ini

+- name: assemble {{ cluster }}.conf and fragments
+  assemble:
+    src: /etc/ceph/ceph.d/
+    dest: /etc/ceph/{{ cluster }}.conf
+    owner: "ceph"
+    group: "ceph"
+    mode: "0644"
   notify:
     - restart ceph mons
     - restart ceph osds
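The effect of the new flow: the rendered config lands in /etc/ceph/ceph.d/ and the assemble module concatenates everything in that directory into the final /etc/ceph/{{ cluster }}.conf, so operators can drop extra fragments alongside the generated one. A hypothetical layout (file names are assumptions):

    /etc/ceph/ceph.d/
    ├── ceph.conf       # rendered from ceph.conf.j2 by the config_template task
    └── zz-local.conf   # operator-managed fragment, merged into /etc/ceph/ceph.conf

Note that assemble processes fragments in lexical order, so a prefix like zz- keeps local overrides at the end of the assembled file.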
@@ -18,17 +18,16 @@
   changed_when: false
   when: ceph_stable
 
-# we must use curl instead of ansible's uri module because SNI support in
-# Python is only available in 2.7.9 and later, and most supported distributions
-# don't have that version, so a request to https fails.
-- name : fetch ceph development repository sources list file
-  command: "curl -L https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_lsb.codename }}/repo"
+- name: fetch ceph development repository sources list file
+  uri:
+    url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_lsb.codename }}/repo
+    return_content: yes
   register: ceph_dev_deb_repo
   when: ceph_dev
 
 - name: add ceph development repository
   apt_repository:
-    repo: "{{ ceph_dev_deb_repo.stdout }}"
+    repo: "{{ ceph_dev_deb_repo.content }}"
     state: present
   changed_when: false
   when: ceph_dev

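Moving from curl to the uri module also moves the payload: a command task registers the body under .stdout, while uri with return_content: yes exposes it under .content, which is why the apt_repository line changes too. A minimal standalone sketch of the pattern (URL illustrative):

- name: fetch a repo definition over https
  uri:
    url: https://example.com/repo          # illustrative URL
    return_content: yes
  register: repo_body

- name: show the fetched body
  debug:
    msg: "{{ repo_body.content }}"         # .content for uri, not .stdout
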
@@ -53,7 +53,7 @@
 
 - name: install NFS gateway
   apt:
-    pkg: nfs-ganesha
+    pkg: nfs-ganesha-fsal
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
     update_cache: yes
   when: nfs_group_name in group_names

@@ -87,6 +87,6 @@
 
 - name: install red hat storage nfs gateway
   apt:
-    name: nfs-ganesha
+    name: nfs-ganesha-fsal
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   when: nfs_group_name in group_names

@@ -6,30 +6,25 @@
   when: ceph_stable
 
 - name: add ceph stable repository
-  package:
-    name: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro|replace('rhel', 'el') }}.noarch.rpm"
+  yum_repository:
+    name: ceph_stable
+    description: Ceph Stable repo
+    gpgcheck: yes
     state: present
-  changed_when: false
+    gpgkey: "{{ ceph_stable_key }}"
+    baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/$basearch"
   when: ceph_stable
 
-- name: change download url to ceph mirror
-  replace:
-    name: /etc/yum.repos.d/ceph.repo
-    regexp: http://download.ceph.com
-    replace: "{{ ceph_mirror }}"
-  when: ceph_mirror != "http://download.ceph.com"
-
-# we must use curl instead of ansible's uri module because SNI support in
-# Python is only available in 2.7.9 and later, and most supported distributions
-# don't have that version, so a request to https fails.
 - name: fetch ceph development repo file
-  command: 'curl -L https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/repo'
+  uri:
+    url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/repo
+    return_content: yes
   register: ceph_dev_yum_repo
   when: ceph_dev
 
 - name: add ceph development repository
   copy:
-    content: "{{ ceph_dev_yum_repo.stdout }}"
+    content: "{{ ceph_dev_yum_repo.content }}"
     dest: /etc/yum.repos.d/ceph-dev.repo
     owner: root
     group: root

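With yum_repository the repo definition is written directly instead of installing a ceph-release RPM and patching its URL afterwards, which is why the mirror-rewrite task disappears. For reference, the task above should leave a file roughly like this (a sketch assuming default jewel/el7 values and the stock release key; key order may differ):

# /etc/yum.repos.d/ceph_stable.repo (sketch)
[ceph_stable]
baseurl = http://download.ceph.com/rpm-jewel/el7/$basearch
gpgcheck = 1
gpgkey = https://download.ceph.com/keys/release.asc
name = Ceph Stable repo
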
@@ -5,7 +5,8 @@
   failed_when: false
 
 - name: disable transparent hugepage
-  command: "echo never > /sys/kernel/mm/transparent_hugepage/enabled"
+  shell: |
+    echo never > /sys/kernel/mm/transparent_hugepage/enabled
   changed_when: false
   failed_when: false
   when: disable_transparent_hugepage

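This is a genuine fix, not a style change: the command module does not run its argument through a shell, so the > redirection was never performed and 'never', '>' and the path were all handed to echo as plain arguments. A minimal sketch of the distinction (file path illustrative):

- name: broken - command does not interpret shell redirection
  command: "echo never > /tmp/thp-example"   # '>' is just an argument to echo here

- name: works - shell passes the line to /bin/sh
  shell: "echo never > /tmp/thp-example"
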
@@ -1,3 +1,9 @@
 ---
 
 ceph_docker_registry: docker.io
+ceph_docker_enable_centos_extra_repo: false
+
+# Set uid/gid to default '64045' for bootstrap directories.
+# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
+# These values have to be set according to the base OS used by the container image, NOT the host.
+bootstrap_dirs_owner: "64045"
+bootstrap_dirs_group: "64045"

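Because the uid/gid must match the container image's base OS rather than the host, a deployment built on rhel-based images would override both values, e.g. in group_vars; a minimal sketch using the value from the comment above:

# group_vars/all.yml (sketch for rhel-based container images)
bootstrap_dirs_owner: "167"
bootstrap_dirs_group: "167"
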
@@ -1,2 +1,23 @@
 ---
 - include: system_checks.yml
+
+- name: check if it is atomic host
+  stat: path=/run/ostree-booted
+  register: stat_ostree
+  always_run: true
+
+- name: set fact for using atomic host
+  set_fact:
+    is_atomic: '{{ stat_ostree.stat.exists }}'
+
+- include: ./pre_requisites/prerequisites.yml
+  when: not is_atomic
+
+# NOTE(guits): would be nice to refact this block with L39-45 in roles/ceph-common/tasks/facts.yml
+- set_fact:
+    monitor_name: "{{ ansible_hostname }}"
+  when: not mon_use_fqdn
+
+- set_fact:
+    monitor_name: "{{ ansible_fqdn }}"
+  when: mon_use_fqdn

@@ -0,0 +1,67 @@
+---
+# To install docker on debian
+- name: allow apt to use a repository over https (debian)
+  package:
+    name: "{{ item }}"
+    state: present
+    update_cache: yes
+  with_items:
+    - apt-transport-https
+    - ca-certificates
+    - curl
+    - software-properties-common
+  when: ansible_distribution == 'Debian'
+  tags:
+    with_pkg
+
+- name: add docker's gpg key
+  apt_key:
+    url: https://apt.dockerproject.org/gpg
+    state: present
+  when: ansible_distribution == 'Debian'
+
+- name: add docker and debian testing repository
+  apt_repository:
+    repo: "{{ item }}"
+    state: present
+  with_items:
+    - "deb https://apt.dockerproject.org/repo/ debian-{{ ansible_distribution_release }} main"
+    - "deb http://http.us.debian.org/debian/ testing contrib main"
+  when: ansible_distribution == 'Debian'
+
+- name: install pip from testing on debian
+  package:
+    name: python-pip
+    state: present
+    default_release: testing
+    update_cache: yes
+  when: ansible_distribution == 'Debian'
+  tags:
+    with_pkg
+
+- name: install docker-py via pip for debian
+  pip:
+    name: docker-py
+    state: latest
+  tags:
+    with_pkg
+  when: ansible_distribution == 'Debian'
+
+- name: install docker on debian
+  package:
+    name: docker-engine
+    state: present
+    update_cache: yes
+  when: ansible_distribution == 'Debian'
+  tags:
+    with_pkg
+
+# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
+# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
+- name: install six via pip
+  pip:
+    name: six
+    version: 1.9.0
+  when: ansible_distribution == 'Debian'
+  tags:
+    with_pkg

@@ -0,0 +1,80 @@
+---
+# Manage debian in a separate file because of specificities
+- include: debian_prerequisites.yml
+  when: ansible_distribution == 'Debian'
+
+- name: install docker on ubuntu
+  package:
+    name: docker.io
+    state: present
+    update_cache: yes
+  when: ansible_distribution == 'Ubuntu'
+  tags:
+    with_pkg
+
+# ensure extras enabled for docker
+- name: enable extras on centos
+  yum_repository:
+    name: extras
+    state: present
+    enabled: yes
+  when:
+    - ansible_distribution == 'CentOS'
+    - ceph_docker_enable_centos_extra_repo
+  tags:
+    with_pkg
+
+- name: install python-six
+  package:
+    name: python-six
+    state: present
+    update_cache: yes
+  when: ansible_distribution != 'Debian'
+  tags:
+    with_pkg
+
+- name: install python-docker-py on red hat / centos
+  package:
+    name: python-docker-py
+    state: present
+  when: ansible_os_family == 'RedHat'
+  tags:
+    with_pkg
+
+- name: install python-docker on ubuntu
+  package:
+    name: python-docker
+    state: present
+  when: ansible_distribution == 'Ubuntu'
+  tags:
+    with_pkg
+
+- name: install docker on red hat / centos
+  package:
+    name: docker
+    state: present
+  when: ansible_os_family == 'RedHat'
+  tags:
+    with_pkg
+
+- name: pause after docker install before starting (on openstack vms)
+  pause: seconds=5
+  when: ceph_docker_on_openstack
+  tags:
+    with_pkg
+
+- name: start docker service
+  service:
+    name: docker
+    state: started
+    enabled: yes
+  tags:
+    with_pkg
+
+- name: install ntp
+  package:
+    name: ntp
+    state: present
+  when: ntp_service_enabled
+  tags:
+    with_pkg

@@ -3,8 +3,8 @@
   file:
     path: "{{ item }}"
     state: directory
-    owner: "64045"
-    group: "64045"
+    owner: "{{ bootstrap_dirs_owner }}"
+    group: "{{ bootstrap_dirs_group }}"
     mode: "0755"
   with_items:
     - /etc/ceph/

@@ -6,23 +6,11 @@
   failed_when: false
   always_run: true
 
-- name: check if it is Atomic host
-  stat: path=/run/ostree-booted
-  register: stat_ostree
-  always_run: true
-
-- name: set fact for using Atomic host
-  set_fact:
-    is_atomic: '{{ stat_ostree.stat.exists }}'
-
 - include: checks.yml
   when:
     - ceph_health.rc != 0
     - not "{{ rolling_update | default(false) }}"
 
-- include: pre_requisite.yml
-  when: not is_atomic
-
 - include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
   when:
     - is_atomic

@@ -1,144 +0,0 @@
----
-- name: install pip and docker on ubuntu
-  apt:
-    name: "{{ item }}"
-    state: present
-    update_cache: yes
-  with_items:
-    - python-pip
-    - docker
-    - docker.io
-  when: ansible_distribution == 'Ubuntu'
-  tags:
-    with_pkg
-
-- name: install pip and docker on debian
-  apt:
-    name: "{{ item }}"
-    state: present
-    update_cache: yes
-  with_items:
-    - python-pip
-    - docker-engine
-  when: ansible_distribution == 'Debian'
-  tags:
-    with_pkg
-
-# install epel for pip
-- name: install epel on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - epel-release
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-  failed_when: false
-
-- name: enable extras repo for centos
-  yum_repository:
-    name: extras
-    state: present
-    enabled: yes
-  when: ansible_distribution == 'CentOS'
-  tags:
-    with_pkg
-
-- name: install pip on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - python-pip
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-
-- name: install docker-engine on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - docker-engine
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-  failed_when: false
-
-# for CentOS
-- name: install docker on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - docker
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-  failed_when: false
-
-# docker package could be docker-enginer or docker
-- name: install pip and docker on redhat
-  dnf:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - python-pip
-    - docker-engine
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "dnf"
-  tags:
-    with_pkg
-
-- name: start docker service
-  service:
-    name: docker
-    state: started
-    enabled: yes
-  tags:
-    with_pkg
-
-# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
-# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
-- name: install six
-  pip:
-    name: six
-    version: 1.9.0
-  tags:
-    with_pkg
-
-# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
-- name: install docker-py
-  pip:
-    name: docker-py
-    version: 1.1.0
-  tags:
-    with_pkg
-  when: ansible_version['full'] | version_compare('2.1.0.0', '<')
-
-- name: install docker-py
-  pip:
-    name: docker-py
-    state: latest
-  tags:
-    with_pkg
-  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')
-
-- name: install ntp
-  package:
-    name: ntp
-    state: present
-  when:
-    - ntp_service_enabled
-  tags:
-    with_pkg

@@ -4,8 +4,8 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop {{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm {{ ansible_hostname }}
+ExecStartPre=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/docker rm ceph-mds-{{ ansible_hostname }}
 ExecStart=/usr/bin/docker run --rm --net=host \
 {% if not mds_containerized_deployment_with_kv -%}
   -v /var/lib/ceph:/var/lib/ceph \

@@ -13,15 +13,15 @@ ExecStart=/usr/bin/docker run --rm --net=host \
 {% else -%}
   -e KV_TYPE={{kv_type}} \
   -e KV_IP={{kv_endpoint}} \
+  -e KV_PORT={{kv_port}} \
 {% endif -%}
   -v /etc/localtime:/etc/localtime:ro \
-  --privileged \
   -e CEPH_DAEMON=MDS \
   -e CEPHFS_CREATE=1 \
   {{ ceph_mds_docker_extra_env }} \
-  --name={{ ansible_hostname }} \
+  --name=ceph-mds-{{ ansible_hostname }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop {{ ansible_hostname }}
+ExecStopPost=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120

@@ -34,6 +34,9 @@ secure_cluster_flags:
 # Enable the Calamari-backed REST API on a Monitor
 calamari: false
 
+# Enable debugging for Calamari
+calamari_debug: false
+
 #############
 # OPENSTACK #
 #############

@@ -66,7 +69,7 @@ openstack_keys:
 ##########
 # DOCKER #
 ##########
+docker_exec_cmd:
 mon_containerized_deployment: false
 mon_containerized_deployment_with_kv: false
 # This is currently in ceph-common defaults because it is shared with ceph-nfs

@@ -6,5 +6,16 @@
   tags:
     - package-install
 
+- name: increase calamari logging level when debug is on
+  ini_file:
+    dest: /etc/calamari/calamari.conf
+    section: "{{ item }}"
+    option: log_level
+    value: DEBUG
+  with_items:
+    - cthulhu
+    - calamari_web
+  when: calamari_debug
+
 - name: initialize the calamari server api
   command: calamari-ctl initialize

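ini_file changes one key per section in place and leaves the rest of the file alone, so with calamari_debug: true the loop above should leave /etc/calamari/calamari.conf containing roughly (a sketch; other keys in those sections are untouched):

[cthulhu]
log_level = DEBUG

[calamari_web]
log_level = DEBUG
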
@@ -1,5 +1,5 @@
 ---
-- name: collect admin and bootstrap keys (for or after kraken release)
+- name: collect admin and bootstrap keys
   command: ceph-create-keys --cluster {{ cluster }} -i {{ monitor_name }}
   args:
     creates: /etc/ceph/{{ cluster }}.client.admin.keyring

@@ -8,7 +8,7 @@
   always_run: true
   when:
     - cephx
-    - ceph_release_num.{{ ceph_release }} > ceph_release_num.jewel
+
 # NOTE (leseb): wait for mon discovery and quorum resolution
 # the admin key is not instantaneously created so we have to wait a bit
 - name: "wait for {{ cluster }}.client.admin.keyring exists"

@@ -42,44 +42,7 @@
     - cephx
     - groups[restapi_group_name] is defined
 
-# NOTE(leseb): we add a conditional for backward compatibility
-# so people that had 'pool_default_pg_num' declared will get
-# the same behaviour
-#
-- name: check if does global key exist in ceph_conf_overrides
-  set_fact:
-    global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"
-
-- name: check if ceph_conf_overrides.global.osd_pool_default_pg_num is set
-  set_fact:
-    osd_pool_default_pg_num_in_overrides: "{{ 'osd_pool_default_pg_num' in ceph_conf_overrides.global }}"
-  when: global_in_ceph_conf_overrides
-
-- name: get default value for osd_pool_default_pg_num
-  shell: |
-    ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num | grep -Po '(?<="osd_pool_default_pg_num": ")[^"]*'
-  failed_when: false
-  changed_when: false
-  run_once: true
-  register: default_pool_default_pg_num
-  when: (pool_default_pg_num is not defined or not global_in_ceph_conf_overrides)
-
-- set_fact:
-    osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
-  when: pool_default_pg_num is defined
-
-- set_fact:
-    osd_pool_default_pg_num: "{{ default_pool_default_pg_num.stdout }}"
-  when:
-    - pool_default_pg_num is not defined
-    - default_pool_default_pg_num.rc == 0
-    - (osd_pool_default_pg_num_in_overrides is not defined or not osd_pool_default_pg_num_in_overrides)
-
-- set_fact:
-    osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
-  when:
-    - global_in_ceph_conf_overrides
-    - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
+- include: set_osd_pool_default_pg_num.yml
 
 - name: test if rbd exists
   command: ceph --cluster {{ cluster }} osd pool stats rbd

@@ -103,7 +66,9 @@
     - ceph_conf_overrides.global.osd_pool_default_size is defined
 
 - include: openstack_config.yml
-  when: openstack_config
+  when:
+    - openstack_config
+    - inventory_hostname == groups.mons|last
 
 - name: find ceph keys
   shell: ls -1 /etc/ceph/*.keyring

@@ -10,21 +10,28 @@
     - cephfs_metadata
   changed_when: false
 
+- name: check if ceph filesystem already exists
+  command: ceph --cluster {{ cluster }} fs get {{ cephfs }}
+  register: check_existing_cephfs
+  changed_when: false
+  failed_when: false
+
 - name: create ceph filesystem
   command: ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
   changed_when: false
+  when: check_existing_cephfs.rc != 0
 
 - name: allow multimds
   command: ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it
   changed_when: false
   when:
     - ceph_release_num.{{ ceph_release }} >= ceph_release_num.jewel
     - mds_allow_multimds
 
 - name: set max_mds
   command: ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}
   changed_when: false
   when:
     - ceph_release_num.{{ ceph_release }} >= ceph_release_num.jewel
     - mds_allow_multimds
     - mds_max_mds > 1

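The new probe makes `fs new` idempotent: the get command registers its return code, and creation only runs when the probe failed. The same register-and-test guard works for any create-unless-exists CLI; a generic sketch (pool name and pg count illustrative):

- name: check if pool already exists
  command: ceph osd pool stats mypool        # 'mypool' is illustrative
  register: check_pool
  changed_when: false
  failed_when: false

- name: create pool only when missing
  command: ceph osd pool create mypool 128
  when: check_pool.rc != 0
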
@@ -3,8 +3,8 @@
   file:
     path: "{{ item }}"
     state: directory
-    owner: "64045"
-    group: "64045"
+    owner: "{{ bootstrap_dirs_owner }}"
+    group: "{{ bootstrap_dirs_group }}"
     mode: "0755"
   with_items:
     - /etc/ceph/

@@ -6,24 +6,12 @@
   failed_when: false
   always_run: true
 
-- name: check if it is Atomic host
-  stat: path=/run/ostree-booted
-  register: stat_ostree
-  always_run: true
-
-- name: set fact for using Atomic host
-  set_fact:
-    is_atomic: '{{ stat_ostree.stat.exists }}'
-
 - include: checks.yml
   when:
     - ceph_health.rc != 0
     - not mon_containerized_deployment_with_kv
     - not "{{ rolling_update | default(false) }}"
 
-- include: pre_requisite.yml
-  when: not is_atomic
-
 - include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
   when:
     - is_atomic

@@ -56,17 +44,21 @@
 - include: selinux.yml
   when: ansible_os_family == 'RedHat'
 
+- name: set docker_exec_cmd fact
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
+
 - include: start_docker_monitor.yml
 
 - name: wait for monitor socket to exist
-  command: docker exec {{ ansible_hostname }} stat /var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok
+  command: docker exec ceph-mon-{{ ansible_hostname }} stat /var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok
   register: monitor_socket
   retries: 5
-  delay: 10
+  delay: 15
   until: monitor_socket.rc == 0
 
 - name: force peer addition as potential bootstrap peer for cluster bringup
-  command: docker exec {{ ansible_hostname }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok add_bootstrap_peer_hint {{ hostvars[item]['ansible_' + ceph_mon_docker_interface].ipv4.address }}
+  command: docker exec ceph-mon-{{ ansible_hostname }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok add_bootstrap_peer_hint {{ hostvars[item]['ansible_' + ceph_mon_docker_interface].ipv4.address }}
   with_items: "{{ groups.mons }}"
   changed_when: false
   failed_when: false

@@ -78,7 +70,7 @@
   when: not mon_containerized_deployment_with_kv
 
 - name: create ceph rest api keyring when mon is containerized
-  command: docker exec {{ ansible_hostname }} ceph --cluster {{ cluster }} auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/{{ cluster }}.client.restapi.keyring
+  command: docker exec ceph-mon-{{ ansible_hostname }} ceph --cluster {{ cluster }} auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/{{ cluster }}.client.restapi.keyring
   args:
     creates: /etc/ceph/{{ cluster }}.client.restapi.keyring
   changed_when: false

@@ -88,3 +80,11 @@
     - groups[restapi_group_name] is defined
     - inventory_hostname == groups.mons|last
     - not mon_containerized_deployment_with_kv
+
+- include: "{{ playbook_dir }}/roles/ceph-mon/tasks/set_osd_pool_default_pg_num.yml"
+
+# create openstack pools only when all mons are up.
+- include: "{{ playbook_dir }}/roles/ceph-mon/tasks/openstack_config.yml"
+  when:
+    - openstack_config
+    - inventory_hostname == groups.mons|last

@@ -1,148 +0,0 @@
----
-- name: install pip and docker on ubuntu
-  apt:
-    name: "{{ item }}"
-    state: present
-    update_cache: yes
-  with_items:
-    - python-pip
-    - docker
-    - docker.io
-  when: ansible_distribution == 'Ubuntu'
-  tags:
-    with_pkg
-
-- name: install pip and docker on debian
-  apt:
-    name: "{{ item }}"
-    state: present
-    update_cache: yes
-  with_items:
-    - python-pip
-    - docker-engine
-  when: ansible_distribution == 'Debian'
-  tags:
-    with_pkg
-
-# install epel for pip
-- name: install epel-release on redhat
-  yum:
-    name: epel-release
-    state: present
-  when: ansible_os_family == 'RedHat'
-  tags:
-    with_pkg
-
-# ensure extras enabled for docker
-- name: enable extras on centos
-  yum_repository:
-    name: extras
-    state: present
-    enabled: yes
-  when:
-    - ansible_distribution == 'CentOS'
-  tags:
-    with_pkg
-
-- name: install pip on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - python-pip
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-
-- name: install docker-engine on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - docker-engine
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-  failed_when: false
-
-# for CentOS
-- name: install docker on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - docker
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-  failed_when: false
-
-- name: install pip and docker on redhat (dnf)
-  dnf:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - python-pip
-    - docker-engine
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "dnf"
-  tags:
-    with_pkg
-
-
-# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
-# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
-- name: install six
-  pip:
-    name: six
-    version: 1.9.0
-  tags:
-    with_pkg
-
-- name: pause after docker install before starting (on openstack vms)
-  pause: seconds=5
-  when: ceph_docker_on_openstack
-  tags:
-    with_pkg
-
-- name: start docker service
-  service:
-    name: docker
-    state: started
-    enabled: yes
-  tags:
-    with_pkg
-
-# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
-- name: install docker-py
-  pip:
-    name: docker-py
-    version: 1.1.0
-  tags:
-    with_pkg
-  when: ansible_version['full'] | version_compare('2.1.0.0', '<')
-
-- name: install docker-py
-  pip:
-    name: docker-py
-    state: latest
-  tags:
-    with_pkg
-  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')
-
-- name: install ntp
-  package:
-    name: ntp
-    state: present
-  when:
-    - ntp_service_enabled
-  tags:
-    with_pkg

@@ -15,6 +15,7 @@
     - not mon_containerized_deployment
     - groups[mds_group_name] is defined
     - "{{ groups[mds_group_name]|length > 0 }}"
+    - inventory_hostname == groups.mons|last
 
 - include: secure_cluster.yml
   when:

@@ -1,12 +1,12 @@
 ---
 - name: create openstack pool
-  command: ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pg_num }}
+  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pg_num }}"
   with_items: "{{ openstack_pools | unique }}"
   changed_when: false
   failed_when: false
 
 - name: create openstack keys
-  command: ceph --cluster {{ cluster }} auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
+  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
   args:
     creates: /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
   with_items: "{{ openstack_keys }}"

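docker_exec_cmd is empty in the role defaults and is only set to "docker exec ceph-mon-<hostname>" for containerized monitors, so the same task now serves both deployment modes. A sketch of how the templated command expands (hostname, pool name and pg count illustrative):

- name: example - a ceph command that works in both deployment modes
  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool create volumes 128"
  # bare metal:     ceph --cluster ceph osd pool create volumes 128
  # containerized:  docker exec ceph-mon-mon0 ceph --cluster ceph osd pool create volumes 128
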
@@ -0,0 +1,38 @@
+# NOTE(leseb): we add a conditional for backward compatibility
+# so people that had 'pool_default_pg_num' declared will get
+# the same behaviour
+#
+- name: check if does global key exist in ceph_conf_overrides
+  set_fact:
+    global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"
+
+- name: check if ceph_conf_overrides.global.osd_pool_default_pg_num is set
+  set_fact:
+    osd_pool_default_pg_num_in_overrides: "{{ 'osd_pool_default_pg_num' in ceph_conf_overrides.global }}"
+  when: global_in_ceph_conf_overrides
+
+- name: get default value for osd_pool_default_pg_num
+  shell: |
+    {{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num | grep -Po '(?<="osd_pool_default_pg_num": ")[^"]*'
+  failed_when: false
+  changed_when: false
+  run_once: true
+  register: default_pool_default_pg_num
+  when: pool_default_pg_num is not defined or not global_in_ceph_conf_overrides
+
+- set_fact:
+    osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
+  when: pool_default_pg_num is defined
+
+- set_fact:
+    osd_pool_default_pg_num: "{{ default_pool_default_pg_num.stdout }}"
+  when:
+    - pool_default_pg_num is not defined
+    - default_pool_default_pg_num.rc == 0
+    - (osd_pool_default_pg_num_in_overrides is not defined or not osd_pool_default_pg_num_in_overrides)
+
+- set_fact:
+    osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
+  when:
+    - global_in_ceph_conf_overrides
+    - ceph_conf_overrides.global.osd_pool_default_pg_num is defined

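Task order gives a clear precedence: ceph_conf_overrides.global.osd_pool_default_pg_num wins when present (its set_fact runs last), otherwise an explicitly declared pool_default_pg_num, otherwise the value queried from the running monitor. A group_vars sketch of the overrides case (the value 256 is illustrative):

# group_vars/all.yml (sketch)
ceph_conf_overrides:
  global:
    osd_pool_default_pg_num: 256
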
@@ -4,9 +4,9 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker rm %i
+ExecStartPre=-/usr/bin/docker rm ceph-mon-%i
 ExecStartPre=$(command -v mkdir) -p /etc/ceph /var/lib/ceph/mon
-ExecStart=/usr/bin/docker run --rm --name %i --net=host \
+ExecStart=/usr/bin/docker run --rm --name ceph-mon-%i --net=host \
 {% if not mon_containerized_deployment_with_kv -%}
   -v /var/lib/ceph:/var/lib/ceph \
   -v /etc/ceph:/etc/ceph \

@@ -27,7 +27,7 @@ ExecStart=/usr/bin/docker run --rm --name %i --net=host \
   -e CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }} \
   {{ ceph_mon_docker_extra_env }} \
   {{ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop %i
+ExecStopPost=-/usr/bin/docker stop ceph-mon-%i
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120

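These are systemd template units, so %i expands to the instance suffix after the '@' in the unit name; prefixing it with ceph-mon- keeps container names unique when several ceph daemons share a host. A minimal sketch of driving such a templated unit from Ansible (instance name assumed to be the short hostname):

- name: enable and start the mon container unit for this host
  service:
    name: "ceph-mon@{{ ansible_hostname }}"
    state: started
    enabled: yes
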
@@ -10,7 +10,7 @@
 - name: create the nfs rgw user
   docker:
     image: "{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-    name: ceph-{{ ansible_hostname }}-rgw-user
+    name: ceph-rgw-user-{{ ansible_hostname }}
     hostname: "{{ ansible_hostname }}"
     expose: "{{ ceph_rgw_civetweb_port }}"
     ports: "{{ ceph_rgw_civetweb_port }}:{{ ceph_rgw_civetweb_port }}"

@@ -20,7 +20,7 @@
   when: nfs_obj_gw
 
 - name: get user create output
-  command: docker logs ceph-{{ ansible_hostname }}-rgw-user
+  command: docker logs ceph-rgw-user-{{ ansible_hostname }}
   always_run: true
   register: rgwuser
 

@@ -3,8 +3,8 @@
   file:
     path: "{{ item }}"
     state: directory
-    owner: "64045"
-    group: "64045"
+    owner: "{{ bootstrap_dirs_owner }}"
+    group: "{{ bootstrap_dirs_group }}"
     mode: "0755"
   with_items:
     - /etc/ceph/

@@ -6,23 +6,11 @@
   failed_when: false
   always_run: true
 
-- name: check if it is Atomic host
-  stat: path=/run/ostree-booted
-  register: stat_ostree
-  always_run: true
-
-- name: set fact for using Atomic host
-  set_fact:
-    is_atomic: '{{ stat_ostree.stat.exists }}'
-
 - include: checks.yml
   when:
     ceph_health.rc != 0 and
     not mon_containerized_deployment_with_kv
 
-- include: pre_requisite.yml
-  when: not is_atomic
-
 - include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
   when:
     - is_atomic

@@ -1,117 +0,0 @@
----
-- name: install pip and docker on ubuntu
-  apt:
-    name: "{{ item }}"
-    state: present
-    update_cache: yes
-  with_items:
-    - python-pip
-    - docker
-    - docker.io
-  when: ansible_distribution == 'Ubuntu'
-  tags:
-    with_pkg
-
-- name: install pip and docker on debian
-  apt:
-    name: "{{ item }}"
-    state: present
-    update_cache: yes
-  with_items:
-    - python-pip
-    - docker-engine
-  when: ansible_distribution == 'Debian'
-  tags:
-    with_pkg
-
-- name: enable extras repo for centos
-  yum_repository:
-    name: extras
-    state: present
-    enabled: yes
-  when: ansible_distribution == 'CentOS'
-  tags:
-    with_pkg
-
-- name: install pip and docker on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - python-pip
-    - docker-engine
-  when:
-    ansible_os_family == 'RedHat' and
-    ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-
-- name: install pip and docker on redhat
-  dnf:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - python-pip
-    - docker-engine
-  when:
-    ansible_os_family == 'RedHat' and
-    ansible_pkg_mgr == "dnf"
-  tags:
-    with_pkg
-
-- name: install epel-release on redhat
-  yum:
-    name: epel-release
-    state: present
-  when: ansible_os_family == 'RedHat'
-  tags:
-    with_pkg
-
-# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
-# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
-- name: install six
-  pip:
-    name: six
-    version: 1.9.0
-  tags:
-    with_pkg
-
-# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
-- name: install docker-py
-  pip:
-    name: docker-py
-    version: 1.1.0
-  tags:
-    with_pkg
-  when: ansible_version['full'] | version_compare('2.1.0.0', '<')
-
-- name: install docker-py
-  pip:
-    name: docker-py
-    state: latest
-  tags:
-    with_pkg
-  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')
-
-- name: pause after docker install before starting (on openstack vms)
-  pause: seconds=5
-  when: ceph_docker_on_openstack
-  tags:
-    with_pkg
-
-- name: start docker service
-  service:
-    name: docker
-    state: started
-    enabled: yes
-  tags:
-    with_pkg
-
-- name: install ntp
-  package:
-    name: ntp
-    state: present
-  when:
-    - ntp_service_enabled
-  tags:
-    with_pkg

@@ -5,7 +5,7 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker rm %i
+ExecStartPre=-/usr/bin/docker rm ceph-nfs-%i
 ExecStartPre=/usr/bin/mkdir -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha
 ExecStart=/usr/bin/docker run --rm --net=host \
 {% if not mon_containerized_deployment_with_kv -%}

@@ -14,14 +14,15 @@ ExecStart=/usr/bin/docker run --rm --net=host \
 {% else -%}
   -e KV_TYPE={{kv_type}} \
   -e KV_IP={{kv_endpoint}}\
+  -e KV_PORT={{kv_port}} \
 {% endif -%}
   -v /etc/localtime:/etc/localtime:ro \
   --privileged \
   -e CEPH_DAEMON=NFS \
   {{ ceph_nfs_docker_extra_env }} \
-  --name=nfs-{{ ansible_hostname }} \
+  --name=ceph-nfs-{{ ansible_hostname }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop %i
+ExecStopPost=-/usr/bin/docker stop ceph-nfs-%i
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120

@@ -51,7 +51,7 @@ copy_admin_key: false
 # osd0 ceph_crush_root=foo ceph_crush_rack=bar
 
 crush_location: false
-osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
+osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""
 
 
 ##############

@@ -111,7 +111,7 @@ journal_collocation: false
 
 # II. Second scenario: N journal devices for N OSDs
 # Use 'true' for 'raw_multi_journal' to enable this scenario
 # List devices under 'devices' variable above and
 # write journal devices for those under 'raw_journal_devices'
 # In the following example:
 # * sdb and sdc will get sdf as a journal

@@ -126,6 +126,11 @@ raw_multi_journal: false
 # - /dev/sdf
 # - /dev/sdg
 # - /dev/sdg
+#
+# NOTE(leseb):
+# On a containerized scenario we only support A SINGLE journal
+# for all the OSDs on a given machine. If you don't, bad things will happen
+# This is a limitation we plan to fix at some point.
 raw_journal_devices: []
 
 

@@ -168,10 +173,30 @@ osd_containerized_deployment_with_kv: false
 kv_type: etcd
 kv_endpoint: 127.0.0.1
 kv_port: 4001
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
 ceph_docker_image: "ceph/daemon"
 ceph_docker_image_tag: latest
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
-ceph_osd_docker_devices: "{{ devices }}"
-ceph_docker_on_openstack: false
 ceph_config_keys: [] # DON'T TOUCH ME
+ceph_docker_on_openstack: false
+
+# PREPARE DEVICE
+# Make sure you only pass a single device to raw_journal_devices, otherwise this will fail horribly.
+# This is why we use [0] in the example.
+#
+# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
+#
+# Examples:
+# Journal collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+# Dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
+# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
+# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
+#
+ceph_osd_docker_devices: "{{ devices }}"
+ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+
+# ACTIVATE DEVICE
+# Examples:
+# Journal collocated or Dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
+# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+#
+ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
+ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command

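Putting the commented examples together, a containerized OSD node with one dedicated, encrypted journal device could be configured like this in group_vars (device names illustrative; note the single-entry raw_journal_devices, per the limitation above):

# group_vars/osds.yml (sketch)
devices:
  - /dev/sdb
  - /dev/sdc
raw_journal_devices:
  - /dev/sdf    # a single journal device only, per the note above
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
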
@@ -3,8 +3,8 @@
   file:
     path: "{{ item }}"
     state: directory
-    owner: "64045"
-    group: "64045"
+    owner: "{{ bootstrap_dirs_owner }}"
+    group: "{{ bootstrap_dirs_group }}"
     mode: "0755"
   with_items:
     - /etc/ceph/

@@ -12,18 +12,6 @@
     - not osd_containerized_deployment_with_kv
     - not "{{ rolling_update | default(false) }}"
 
-- name: check if it is Atomic host
-  stat: path=/run/ostree-booted
-  register: stat_ostree
-  always_run: true
-
-- name: set fact for using Atomic host
-  set_fact:
-    is_atomic: '{{ stat_ostree.stat.exists }}'
-
-- include: pre_requisite.yml
-  when: not is_atomic
-
 - include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
   when:
     - is_atomic

@@ -1,136 +0,0 @@
----
-- name: install pip and docker on ubuntu
-  apt:
-    name: "{{ item }}"
-    state: present
-    update_cache: yes
-  with_items:
-    - python-pip
-    - docker
-    - docker.io
-  when: ansible_distribution == 'Ubuntu'
-  tags:
-    with_pkg
-
-- name: install pip and docker on debian
-  apt:
-    name: "{{ item }}"
-    state: present
-    update_cache: yes
-  with_items:
-    - python-pip
-    - docker-engine
-  when: ansible_distribution == 'Debian'
-  tags:
-    with_pkg
-
-- name: install epel-release on redhat
-  yum:
-    name: epel-release
-    state: present
-  when: ansible_os_family == 'RedHat'
-  tags:
-    with_pkg
-
-- name: install pip on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - python-pip
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-
-- name: install docker-engine on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - docker-engine
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-  failed_when: false
-
-# for CentOS
-- name: install docker on redhat
-  yum:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - docker
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "yum"
-  tags:
-    with_pkg
-  failed_when: false
-
-- name: install pip and docker on redhat
-  dnf:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - python-pip
-    - docker-engine
-    - docker
-  when:
-    - ansible_os_family == 'RedHat'
-    - ansible_pkg_mgr == "dnf"
-  tags:
-    with_pkg
-
-
-# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
-# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
-- name: install six
-  pip:
-    name: six
-    version: 1.9.0
-  tags:
-    with_pkg
-
-- name: pause after docker install before starting (on openstack vms)
-  pause: seconds=5
-  when: ceph_docker_on_openstack
-  tags:
-    with_pkg
-
-- name: start docker service
-  service:
-    name: docker
-    state: started
-    enabled: yes
-  tags:
-    with_pkg
-
-# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
-- name: install docker-py
-  pip:
-    name: docker-py
-    version: 1.1.0
-  tags:
-    with_pkg
-  when: ansible_version['full'] | version_compare('2.1.0.0', '<')
-
-- name: install docker-py
-  pip:
-    name: docker-py
-    state: latest
-  tags:
-    with_pkg
-  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')
-
-- name: install ntp
-  package:
-    name: ntp
-    state: present
-  when:
-    - ntp_service_enabled
-  tags:
-    with_pkg

@@ -24,13 +24,13 @@
       docker run --net=host \
       --pid=host \
       --privileged=true \
-      --name="{{ ansible_hostname }}-osd-prepare-{{ item.0 | regex_replace('/', '') }}" \
+      --name="ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.0 | regex_replace('/', '') }}" \
       -v /etc/ceph:/etc/ceph \
       -v /var/lib/ceph/:/var/lib/ceph/ \
       -v /dev:/dev \
       -v /etc/localtime:/etc/localtime:ro \
       -e "OSD_DEVICE={{ item.0 }}" \
+      -e "OSD_JOURNAL_UUID=$(python -c "import uuid; print uuid.uuid5(uuid.NAMESPACE_DNS, '{{ ansible_machine_id }}{{ item.0 }}')")" \
       -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
       {{ ceph_osd_docker_prepare_env }} \
       "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
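The new OSD_JOURNAL_UUID flag is worth unpacking: uuid.uuid5 derives a deterministic UUID from the machine id concatenated with the device path, so the prepare and activate steps can agree on the journal partition without storing any state. A minimal sketch of the same computation (the machine id and device below are made-up examples):

    # Derive the deterministic journal UUID the task computes; values are illustrative.
    MACHINE_ID="6f72b53f0a9c42a8b1c77b2ac3f2b51a"
    DEVICE="/dev/sdb"
    python -c "import uuid; print uuid.uuid5(uuid.NAMESPACE_DNS, '${MACHINE_ID}${DEVICE}')"
    # the resulting UUID is what later resolves under /dev/disk/by-partuuid/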
@@ -47,11 +47,11 @@
       docker run --net=host \
       --pid=host \
       --privileged=true \
-      --name="{{ ansible_hostname }}-osd-prepare-{{ item.0 | regex_replace('/', '') }}" \
+      --name="ceph-osd-prepare-{{ ansible_hostname }}-dev-{{ item.0 | regex_replace('/', '') }}" \
       -v /dev:/dev \
       -v /etc/localtime:/etc/localtime:ro \
       -e "OSD_DEVICE={{ item.0 }}" \
+      -e "OSD_JOURNAL_UUID=$(python -c "import uuid; print uuid.uuid5(uuid.NAMESPACE_DNS, '{{ ansible_machine_id }}{{ item.0 }}')")" \
       -e "{{ ceph_osd_docker_prepare_env }}" \
       -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
       -e KV_TYPE={{kv_type}} \
@@ -67,6 +67,15 @@
     - ceph_osd_docker_prepare_env is defined
     - osd_containerized_deployment_with_kv

+- name: generate ceph osd docker run script
+  become: true
+  template:
+    src: "{{ role_path }}/templates/ceph-osd-run.sh.j2"
+    dest: "{{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh"
+    owner: "root"
+    group: "root"
+    mode: "0744"
+
 - name: generate systemd unit file
   become: true
   template:
@@ -75,18 +84,15 @@
     owner: "root"
     group: "root"
     mode: "0644"
-  failed_when: false

 - name: enable systemd unit file for osd instance
   shell: systemctl enable ceph-osd@{{ item | basename }}.service
-  failed_when: false
   changed_when: false
   with_items: "{{ ceph_osd_docker_devices }}"

 - name: reload systemd unit files
   shell: systemctl daemon-reload
   changed_when: false
-  failed_when: false

 - name: systemd start osd container
   service:
@@ -0,0 +1,50 @@
#!/bin/bash
# {{ ansible_managed }}

if [[ "$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}* | wc -l)" -gt 0 ]] ; then
  for part in /dev/${1}*; do
    if [[ "$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID ${part} | wc -l)" -gt 0 ]]; then
      DEVICES="${DEVICES} --device=/dev/disk/by-partuuid/$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID ${part}) "
    fi
  done
  # We test whether the dm exists; if it does we add it to the --device list.
  # If not we don't add it, and the first activation will fail. However the dm
  # will be created by that first run, so on the second run it will be added to
  # the device list and activation will succeed.
  blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}1
  # make sure blkid returns 0, otherwise we would test /dev/mapper/ which always exists
  if [[ -e /dev/mapper/$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}1) && "$?" -eq 0 ]]; then
    DEVICES="${DEVICES} --device=/dev/disk/by-partuuid/$(blkid -t PARTLABEL="ceph lockbox" -o value -s PARTUUID /dev/${1}3) --device=/dev/${1}3 --device=/dev/mapper/control --device=/dev/mapper/$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}2) --device=/dev/mapper/$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}1)"
  else
    DEVICES="${DEVICES} --device=/dev/disk/by-partuuid/$(blkid -t PARTLABEL="ceph lockbox" -o value -s PARTUUID /dev/${1}3) --device=/dev/${1}3 --device=/dev/mapper/control --device=/dev/mapper/$(blkid -t TYPE=crypto_LUKS -o value -s PARTUUID /dev/${1}2)"
  fi
fi

/usr/bin/docker run \
  --rm \
  --net=host \
  --cap-add SYS_ADMIN \
  --pid=host \
  {% if not osd_containerized_deployment_with_kv -%}
  -v /var/lib/ceph:/var/lib/ceph \
  -v /etc/ceph:/etc/ceph \
  {% else -%}
  -e KV_TYPE={{kv_type}} \
  -e KV_IP={{kv_endpoint}} \
  -e KV_PORT={{kv_port}} \
  {% endif -%}
  -v /etc/localtime:/etc/localtime:ro \
  --device=/dev/${1} \
  --device=/dev/${1}1 \
  {% if raw_journal_devices|length > 0 -%}
  -e OSD_JOURNAL={{ raw_journal_devices[0] }} \
  --device={{ raw_journal_devices[0] }} \
  {% else -%}
  --device=/dev/${1}2 \
  {% endif -%}
  --device=/dev/disk/by-partuuid/$(python -c "import uuid; f = open('/etc/machine-id', 'r').read(); print uuid.uuid5(uuid.NAMESPACE_DNS, f.strip() + '/dev/$1')") ${DEVICES} \
  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
  -e OSD_DEVICE=/dev/${1} \
  {{ ceph_osd_docker_extra_env }} \
  --name=ceph-osd-{{ ansible_hostname }}-dev${1} \
  {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
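A brief usage sketch for the generated script: it takes a single argument, the device basename, which the systemd unit below passes in as its instance name. The script path matches the test scenario later in this diff; the device name is illustrative:

    # Run the OSD container for /dev/sdb (ceph_osd_docker_run_script_path is
    # /var/tmp in the scenario below; "sdb" is an illustrative device).
    /var/tmp/ceph-osd-run.sh sdb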
@@ -1,3 +1,4 @@
+# {{ ansible_managed }}
 [Unit]
 Description=Ceph OSD
 After=docker.service

@@ -5,25 +6,9 @@ After=docker.service
 [Service]
 EnvironmentFile=-/etc/environment
 ExecStartPre=-/usr/bin/docker stop {{ ansible_hostname }}-osd-dev%i
-ExecStartPre=-/usr/bin/docker rm -f {{ ansible_hostname }}-osd-dev%i
-ExecStart=/usr/bin/docker run --rm --net=host --pid=host \
-{% if not osd_containerized_deployment_with_kv -%}
-  -v /var/lib/ceph:/var/lib/ceph \
-  -v /etc/ceph:/etc/ceph \
-{% else -%}
-  -e KV_TYPE={{kv_type}} \
-  -e KV_IP={{kv_endpoint}} \
-  -e KV_PORT={{kv_port}} \
-{% endif -%}
-  -v /etc/localtime:/etc/localtime:ro \
-  -v /dev:/dev \
-  --privileged \
-  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
-  -e OSD_DEVICE=/dev/%i \
-  {{ ceph_osd_docker_extra_env }} \
-  --name={{ ansible_hostname }}-osd-dev%i \
-  {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStop=-/usr/bin/docker stop {{ ansible_hostname }}-osd-dev%i
+ExecStartPre=-/usr/bin/docker rm -f ceph-osd-{{ ansible_hostname }}-dev%i
+ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
+ExecStop=-/usr/bin/docker stop ceph-osd-{{ ansible_hostname }}-dev%i
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
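Because the unit is a template, each OSD device gets its own instance and %i expands to whatever follows the @ in the unit name, exactly what the enable task above sets up with the device basename. A usage sketch (the device name is illustrative):

    # Enable and start one instance per device basename, mirroring the
    # playbook tasks above; "sdb" stands in for a real device.
    systemctl daemon-reload
    systemctl enable ceph-osd@sdb.service
    systemctl start ceph-osd@sdb.service   # %i expands to "sdb" inside the unit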
@@ -3,8 +3,8 @@
   file:
     path: "{{ item }}"
     state: directory
-    owner: "64045"
-    group: "64045"
+    owner: "{{ bootstrap_dirs_owner }}"
+    group: "{{ bootstrap_dirs_group }}"
     mode: "0755"
   with_items:
     - /etc/ceph/
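The hard-coded 64045 was the statically allocated ceph uid/gid on Debian and Ubuntu based container images; turning it into a variable lets the playbook match whatever uid the deployed image actually uses. A quick hedged check (the image reference is illustrative, not taken from this diff):

    # Print the ceph uid inside the container image actually deployed.
    docker run --rm docker.io/ceph/daemon:latest id -u ceph
    # prints e.g. 64045 on Ubuntu-based images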
@@ -6,21 +6,9 @@
   failed_when: false
   always_run: true

-- name: check if it is Atomic host
-  stat: path=/run/ostree-booted
-  always_run: true
-  register: stat_ostree
-
-- name: set fact for using Atomic host
-  set_fact:
-    is_atomic='{{ stat_ostree.stat.exists }}'
-
 - include: checks.yml
   when: ceph_health.rc != 0

-- include: pre_requisite.yml
-  when: not is_atomic
-
 - include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
   when:
     - is_atomic
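The removed detection keyed off the marker file that ostree-based (Atomic) hosts create at boot; the equivalent check in plain shell, for reference:

    # /run/ostree-booted exists only on Atomic (ostree-based) hosts.
    if [[ -e /run/ostree-booted ]]; then echo "is_atomic=true"; else echo "is_atomic=false"; fi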
@@ -1,144 +0,0 @@
---
- name: install pip and docker on ubuntu
  apt:
    name: "{{ item }}"
    state: present
    update_cache: yes
  with_items:
    - python-pip
    - docker
    - docker.io
  when: ansible_distribution == 'Ubuntu'
  tags:
    with_pkg

- name: install pip and docker on debian
  apt:
    name: "{{ item }}"
    state: present
    update_cache: yes
  with_items:
    - python-pip
    - docker-engine
  when: ansible_distribution == 'Debian'
  tags:
    with_pkg

# install epel for pip
- name: install epel on redhat
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - epel-release
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "yum"
  tags:
    with_pkg
  failed_when: false

- name: enable extras repo for centos
  yum_repository:
    name: extras
    state: present
    enabled: yes
  when: ansible_distribution == 'CentOS'
  tags:
    with_pkg

- name: install pip on redhat
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - python-pip
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "yum"
  tags:
    with_pkg

- name: install docker-engine on redhat
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - docker-engine
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "yum"
  tags:
    with_pkg
  failed_when: false

# for CentOS
- name: install docker on redhat
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - docker
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "yum"
  tags:
    with_pkg
  failed_when: false

# docker package could be docker-engine or docker
- name: install pip and docker on redhat
  dnf:
    name: "{{ item }}"
    state: present
  with_items:
    - python-pip
    - docker-engine
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "dnf"
  tags:
    with_pkg

- name: start docker service
  service:
    name: docker
    state: started
    enabled: yes
  tags:
    with_pkg

# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
- name: install six
  pip:
    name: six
    version: 1.9.0
  tags:
    with_pkg

# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
- name: install docker-py
  pip:
    name: docker-py
    version: 1.1.0
  tags:
    with_pkg
  when: ansible_version['full'] | version_compare('2.1.0.0', '<')

- name: install docker-py
  pip:
    name: docker-py
    state: latest
  tags:
    with_pkg
  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')

- name: install ntp
  package:
    name: ntp
    state: present
  when:
    - ntp_service_enabled
  tags:
    with_pkg
@@ -4,21 +4,21 @@ After=docker.service

 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop {{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm {{ ansible_hostname }}
+ExecStartPre=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/docker rm ceph-rbd-mirror-{{ ansible_hostname }}
 ExecStart=/usr/bin/docker run --rm --net=host \
 {% if not rbd_mirror_containerized_deployment_with_kv -%}
   -v /etc/ceph:/etc/ceph \
 {% else -%}
   -e KV_TYPE={{kv_type}} \
   -e KV_IP={{kv_endpoint}} \
+  -e KV_PORT={{kv_port}} \
 {% endif -%}
   -v /etc/localtime:/etc/localtime:ro \
-  --privileged \
   -e CEPH_DAEMON=RBD_MIRROR \
-  --name={{ ansible_hostname }} \
+  --name=ceph-rbd-mirror-{{ ansible_hostname }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop {{ ansible_hostname }}
+ExecStopPost=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -3,8 +3,8 @@
   file:
     path: "{{ item }}"
    state: directory
-    owner: "64045"
-    group: "64045"
+    owner: "{{ bootstrap_dirs_owner }}"
+    group: "{{ bootstrap_dirs_group }}"
     mode: "0755"
   with_items:
     - /etc/ceph/
@@ -1,16 +1,4 @@
 ---
-- name: check if it is Atomic host
-  stat: path=/run/ostree-booted
-  register: stat_ostree
-  always_run: true
-
-- name: set fact for using Atomic host
-  set_fact:
-    is_atomic: '{{ stat_ostree.stat.exists }}'
-
-- include: pre_requisite.yml
-  when: not is_atomic
-
 - include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
   when:
     - is_atomic
@@ -1,142 +0,0 @@
---
- name: install pip and docker on ubuntu
  apt:
    name: "{{ item }}"
    state: present
    update_cache: yes
  with_items:
    - python-pip
    - docker
    - docker.io
  when: ansible_distribution == 'Ubuntu'
  tags:
    with_pkg

- name: install pip and docker on debian
  apt:
    name: "{{ item }}"
    state: present
    update_cache: yes
  with_items:
    - python-pip
    - docker-engine
  when: ansible_distribution == 'Debian'
  tags:
    with_pkg

- name: install epel-release on redhat
  yum:
    name: epel-release
    state: present
  when: ansible_os_family == 'RedHat'
  tags:
    with_pkg

- name: enable extras repo on centos
  yum_repository:
    name: extras
    state: present
    enabled: yes
  when: ansible_distribution == 'CentOS'
  tags:
    with_pkg

- name: install pip on redhat
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - python-pip
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "yum"
  tags:
    with_pkg

- name: install docker-engine on redhat
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - docker-engine
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "yum"
  tags:
    with_pkg
  failed_when: false

# for CentOS
- name: install docker on redhat
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - docker
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "yum"
  tags:
    with_pkg
  failed_when: false

- name: install pip and docker on redhat
  dnf:
    name: "{{ item }}"
    state: present
  with_items:
    - python-pip
    - docker-engine
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "dnf"
  tags:
    with_pkg

# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
- name: install six
  pip:
    name: six
    version: 1.9.0
  tags:
    with_pkg

# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
- name: install docker-py
  pip:
    name: docker-py
    version: 1.1.0
  tags:
    with_pkg
  when: ansible_version['full'] | version_compare('2.1.0.0', '<')

- name: install docker-py
  pip:
    name: docker-py
    state: latest
  tags:
    with_pkg
  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')

- name: pause after docker install before starting (on openstack vms)
  pause: seconds=5
  when: ceph_docker_on_openstack
  tags:
    with_pkg

- name: start docker service
  service:
    name: docker
    state: started
    enabled: yes
  tags:
    with_pkg

- name: install ntp
  package:
    name: ntp
    state: present
  when:
    - ntp_service_enabled
  tags:
    with_pkg
@@ -2,7 +2,7 @@
 - name: run the ceph rest api docker image
   docker:
     image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-    name: "{{ ansible_hostname }}-ceph-restapi"
+    name: "ceph-restapi-{{ ansible_hostname }}"
     net: host
     expose: "{{ ceph_restapi_port }}"
     state: running
@@ -1,6 +1,6 @@
 ---
 - name: check if ceph rest api is already started
-  shell: "pgrep ceph-rest-api"
+  shell: "pgrep -f ceph-rest-api"
   changed_when: false
   failed_when: false
   always_run: true
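The -f flag matters here because ceph-rest-api runs as a Python script, so the kernel's short process name is the interpreter rather than the script. A sketch of the difference (output shapes are illustrative):

    # pgrep matches the short process name by default (here "python"),
    # so the script name only matches with -f, which scans the full command line.
    pgrep ceph-rest-api      # no output: comm is "python"
    pgrep -f ceph-rest-api   # matches e.g. "python /usr/bin/ceph-rest-api ..."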
@@ -17,11 +17,6 @@ copy_admin_key: false
 #
 cephx: true

-# Used for the sudo exception while starting the radosgw process
-# a new entry /etc/sudoers.d/ceph will be created
-# allowing root to not require tty
-radosgw_user: root
-
 # Multi-site remote pull URL variables
 rgw_pull_port: "{{ radosgw_civetweb_port }}"
 rgw_pull_proto: "http"
@@ -3,8 +3,8 @@
   file:
     path: "{{ item }}"
     state: directory
-    owner: "64045"
-    group: "64045"
+    owner: "{{ bootstrap_dirs_owner }}"
+    group: "{{ bootstrap_dirs_group }}"
     mode: "0755"
   with_items:
     - /etc/ceph/
@@ -6,23 +6,11 @@
   failed_when: false
   always_run: true

-- name: check if it is Atomic host
-  stat: path=/run/ostree-booted
-  always_run: true
-  register: stat_ostree
-
-- name: set fact for using Atomic host
-  set_fact:
-    is_atomic: '{{ stat_ostree.stat.exists }}'
-
 - include: checks.yml
   when:
     - ceph_health.rc != 0
     - not "{{ rolling_update | default(false) }}"

-- include: pre_requisite.yml
-  when: not is_atomic
-
 - include: "{{ playbook_dir }}/roles/ceph-common/tasks/misc/ntp_atomic.yml"
   when:
     - is_atomic
@@ -1,130 +0,0 @@
---
- name: install pip and docker on ubuntu
  apt:
    name: "{{ item }}"
    state: present
    update_cache: yes
  with_items:
    - python-pip
    - docker
    - docker.io
  when: ansible_distribution == 'Ubuntu'
  tags:
    with_pkg

- name: install pip and docker on debian
  apt:
    name: "{{ item }}"
    state: present
    update_cache: yes
  with_items:
    - python-pip
    - docker-engine
  when: ansible_distribution == 'Debian'
  tags:
    with_pkg

- name: install epel-release on redhat
  yum:
    name: epel-release
    state: present
  when: ansible_os_family == 'RedHat'
  tags:
    with_pkg

- name: enable extras repo on centos
  yum_repository:
    name: extras
    state: present
    enabled: yes
  when: ansible_distribution == 'CentOS'
  tags:
    with_pkg

- name: install pip on redhat
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - python-pip
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "yum"
  tags:
    with_pkg

- name: install docker-engine on redhat
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - docker-engine
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "yum"
  tags:
    with_pkg
  failed_when: false

# for CentOS
- name: install docker on redhat
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - docker
  when:
    - ansible_os_family == 'RedHat'
    - ansible_pkg_mgr == "yum"
  tags:
    with_pkg
  failed_when: false

# NOTE (jimcurtis): need at least version 1.9.0 of six or we get:
# re:NameError: global name 'DEFAULT_DOCKER_API_VERSION' is not defined
- name: install six
  pip:
    name: six
    version: 1.9.0
  tags:
    with_pkg

# NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
- name: install docker-py
  pip:
    name: docker-py
    version: 1.1.0
  tags:
    with_pkg
  when: ansible_version['full'] | version_compare('2.1.0.0', '<')

- name: install docker-py
  pip:
    name: docker-py
    state: latest
  tags:
    with_pkg
  when: ansible_version['full'] | version_compare('2.1.0.0', '>=')

- name: pause after docker install before starting (on openstack vms)
  pause: seconds=5
  when: ceph_docker_on_openstack
  tags:
    with_pkg

- name: start docker service
  service:
    name: docker
    state: started
    enabled: yes
  tags:
    with_pkg

- name: install ntp
  package:
    name: ntp
    state: present
  when:
    - ntp_service_enabled
  tags:
    with_pkg
@@ -40,12 +40,3 @@
     group: "ceph"
     mode: "0600"
   when: cephx
-
-- name: generate rados gateway sudoers file
-  template:
-    src: ceph.j2
-    dest: /etc/sudoers.d/ceph
-    owner: root
-    group: root
-    mode: 0400
-  when: ansible_distribution != "Ubuntu"
@@ -4,8 +4,8 @@ After=docker.service

 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop {{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm {{ ansible_hostname }}
+ExecStartPre=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/docker rm ceph-rgw-{{ ansible_hostname }}
 ExecStart=/usr/bin/docker run --rm --net=host \
 {% if not rgw_containerized_deployment_with_kv -%}
   -v /var/lib/ceph:/var/lib/ceph \

@@ -13,14 +13,15 @@ ExecStart=/usr/bin/docker run --rm --net=host \
 {% else -%}
   -e KV_TYPE={{kv_type}} \
   -e KV_IP={{kv_endpoint}} \
+  -e KV_PORT={{kv_port}} \
 {% endif -%}
   -v /etc/localtime:/etc/localtime:ro \
   --privileged \
   -e CEPH_DAEMON=RGW \
   {{ ceph_rgw_docker_extra_env }} \
-  --name={{ ansible_hostname }} \
+  --name=ceph-rgw-{{ ansible_hostname }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop {{ ansible_hostname }}
+ExecStopPost=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
@@ -1,2 +0,0 @@
-# {{ ansible_managed }}
-Defaults:{{ radosgw_user }} !requiretty
@@ -0,0 +1,498 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

require 'yaml'
require 'time'
VAGRANTFILE_API_VERSION = '2'

DEBUG = false

config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))
settings=YAML.load_file(config_file)

LABEL_PREFIX = settings['label_prefix'] ? settings['label_prefix'] + "-" : ""
NMONS = settings['mon_vms']
NOSDS = settings['osd_vms']
NMDSS = settings['mds_vms']
NRGWS = settings['rgw_vms']
NNFSS = settings['nfs_vms']
RESTAPI = settings['restapi']
NRBD_MIRRORS = settings['rbd_mirror_vms']
CLIENTS = settings['client_vms']
NISCSI_GWS = settings['iscsi_gw_vms']
PUBLIC_SUBNET = settings['public_subnet']
CLUSTER_SUBNET = settings['cluster_subnet']
BOX = settings['vagrant_box']
BOX_URL = settings['vagrant_box_url']
SYNC_DIR = settings['vagrant_sync_dir']
MEMORY = settings['memory']
ETH = settings['eth']
DOCKER = settings['docker']
USER = settings['ssh_username']

ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode')
DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false)
DISK_UUID = Time.now.utc.to_i


ansible_provision = proc do |ansible|
  if DOCKER then
    ansible.playbook = 'site-docker.yml'
    if settings['skip_tags']
      ansible.skip_tags = settings['skip_tags']
    end
  else
    ansible.playbook = 'site.yml'
  end

  # Note: Can't do ranges like mon[0-2] in groups because
  # these aren't supported by Vagrant, see
  # https://github.com/mitchellh/vagrant/issues/3539
  ansible.groups = {
    'mons' => (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" },
    'osds' => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" },
    'mdss' => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" },
    'rgws' => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" },
    'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" },
    'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" },
    'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" },
    'iscsi_gw' => (0..NISCSI_GWS - 1).map { |j| "#{LABEL_PREFIX}iscsi_gw#{j}" }
  }

  if RESTAPI then
    ansible.groups['restapis'] = (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" }
  end

  ansible.extra_vars = {
    cluster_network: "#{CLUSTER_SUBNET}.0/24",
    journal_size: 100,
    public_network: "#{PUBLIC_SUBNET}.0/24",
  }

  # In a production deployment, these should be secret
  if DOCKER then
    ansible.extra_vars = ansible.extra_vars.merge({
      mon_containerized_deployment: 'true',
      osd_containerized_deployment: 'true',
      mds_containerized_deployment: 'true',
      rgw_containerized_deployment: 'true',
      nfs_containerized_deployment: 'true',
      restapi_containerized_deployment: 'true',
      rbd_mirror_containerized_deployment: 'true',
      ceph_mon_docker_interface: ETH,
      ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
      ceph_osd_docker_devices: settings['disks'],
      devices: settings['disks'],
      ceph_docker_on_openstack: BOX == 'openstack',
      ceph_rgw_civetweb_port: 8080,
      generate_fsid: 'true',
    })
  else
    ansible.extra_vars = ansible.extra_vars.merge({
      devices: settings['disks'],
      journal_collocation: 'true',
      monitor_interface: ETH,
      os_tuning_params: settings['os_tuning_params'],
      pool_default_size: '2',
    })
  end

  if BOX == 'linode' then
    ansible.sudo = true
    # Use monitor_address_block instead of monitor_interface:
    ansible.extra_vars.delete(:monitor_interface)
    ansible.extra_vars = ansible.extra_vars.merge({
      cluster_network: "#{CLUSTER_SUBNET}.0/16",
      devices: ['/dev/sdc'], # hardcode leftover disk
      journal_collocation: 'true',
      monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
      public_network: "#{PUBLIC_SUBNET}.0/16",
    })
  end

  if DEBUG then
    ansible.verbose = '-vvv'
  end
  ansible.limit = 'all'
end

def create_vmdk(name, size)
  dir = Pathname.new(__FILE__).expand_path.dirname
  path = File.join(dir, '.vagrant', name + '.vmdk')
  `vmware-vdiskmanager -c -s #{size} -t 0 -a scsi #{path} \
   2>&1 > /dev/null` unless File.exist?(path)
end

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = BOX
  config.vm.box_url = BOX_URL
  config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048
  config.ssh.private_key_path = settings['ssh_private_key_path']
  config.ssh.username = USER

  # Faster bootup. Disables mounting the sync folder for libvirt and virtualbox
  if DISABLE_SYNCED_FOLDER
    config.vm.provider :virtualbox do |v,override|
      override.vm.synced_folder '.', SYNC_DIR, disabled: true
    end
    config.vm.provider :libvirt do |v,override|
      override.vm.synced_folder '.', SYNC_DIR, disabled: true
    end
  end

  if BOX == 'openstack'
    # OpenStack VMs
    config.vm.provider :openstack do |os|
      config.vm.synced_folder ".", "/home/#{USER}/vagrant", disabled: true
      config.ssh.pty = true
      os.openstack_auth_url = settings['os_openstack_auth_url']
      os.username = settings['os_username']
      os.password = settings['os_password']
      os.tenant_name = settings['os_tenant_name']
      os.region = settings['os_region']
      os.flavor = settings['os_flavor']
      os.image = settings['os_image']
      os.keypair_name = settings['os_keypair_name']
      os.security_groups = ['default']

      if settings['os_networks'] then
        os.networks = settings['os_networks']
      end

      if settings['os_floating_ip_pool'] then
        os.floating_ip_pool = settings['os_floating_ip_pool']
      end

      config.vm.provision "shell", inline: "true", upload_path: "/home/#{USER}/vagrant-shell"
    end
  elsif BOX == 'linode'
    config.vm.provider :linode do |provider, override|
      provider.token = ENV['LINODE_API_KEY']
      provider.distribution = settings['cloud_distribution'] # 'Ubuntu 16.04 LTS'
      provider.datacenter = settings['cloud_datacenter']
      provider.plan = MEMORY.to_s
      provider.private_networking = true
      # root install generally takes <1GB
      provider.xvda_size = 4*1024
      # add some swap as the Linode distros require it
      provider.swap_size = 128
    end
  end

  (0..CLIENTS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
      client.vm.hostname = "#{LABEL_PREFIX}ceph-client#{i}"
      if ASSIGN_STATIC_IP
        client.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.4#{i}"
      end
      # Virtualbox
      client.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      client.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      client.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end

      # Parallels
      client.vm.provider "parallels" do |prl|
        prl.name = "ceph-client#{i}"
        prl.memory = "#{MEMORY}"
      end

      client.vm.provider :linode do |provider|
        provider.label = client.vm.hostname
      end
    end
  end

  (0..NRGWS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
      rgw.vm.hostname = "#{LABEL_PREFIX}ceph-rgw#{i}"
      if ASSIGN_STATIC_IP
        rgw.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.5#{i}"
      end

      # Virtualbox
      rgw.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      rgw.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      rgw.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end

      # Parallels
      rgw.vm.provider "parallels" do |prl|
        prl.name = "ceph-rgw#{i}"
        prl.memory = "#{MEMORY}"
      end

      rgw.vm.provider :linode do |provider|
        provider.label = rgw.vm.hostname
      end
    end
  end

  (0..NNFSS - 1).each do |i|
    config.vm.define "nfs#{i}" do |nfs|
      nfs.vm.hostname = "ceph-nfs#{i}"
      if ASSIGN_STATIC_IP
        nfs.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.6#{i}"
      end

      # Virtualbox
      nfs.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      nfs.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      nfs.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end

      # Parallels
      nfs.vm.provider "parallels" do |prl|
        prl.name = "ceph-nfs#{i}"
        prl.memory = "#{MEMORY}"
      end

      nfs.vm.provider :linode do |provider|
        provider.label = nfs.vm.hostname
      end
    end
  end

  (0..NMDSS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
      mds.vm.hostname = "#{LABEL_PREFIX}ceph-mds#{i}"
      if ASSIGN_STATIC_IP
        mds.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.7#{i}"
      end
      # Virtualbox
      mds.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      mds.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      mds.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end
      # Parallels
      mds.vm.provider "parallels" do |prl|
        prl.name = "ceph-mds#{i}"
        prl.memory = "#{MEMORY}"
      end

      mds.vm.provider :linode do |provider|
        provider.label = mds.vm.hostname
      end
    end
  end

  (0..NRBD_MIRRORS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}rbd_mirror#{i}" do |rbd_mirror|
      rbd_mirror.vm.hostname = "#{LABEL_PREFIX}ceph-rbd-mirror#{i}"
      if ASSIGN_STATIC_IP
        rbd_mirror.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.8#{i}"
      end
      # Virtualbox
      rbd_mirror.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      rbd_mirror.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      rbd_mirror.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end
      # Parallels
      rbd_mirror.vm.provider "parallels" do |prl|
        prl.name = "ceph-rbd-mirror#{i}"
        prl.memory = "#{MEMORY}"
      end

      rbd_mirror.vm.provider :linode do |provider|
        provider.label = rbd_mirror.vm.hostname
      end
    end
  end

  (0..NISCSI_GWS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}iscsi_gw#{i}" do |iscsi_gw|
      iscsi_gw.vm.hostname = "#{LABEL_PREFIX}ceph-iscsi-gw#{i}"
      if ASSIGN_STATIC_IP
        iscsi_gw.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.9#{i}"
      end
      # Virtualbox
      iscsi_gw.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      iscsi_gw.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      iscsi_gw.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end
      # Parallels
      iscsi_gw.vm.provider "parallels" do |prl|
        prl.name = "ceph-iscsi-gw#{i}"
        prl.memory = "#{MEMORY}"
      end

      iscsi_gw.vm.provider :linode do |provider|
        provider.label = iscsi_gw.vm.hostname
      end
    end
  end

  (0..NMONS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
      mon.vm.hostname = "#{LABEL_PREFIX}ceph-mon#{i}"
      if ASSIGN_STATIC_IP
        mon.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.1#{i}"
      end
      # Virtualbox
      mon.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      mon.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      mon.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = false
      end

      # Parallels
      mon.vm.provider "parallels" do |prl|
        prl.name = "ceph-mon#{i}"
        prl.memory = "#{MEMORY}"
      end

      mon.vm.provider :linode do |provider|
        provider.label = mon.vm.hostname
      end
    end
  end

  (0..NOSDS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
      osd.vm.hostname = "#{LABEL_PREFIX}ceph-osd#{i}"
      if ASSIGN_STATIC_IP
        osd.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.10#{i}"
        osd.vm.network :private_network,
          ip: "#{CLUSTER_SUBNET}.20#{i}"
      end
      # Virtualbox
      osd.vm.provider :virtualbox do |vb|
        # Create our own controller for consistency and to remove VM dependency
        vb.customize ['storagectl', :id,
                      '--name', 'OSD Controller',
                      '--add', 'scsi']
        (0..1).each do |d|
          vb.customize ['createhd',
                        '--filename', "disk-#{i}-#{d}",
                        '--size', '11000'] unless File.exist?("disk-#{i}-#{d}.vdi")
          vb.customize ['storageattach', :id,
                        '--storagectl', 'OSD Controller',
                        '--port', 3 + d,
                        '--device', 0,
                        '--type', 'hdd',
                        '--medium', "disk-#{i}-#{d}.vdi"]
        end
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      osd.vm.provider :vmware_fusion do |v|
        (0..1).each do |d|
          v.vmx["scsi0:#{d + 1}.present"] = 'TRUE'
          v.vmx["scsi0:#{d + 1}.fileName"] =
            create_vmdk("disk-#{i}-#{d}", '11000MB')
        end
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      driverletters = ('a'..'z').to_a
      osd.vm.provider :libvirt do |lv|
        # always make /dev/sd{a/b/c} so that CI can ensure that
        # virtualbox and libvirt will have the same devices to use for OSDs
        (0..2).each do |d|
          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '12G', :bus => "ide"
        end
        lv.memory = MEMORY
        lv.random_hostname = false
      end

      # Parallels
      osd.vm.provider "parallels" do |prl|
        prl.name = "ceph-osd#{i}"
        prl.memory = "#{MEMORY}"
        (0..1).each do |d|
          prl.customize ["set", :id,
                         "--device-add",
                         "hdd",
                         "--iface",
                         "sata"]
        end
      end

      osd.vm.provider :linode do |provider|
        provider.label = osd.vm.hostname
      end

      # Run the provisioner after the last machine comes up
      osd.vm.provision 'ansible', &ansible_provision if i == (NOSDS - 1)
    end
  end
end
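For reference, a typical way to drive this Vagrantfile from the scenario directory (standard Vagrant CLI; the provider choice depends on the local environment):

    # Boot all VMs; the Ansible provisioner is attached to the last OSD and
    # runs with limit=all once every machine is up.
    vagrant up
    # Re-run only the provisioning step against the already-running VMs.
    vagrant provision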
@@ -0,0 +1,25 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True

ceph_stable: True
mon_containerized_deployment: True
osd_containerized_deployment: True
mds_containerized_deployment: True
rgw_containerized_deployment: True
cluster: test
ceph_mon_docker_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
ceph_rgw_civetweb_port: 8080
ceph_osd_docker_devices: "{{ devices }}"
devices:
  - /dev/sda
raw_journal_devices:
  - /dev/sdb
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
ceph_osd_docker_run_script_path: /var/tmp
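With these values, the prepare-env variable renders to a concrete flag string; a hedged sketch of the expansion (the flags follow from the variables just defined, and they are appended to the osd prepare docker run shown earlier in this diff):

    # What ceph_osd_docker_prepare_env expands to for this scenario.
    PREPARE_ENV="-e CLUSTER=test -e OSD_JOURNAL_SIZE=100 -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL=/dev/sdb"
    echo "${PREPARE_ENV}"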
@@ -0,0 +1,5 @@
[mons]
mon0

[osds]
osd0
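A quick sanity check against this inventory before running the playbook (assumes SSH access to the mon0/osd0 VMs is already configured):

    # Confirm both groups are reachable.
    ansible -i hosts all -m ping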
@@ -0,0 +1,51 @@
---

# DEPLOY CONTAINERIZED DAEMONS
docker: True

# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0

# Deploy RESTAPI on each of the Monitors
restapi: true

# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.15
cluster_subnet: 192.168.16

# MEMORY
# set 1024 for CentOS
memory: 1024

# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"

# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# NOTE(leseb): we use centos for this scenario since we at least need Docker version 1.12.5,
# which is not available in Atomic Host.
# There are bugs like this one: https://github.com/docker/docker/issues/12694
vagrant_box: centos/7

#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on the vagrant box.
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack; defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
498
tests/functional/centos/7/docker-cluster-dmcrypt-journal-collocation/Vagrantfile
vendored
100644
498
tests/functional/centos/7/docker-cluster-dmcrypt-journal-collocation/Vagrantfile
vendored
100644
|
@ -0,0 +1,498 @@
|
||||||
|
# -*- mode: ruby -*-
|
||||||
|
# vi: set ft=ruby :
|
||||||
|
|
||||||
|
require 'yaml'
|
||||||
|
require 'time'
|
||||||
|
VAGRANTFILE_API_VERSION = '2'
|
||||||
|
|
||||||
|
DEBUG = false
|
||||||
|
|
||||||
|
config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))
|
||||||
|
settings=YAML.load_file(config_file)
|
||||||
|
|
||||||
|
LABEL_PREFIX = settings['label_prefix'] ? settings['label_prefix'] + "-" : ""
|
||||||
|
NMONS = settings['mon_vms']
|
||||||
|
NOSDS = settings['osd_vms']
|
||||||
|
NMDSS = settings['mds_vms']
|
||||||
|
NRGWS = settings['rgw_vms']
|
||||||
|
NNFSS = settings['nfs_vms']
|
||||||
|
RESTAPI = settings['restapi']
|
||||||
|
NRBD_MIRRORS = settings['rbd_mirror_vms']
|
||||||
|
CLIENTS = settings['client_vms']
|
||||||
|
NISCSI_GWS = settings['iscsi_gw_vms']
|
||||||
|
PUBLIC_SUBNET = settings['public_subnet']
|
||||||
|
CLUSTER_SUBNET = settings['cluster_subnet']
|
||||||
|
BOX = settings['vagrant_box']
|
||||||
|
BOX_URL = settings['vagrant_box_url']
|
||||||
|
SYNC_DIR = settings['vagrant_sync_dir']
|
||||||
|
MEMORY = settings['memory']
|
||||||
|
ETH = settings['eth']
|
||||||
|
DOCKER = settings['docker']
|
||||||
|
USER = settings['ssh_username']
|
||||||
|
|
||||||
|
ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode')
|
||||||
|
DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false)
|
||||||
|
DISK_UUID = Time.now.utc.to_i
|
||||||
|
|
||||||
|
|
||||||
|
ansible_provision = proc do |ansible|
|
||||||
|
if DOCKER then
|
||||||
|
ansible.playbook = 'site-docker.yml'
|
||||||
|
if settings['skip_tags']
|
||||||
|
ansible.skip_tags = settings['skip_tags']
|
||||||
|
end
|
||||||
|
else
|
||||||
|
ansible.playbook = 'site.yml'
|
||||||
|
end
|
||||||
|
|
||||||
|
# Note: Can't do ranges like mon[0-2] in groups because
|
||||||
|
# these aren't supported by Vagrant, see
|
||||||
|
# https://github.com/mitchellh/vagrant/issues/3539
|
||||||
|
ansible.groups = {
|
||||||
|
'mons' => (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" },
|
||||||
|
'osds' => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" },
|
||||||
|
'mdss' => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" },
|
||||||
|
'rgws' => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" },
|
||||||
|
'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" },
|
||||||
|
'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" },
|
||||||
|
'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" },
|
||||||
|
'iscsi_gw' => (0..NISCSI_GWS - 1).map { |j| "#{LABEL_PREFIX}iscsi_gw#{j}" }
|
||||||
|
}
|
||||||
|
|
||||||
|
if RESTAPI then
|
||||||
|
ansible.groups['restapis'] = (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" }
|
||||||
|
end
|
||||||
|
|
||||||
|
ansible.extra_vars = {
|
||||||
|
cluster_network: "#{CLUSTER_SUBNET}.0/24",
|
||||||
|
journal_size: 100,
|
||||||
|
public_network: "#{PUBLIC_SUBNET}.0/24",
|
||||||
|
}
|
||||||
|
|
||||||
|
# In a production deployment, these should be secret
|
||||||
|
if DOCKER then
|
||||||
|
ansible.extra_vars = ansible.extra_vars.merge({
|
||||||
|
mon_containerized_deployment: 'true',
|
||||||
|
osd_containerized_deployment: 'true',
|
||||||
|
mds_containerized_deployment: 'true',
|
||||||
|
rgw_containerized_deployment: 'true',
|
||||||
|
nfs_containerized_deployment: 'true',
|
||||||
|
restapi_containerized_deployment: 'true',
|
||||||
|
rbd_mirror_containerized_deployment: 'true',
|
||||||
|
ceph_mon_docker_interface: ETH,
|
||||||
|
ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
|
||||||
|
ceph_osd_docker_devices: settings['disks'],
|
||||||
|
devices: settings['disks'],
|
||||||
|
ceph_docker_on_openstack: BOX == 'openstack',
|
||||||
|
ceph_rgw_civetweb_port: 8080,
|
||||||
|
generate_fsid: 'true',
|
||||||
|
})
|
||||||
|
else
|
||||||
|
ansible.extra_vars = ansible.extra_vars.merge({
|
||||||
|
devices: settings['disks'],
|
||||||
|
journal_collocation: 'true',
|
||||||
|
monitor_interface: ETH,
|
||||||
|
os_tuning_params: settings['os_tuning_params'],
|
||||||
|
pool_default_size: '2',
|
||||||
|
})
|
||||||
|
end
|
||||||
|
|
||||||
|
if BOX == 'linode' then
|
||||||
|
ansible.sudo = true
|
||||||
|
# Use monitor_address_block instead of monitor_interface:
|
||||||
|
ansible.extra_vars.delete(:monitor_interface)
|
||||||
|
ansible.extra_vars = ansible.extra_vars.merge({
|
||||||
|
cluster_network: "#{CLUSTER_SUBNET}.0/16",
|
||||||
|
devices: ['/dev/sdc'], # hardcode leftover disk
|
||||||
|
journal_collocation: 'true',
|
||||||
|
monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
|
||||||
|
public_network: "#{PUBLIC_SUBNET}.0/16",
|
||||||
|
})
|
||||||
|
end
|
||||||
|
|
||||||
|
if DEBUG then
|
||||||
|
ansible.verbose = '-vvv'
|
||||||
|
end
|
||||||
|
ansible.limit = 'all'
|
||||||
|
end
|
||||||
|
|
||||||
|
def create_vmdk(name, size)
|
||||||
|
dir = Pathname.new(__FILE__).expand_path.dirname
|
||||||
|
path = File.join(dir, '.vagrant', name + '.vmdk')
|
||||||
|
`vmware-vdiskmanager -c -s #{size} -t 0 -a scsi #{path} \
|
||||||
|
2>&1 > /dev/null` unless File.exist?(path)
|
||||||
|
end
|
||||||
|
|
||||||
|
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||||
|
config.vm.box = BOX
|
||||||
|
config.vm.box_url = BOX_URL
|
||||||
|
config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048
|
||||||
|
config.ssh.private_key_path = settings['ssh_private_key_path']
|
||||||
|
config.ssh.username = USER
|
||||||
|
|
||||||
|
# Faster bootup. Disables mounting the sync folder for libvirt and virtualbox
|
||||||
|
if DISABLE_SYNCED_FOLDER
|
||||||
|
config.vm.provider :virtualbox do |v,override|
|
||||||
|
override.vm.synced_folder '.', SYNC_DIR, disabled: true
|
||||||
|
end
|
||||||
|
config.vm.provider :libvirt do |v,override|
|
||||||
|
override.vm.synced_folder '.', SYNC_DIR, disabled: true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
  if BOX == 'openstack'
    # OpenStack VMs
    config.vm.provider :openstack do |os|
      config.vm.synced_folder ".", "/home/#{USER}/vagrant", disabled: true
      config.ssh.pty = true
      os.openstack_auth_url = settings['os_openstack_auth_url']
      os.username = settings['os_username']
      os.password = settings['os_password']
      os.tenant_name = settings['os_tenant_name']
      os.region = settings['os_region']
      os.flavor = settings['os_flavor']
      os.image = settings['os_image']
      os.keypair_name = settings['os_keypair_name']
      os.security_groups = ['default']

      if settings['os_networks'] then
        os.networks = settings['os_networks']
      end

      if settings['os_floating_ip_pool'] then
        os.floating_ip_pool = settings['os_floating_ip_pool']
      end

      config.vm.provision "shell", inline: "true", upload_path: "/home/#{USER}/vagrant-shell"
    end
  elsif BOX == 'linode'
    config.vm.provider :linode do |provider, override|
      provider.token = ENV['LINODE_API_KEY']
      provider.distribution = settings['cloud_distribution'] # 'Ubuntu 16.04 LTS'
      provider.datacenter = settings['cloud_datacenter']
      provider.plan = MEMORY.to_s
      provider.private_networking = true
      # root install generally takes <1GB
      provider.xvda_size = 4*1024
      # add some swap as the Linode distros require it
      provider.swap_size = 128
    end
  end
  (0..CLIENTS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
      client.vm.hostname = "#{LABEL_PREFIX}ceph-client#{i}"
      if ASSIGN_STATIC_IP
        client.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.4#{i}"
      end
      # Virtualbox
      client.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      client.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      client.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end

      # Parallels
      client.vm.provider "parallels" do |prl|
        prl.name = "ceph-client#{i}"
        prl.memory = "#{MEMORY}"
      end

      client.vm.provider :linode do |provider|
        provider.label = client.vm.hostname
      end
    end
  end
  (0..NRGWS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
      rgw.vm.hostname = "#{LABEL_PREFIX}ceph-rgw#{i}"
      if ASSIGN_STATIC_IP
        rgw.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.5#{i}"
      end

      # Virtualbox
      rgw.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      rgw.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      rgw.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end

      # Parallels
      rgw.vm.provider "parallels" do |prl|
        prl.name = "ceph-rgw#{i}"
        prl.memory = "#{MEMORY}"
      end

      rgw.vm.provider :linode do |provider|
        provider.label = rgw.vm.hostname
      end
    end
  end
  (0..NNFSS - 1).each do |i|
    config.vm.define "nfs#{i}" do |nfs|
      nfs.vm.hostname = "ceph-nfs#{i}"
      if ASSIGN_STATIC_IP
        nfs.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.6#{i}"
      end

      # Virtualbox
      nfs.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      nfs.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      nfs.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end

      # Parallels
      nfs.vm.provider "parallels" do |prl|
        prl.name = "ceph-nfs#{i}"
        prl.memory = "#{MEMORY}"
      end

      nfs.vm.provider :linode do |provider|
        provider.label = nfs.vm.hostname
      end
    end
  end
  (0..NMDSS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
      mds.vm.hostname = "#{LABEL_PREFIX}ceph-mds#{i}"
      if ASSIGN_STATIC_IP
        mds.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.7#{i}"
      end
      # Virtualbox
      mds.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      mds.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      mds.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end
      # Parallels
      mds.vm.provider "parallels" do |prl|
        prl.name = "ceph-mds#{i}"
        prl.memory = "#{MEMORY}"
      end

      mds.vm.provider :linode do |provider|
        provider.label = mds.vm.hostname
      end
    end
  end
  (0..NRBD_MIRRORS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}rbd_mirror#{i}" do |rbd_mirror|
      rbd_mirror.vm.hostname = "#{LABEL_PREFIX}ceph-rbd-mirror#{i}"
      if ASSIGN_STATIC_IP
        rbd_mirror.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.8#{i}"
      end
      # Virtualbox
      rbd_mirror.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      rbd_mirror.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      rbd_mirror.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end
      # Parallels
      rbd_mirror.vm.provider "parallels" do |prl|
        prl.name = "ceph-rbd-mirror#{i}"
        prl.memory = "#{MEMORY}"
      end

      rbd_mirror.vm.provider :linode do |provider|
        provider.label = rbd_mirror.vm.hostname
      end
    end
  end
  (0..NISCSI_GWS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}iscsi_gw#{i}" do |iscsi_gw|
      iscsi_gw.vm.hostname = "#{LABEL_PREFIX}ceph-iscsi-gw#{i}"
      if ASSIGN_STATIC_IP
        iscsi_gw.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.9#{i}"
      end
      # Virtualbox
      iscsi_gw.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      iscsi_gw.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      iscsi_gw.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = true
      end
      # Parallels
      iscsi_gw.vm.provider "parallels" do |prl|
        prl.name = "ceph-iscsi-gw#{i}"
        prl.memory = "#{MEMORY}"
      end

      iscsi_gw.vm.provider :linode do |provider|
        provider.label = iscsi_gw.vm.hostname
      end
    end
  end
  (0..NMONS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
      mon.vm.hostname = "#{LABEL_PREFIX}ceph-mon#{i}"
      if ASSIGN_STATIC_IP
        mon.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.1#{i}"
      end
      # Virtualbox
      mon.vm.provider :virtualbox do |vb|
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      mon.vm.provider :vmware_fusion do |v|
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      mon.vm.provider :libvirt do |lv|
        lv.memory = MEMORY
        lv.random_hostname = false
      end

      # Parallels
      mon.vm.provider "parallels" do |prl|
        prl.name = "ceph-mon#{i}"
        prl.memory = "#{MEMORY}"
      end

      mon.vm.provider :linode do |provider|
        provider.label = mon.vm.hostname
      end
    end
  end
  (0..NOSDS - 1).each do |i|
    config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
      osd.vm.hostname = "#{LABEL_PREFIX}ceph-osd#{i}"
      if ASSIGN_STATIC_IP
        osd.vm.network :private_network,
          ip: "#{PUBLIC_SUBNET}.10#{i}"
        osd.vm.network :private_network,
          ip: "#{CLUSTER_SUBNET}.20#{i}"
      end
      # Virtualbox
      osd.vm.provider :virtualbox do |vb|
        # Create our own controller for consistency and to remove VM dependency
        vb.customize ['storagectl', :id,
                      '--name', 'OSD Controller',
                      '--add', 'scsi']
        (0..1).each do |d|
          vb.customize ['createhd',
                        '--filename', "disk-#{i}-#{d}",
                        '--size', '11000'] unless File.exist?("disk-#{i}-#{d}.vdi")
          vb.customize ['storageattach', :id,
                        '--storagectl', 'OSD Controller',
                        '--port', 3 + d,
                        '--device', 0,
                        '--type', 'hdd',
                        '--medium', "disk-#{i}-#{d}.vdi"]
        end
        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
      end

      # VMware
      osd.vm.provider :vmware_fusion do |v|
        (0..1).each do |d|
          v.vmx["scsi0:#{d + 1}.present"] = 'TRUE'
          v.vmx["scsi0:#{d + 1}.fileName"] =
            create_vmdk("disk-#{i}-#{d}", '11000MB')
        end
        v.vmx['memsize'] = "#{MEMORY}"
      end

      # Libvirt
      driverletters = ('a'..'z').to_a
      osd.vm.provider :libvirt do |lv|
        # always make /dev/sd{a/b/c} so that CI can ensure that
        # virtualbox and libvirt will have the same devices to use for OSDs
        (0..2).each do |d|
          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '12G', :bus => "ide"
        end
        lv.memory = MEMORY
        lv.random_hostname = false
      end

      # Parallels
      osd.vm.provider "parallels" do |prl|
        prl.name = "ceph-osd#{i}"
        prl.memory = "#{MEMORY}"
        (0..1).each do |d|
          prl.customize ["set", :id,
                         "--device-add",
                         "hdd",
                         "--iface",
                         "sata"]
        end
      end

      osd.vm.provider :linode do |provider|
        provider.label = osd.vm.hostname
      end

      # Run the provisioner after the last machine comes up
      osd.vm.provision 'ansible', &ansible_provision if i == (NOSDS - 1)
    end
  end
end
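One pattern in the OSD loop deserves a note: the Ansible provisioner is attached only to the last OSD VM (`if i == (NOSDS - 1)`), and combined with `ansible.limit = 'all'` earlier in the proc this provisions the entire multi-machine cluster in a single Ansible run once every box is up, instead of re-running the playbook per VM. A minimal, self-contained sketch of the same pattern (box name and machine count are placeholders, not values from this repository):

require 'vagrant' # implicit when run via the vagrant CLI

# Sketch: provision all Vagrant machines with one Ansible run,
# triggered only when the final VM comes up.
N = 3

Vagrant.configure('2') do |config|
  config.vm.box = 'centos/7' # placeholder box

  (0..N - 1).each do |i|
    config.vm.define "node#{i}" do |node|
      node.vm.hostname = "node#{i}"
      # Only the last machine carries the provisioner...
      if i == N - 1
        node.vm.provision :ansible do |ansible|
          ansible.playbook = 'site.yml'
          ansible.limit = 'all' # ...but the one run targets every VM.
        end
      end
    end
  end
end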
@@ -0,0 +1,26 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True

ceph_stable: True
mon_containerized_deployment: True
osd_containerized_deployment: True
mds_containerized_deployment: True
rgw_containerized_deployment: True
cluster: ceph
ceph_mon_docker_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
journal_collocation: true
ceph_rgw_civetweb_port: 8080
ceph_osd_docker_devices: "{{ devices }}"
devices:
  - /dev/sda
raw_journal_devices:
  - /dev/sdb
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
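Once Ansible's Jinja templating resolves `{{ cluster }}` and `{{ journal_size }}`, the two `ceph_osd_docker_*_env` strings above become plain `docker run` environment flags. A quick Ruby rendering of that substitution, purely for illustration (the regex stands in for Jinja and handles only simple `{{ var }}` references):

vars = { 'cluster' => 'ceph', 'journal_size' => 100 }

prepare_env = '-e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} ' \
              '-e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1'

# Replace each {{ var }} reference with its value from the vars hash.
rendered = prepare_env.gsub(/\{\{\s*(\w+)\s*\}\}/) { vars[Regexp.last_match(1)].to_s }
puts rendered
# => -e CLUSTER=ceph -e OSD_JOURNAL_SIZE=100 -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1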
@@ -0,0 +1,5 @@
[mons]
mon0

[osds]
osd0
@@ -0,0 +1,51 @@
---

# DEPLOY CONTAINERIZED DAEMONS
docker: True

# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0

# Deploy RESTAPI on each of the Monitors
restapi: true

# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.15
cluster_subnet: 192.168.16

# MEMORY
# set 1024 for CentOS
memory: 1024

# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"

# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# NOTE(leseb): we use centos for this scenario since we need at least Docker
# version 1.12.5, which is not available in Atomic Host.
# There are bugs like this one: https://github.com/docker/docker/issues/12694
vagrant_box: centos/7

#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on the vagrant box.
# Set to /home/vagrant/sync for centos/7, /home/{user}/vagrant for openstack,
# and defaults to /vagrant.
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
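These variables feed the Vagrantfile shown earlier: every `settings['...']` lookup resolves against this file, and the upper-case constants (NOSDS, PUBLIC_SUBNET, MEMORY, ...) are derived from it. The loading code sits at the top of the Vagrantfile and is not part of this diff, so the sketch below is only a plausible reconstruction of that bootstrap:

require 'yaml'

# Plausible reconstruction, not the literal Vagrantfile code.
settings = YAML.load_file('vagrant_variables.yml')

NMONS         = settings['mon_vms']        # => 1
NOSDS         = settings['osd_vms']        # => 1
MEMORY        = settings['memory']         # => 1024
BOX           = settings['vagrant_box']    # => "centos/7"
PUBLIC_SUBNET = settings['public_subnet']  # => "192.168.15"

# With these values, mon0 receives the static address
# "#{PUBLIC_SUBNET}.1#{0}" => "192.168.15.10".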
@@ -21,3 +21,4 @@ ceph_osd_docker_devices: "{{ devices }}"
 devices:
   - /dev/sda
   - /dev/sdb
+ceph_osd_docker_run_script_path: /var/tmp
@@ -0,0 +1,97 @@
---
- hosts: localhost
  gather_facts: false
  become: yes
  tags:
    - vagrant_setup
  tasks:

  - name: change centos/7 vagrant box name to rhel7
    replace:
      regexp: "centos/7"
      replace: "rhel7"
      dest: "{{ change_dir }}/vagrant_variables.yml"
    when: change_dir is defined

  - name: change ceph/ubuntu-xenial vagrant box name to rhel7
    replace:
      regexp: "ceph/ubuntu-xenial"
      replace: "rhel7"
      dest: "{{ change_dir }}/vagrant_variables.yml"
    when: change_dir is defined

- hosts: all
  gather_facts: true
  become: yes
  tasks:

  - name: check if it is an Atomic host
    stat: path=/run/ostree-booted
    register: stat_ostree
    always_run: true

  - name: set fact for using Atomic host
    set_fact:
      is_atomic: '{{ stat_ostree.stat.exists }}'

  - name: install nightly rhel7 repo
    get_url:
      url: "{{ rhel7_repo_url }}"
      dest: /etc/yum.repos.d
      owner: root
      group: root
    when: not is_atomic

  - name: set MTU on eth0
    command: "ifconfig eth0 mtu 1400 up"

  - name: set MTU on eth1
    command: "ifconfig eth1 mtu 1400 up"

- hosts: mons
  gather_facts: false
  become: yes
  tasks:

  - name: install ceph mon repo
    yum_repository:
      name: ceph-mon
      description: repo for rhcs ceph-mon
      baseurl: "{{ repo_url }}/MON/x86_64/os/"
      gpgcheck: no
      enabled: yes
    when:
      - not is_atomic

- hosts: osds
  gather_facts: false
  become: yes
  tasks:

  - name: install ceph osd repo
    yum_repository:
      name: ceph-osd
      description: repo for rhcs ceph-osd
      baseurl: "{{ repo_url }}/OSD/x86_64/os/"
      gpgcheck: no
      enabled: yes
    when:
      - not is_atomic

  - name: set MTU on eth2
    command: "ifconfig eth2 mtu 1400 up"

- hosts: mdss:rgws:clients
  gather_facts: false
  become: yes
  tasks:

  - name: install ceph tools repo
    yum_repository:
      name: ceph-tools
      description: repo for rhcs ceph tools
      baseurl: "{{ repo_url }}/Tools/x86_64/os/"
      gpgcheck: no
      enabled: yes
    when:
      - not is_atomic
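The Atomic-host detection in the play above reduces to a single file test: `/run/ostree-booted` exists only on OSTree-based systems such as Atomic Host, and the `is_atomic` fact is exactly that predicate. The equivalent check outside Ansible, as a one-liner sketch:

# Mirrors the playbook's stat + set_fact pair: true on Atomic Host.
is_atomic = File.exist?('/run/ostree-booted')
puts 'skipping yum repo setup' if is_atomic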
@@ -22,6 +22,7 @@ class TestMons(object):
         )
         assert Service(service_name).is_enabled

+    @pytest.mark.no_docker
     def test_can_get_cluster_health(self, node, Command):
         cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(node["cluster_name"])
         output = Command.check_output(cmd)

@@ -30,6 +31,7 @@ class TestMons(object):

 class TestOSDs(object):

+    @pytest.mark.no_docker
     def test_all_osds_are_up_and_in(self, node, Command):
         cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(node["cluster_name"])
         output = Command.check_output(cmd)
@@ -1,3 +1,4 @@
+import pytest

 class TestInstall(object):

@@ -13,7 +14,8 @@ class TestInstall(object):
     def test_ceph_conf_is_a_file(self, File, node):
         assert File(node["conf_path"]).is_file

-    def test_ceph_command_exists(self, Command):
+    @pytest.mark.no_docker
+    def test_ceph_command_exists(self, Command, node):
         assert Command.exists("ceph")
tox.ini

@@ -1,5 +1,5 @@
 [tox]
-envlist = {ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt}
+envlist = {,rhcs-}{ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster}
 skipsdist = True

 # extra commands for purging clusters

@@ -9,12 +9,52 @@ skipsdist = True
 [purge]
 commands=
   cp {toxinidir}/infrastructure-playbooks/purge-cluster.yml {toxinidir}/purge-cluster.yml
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/purge-cluster.yml --extra-vars="ireallymeanit=yes fetch_directory={changedir}/fetch"
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/purge-cluster.yml -e ireallymeanit=yes --extra-vars '\
+    \{\
+      "fetch_directory":"{env:FETCH_DIRECTORY:{changedir}/fetch}",\
+      "ceph_rhcs":{env:CEPH_RHCS:false},\
+      "ceph_origin":"{env:CEPH_ORIGIN:upstream}",\
+      "ceph_dev":{env:CEPH_DEV:false},\
+      "ceph_dev_branch":"{env:CEPH_DEV_BRANCH:master}",\
+      "ceph_dev_sha1":"{env:CEPH_DEV_SHA1:latest}",\
+      "ceph_stable_release":"{env:CEPH_STABLE_RELEASE:jewel}",\
+      "ceph_stable":{env:CEPH_STABLE:true}\
+    \}'
   # set up the cluster again
-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/site.yml.sample --extra-vars="fetch_directory={changedir}/fetch"
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/site.yml.sample --extra-vars '\
+    \{\
+      "fetch_directory":"{env:FETCH_DIRECTORY:{changedir}/fetch}",\
+      "ceph_rhcs":{env:CEPH_RHCS:false},\
+      "ceph_origin":"{env:CEPH_ORIGIN:upstream}",\
+      "ceph_dev":{env:CEPH_DEV:false},\
+      "ceph_dev_branch":"{env:CEPH_DEV_BRANCH:master}",\
+      "ceph_dev_sha1":"{env:CEPH_DEV_SHA1:latest}",\
+      "ceph_stable_release":"{env:CEPH_STABLE_RELEASE:jewel}",\
+      "ceph_stable":{env:CEPH_STABLE:true}\
+    \}'
   # test that the cluster can be redeployed in a healthy state
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

+# extra commands for performing a rolling update
+# currently this hardcodes the release to kraken
+# as we're still installing jewel by default
+[update]
+commands=
+  cp {toxinidir}/infrastructure-playbooks/rolling_update.yml {toxinidir}/rolling_update.yml
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/rolling_update.yml -e ireallymeanit=yes --extra-vars '\
+    \{\
+      "fetch_directory":"{env:FETCH_DIRECTORY:{changedir}/fetch}",\
+      "ceph_rhcs":{env:CEPH_RHCS:false},\
+      "ceph_origin":"{env:CEPH_ORIGIN:upstream}",\
+      "ceph_dev":{env:CEPH_DEV:false},\
+      "ceph_dev_branch":"{env:UPDATE_CEPH_DEV_BRANCH:master}",\
+      "ceph_dev_sha1":"{env:UPDATE_CEPH_DEV_SHA1:latest}",\
+      "ceph_stable_release":"{env:UPDATE_CEPH_STABLE_RELEASE:kraken}",\
+      "ceph_stable":{env:CEPH_STABLE:true}\
+    \}'
+
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

 [testenv]
 whitelist_externals =
     vagrant

@@ -28,6 +68,10 @@ setenv=
   # only available for ansible >= 2.2
   ANSIBLE_STDOUT_CALLBACK = debug
   docker_cluster: PLAYBOOK = site-docker.yml.sample
+  docker_dedicated_journal: PLAYBOOK = site-docker.yml.sample
+  docker_dmcrypt_journal_collocation: PLAYBOOK = site-docker.yml.sample
+  rhcs: CEPH_RHCS = true
+  rhcs: CEPH_STABLE = false
 deps=
   ansible1.9: ansible==1.9.4
   ansible2.1: ansible==2.1

@@ -46,18 +90,39 @@ changedir=
   centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster
   # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
   docker_cluster: {toxinidir}/tests/functional/centos/7/docker-cluster
+  docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/docker-cluster-dedicated-journal
+  docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/docker-cluster-dmcrypt-journal-collocation
   purge_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
   purge_dmcrypt: {toxinidir}/tests/functional/centos/7/dmcrypt-dedicated-journal
+  update_dmcrypt: {toxinidir}/tests/functional/centos/7/dmcrypt-dedicated-journal
+  update_cluster: {toxinidir}/tests/functional/centos/7/cluster

 commands=
+  rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
+
   vagrant up --no-provision {posargs:--provider=virtualbox}
   bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}

-  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars="fetch_directory={changedir}/fetch"
+  rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
+
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars '\
+    \{\
+      "fetch_directory":"{env:FETCH_DIRECTORY:{changedir}/fetch}",\
+      "ceph_rhcs":{env:CEPH_RHCS:false},\
+      "ceph_origin":"{env:CEPH_ORIGIN:upstream}",\
+      "ceph_dev":{env:CEPH_DEV:false},\
+      "ceph_dev_branch":"{env:CEPH_DEV_BRANCH:master}",\
+      "ceph_dev_sha1":"{env:CEPH_DEV_SHA1:latest}",\
+      "ceph_stable_release":"{env:CEPH_STABLE_RELEASE:jewel}",\
+      "ceph_stable":{env:CEPH_STABLE:true}\
+    \}'

   ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml

   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests

   purge_cluster: {[purge]commands}
   purge_dmcrypt: {[purge]commands}
+  update_dmcrypt: {[update]commands}
+  update_cluster: {[update]commands}

   vagrant destroy --force
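Throughout these tox commands, `{env:NAME:default}` is tox's environment-variable substitution: each variable falls back to its default unless CI (or a `setenv` factor such as `rhcs:`) overrides it, which is how the same command lines serve both upstream and RHCS runs. The lookup semantics, roughly modeled in Ruby for illustration:

# Rough model of tox's {env:NAME:default} substitution.
def tox_env(name, default)
  ENV.fetch(name, default)
end

tox_env('CEPH_STABLE_RELEASE', 'jewel') # => "jewel" unless the CI overrides it
tox_env('CEPH_RHCS', 'false')           # => "true" when the rhcs factor sets it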