mirror of https://github.com/ceph/ceph-ansible.git
commit b9be96983c
@@ -504,7 +504,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
         # always make /dev/sd{a/b/c} so that CI can ensure that
         # virtualbox and libvirt will have the same devices to use for OSDs
         (0..2).each do |d|
-          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '12G', :bus => "ide"
+          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '50G', :bus => "ide"
        end
        lv.memory = MEMORY
        lv.random_hostname = true
@@ -225,13 +225,13 @@ dummy:
 #
 #
 #ceph_osd_docker_devices: "{{ devices }}"
-#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1
+#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1

 # ACTIVATE DEVICE
 # Examples:
 # Journal collocated or dedicated journal on filestore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
 # Encrypted OSD or encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_BLUESTORE=1
+# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
 #
 #ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
 #ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
@@ -143,9 +143,9 @@

 - name: waiting for the monitor to join the quorum...
   shell: |
-    ceph -s --cluster {{ cluster }} | grep election | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
+    ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["quorum_names"])'
   register: result
-  until: result.rc == 0
+  until: "{{ ansible_hostname in result.stdout }}"
   retries: "{{ health_mon_check_retries }}"
   delay: "{{ health_mon_check_delay }}"
   delegate_to: "{{ mon_host }}"
@@ -153,9 +153,9 @@

 - name: waiting for the containerized monitor to join the quorum...
   shell: |
-    docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph -s --cluster {{ cluster }} | grep quorum | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
+    docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["quorum_names"])'
   register: result
-  until: result.rc == 0
+  until: "{{ ansible_hostname in result.stdout }}"
   retries: "{{ health_mon_check_retries }}"
   delay: "{{ health_mon_check_delay }}"
   delegate_to: "{{ mon_host }}"
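Both hunks above switch the quorum test from scraping the human-readable `ceph -s` output to parsing its machine-readable JSON form. A minimal standalone sketch of the same check (the cluster name `ceph`, monitor hostname `mon0`, and retry count are illustrative assumptions, not values from this commit):

    #!/bin/bash
    # Poll the cluster status in JSON and test quorum membership, mirroring
    # what the Ansible tasks do with register/until/retries/delay.
    CLUSTER=ceph   # assumed cluster name
    MON=mon0       # assumed monitor hostname
    for attempt in $(seq 1 5); do
        quorum=$(ceph --cluster "$CLUSTER" -s -f json |
            python -c 'import sys, json; print(json.load(sys.stdin)["quorum_names"])')
        if echo "$quorum" | grep -q "$MON"; then
            echo "monitor $MON joined the quorum"
            exit 0
        fi
        sleep 10
    done
    echo "monitor $MON never joined the quorum" >&2
    exit 1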
@@ -217,13 +217,13 @@ ceph_config_keys: [] # DON'T TOUCH ME
 #
 #
 ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1
+ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1

 # ACTIVATE DEVICE
 # Examples:
 # Journal collocated or dedicated journal on filestore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
 # Encrypted OSD or encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_BLUESTORE=1
+# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
 #
 ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
 ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
@@ -9,7 +9,7 @@

 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: prepare dmcrypt osd disk(s) with a dedicated journal device
+- name: prepare dmcrypt osd disk(s) with a dedicated journal device (filestore)
   command: "ceph-disk prepare --dmcrypt --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -23,6 +23,24 @@
     - item.0.get("rc", 0) != 0
     - item.1.get("rc", 0) != 0
     - not osd_auto_discovery
+    - osd_objectstore == 'filestore'
     - dmcrypt_dedicated_journal

+- name: prepare dmcrypt osd disk(s) with a dedicated journal device (bluestore)
+  command: "ceph-disk prepare --bluestore --dmcrypt --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ ispartition_results.results }}"
+    - "{{ devices }}"
+    - "{{ raw_journal_devices }}"
+  changed_when: false
+  when:
+    - not item.0.get("skipped")
+    - not item.1.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - item.1.get("rc", 0) != 0
+    - not osd_auto_discovery
+    - osd_objectstore == 'bluestore'
+    - dmcrypt_dedicated_journal
+
 - include: ../activate_osds.yml
@@ -9,7 +9,7 @@

 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: automatic prepare dmcrypt osd disk(s) without partitions with collocated osd data and journal
+- name: automatic prepare dmcrypt osd disk(s) without partitions with collocated osd data and journal (filestore)
   command: ceph-disk prepare --dmcrypt --cluster "{{ cluster }}" "/dev/{{ item.key }}"
   with_dict: "{{ ansible_devices }}"
   when:
@@ -18,9 +18,22 @@
     - item.value.partitions|count == 0
     - item.value.holders|count == 0
     - dmcrypt_journal_collocation
+    - osd_objectstore == 'filestore'
     - osd_auto_discovery

-- name: manually prepare dmcrypt osd disk(s) with collocated osd data and journal
+- name: automatic prepare dmcrypt osd disk(s) without partitions with collocated osd data and journal (bluestore)
+  command: ceph-disk prepare --bluestore --dmcrypt --cluster "{{ cluster }}" "/dev/{{ item.key }}"
+  with_dict: "{{ ansible_devices }}"
+  when:
+    - ansible_devices is defined
+    - item.value.removable == "0"
+    - item.value.partitions|count == 0
+    - item.value.holders|count == 0
+    - dmcrypt_journal_collocation
+    - osd_objectstore == 'bluestore'
+    - osd_auto_discovery
+
+- name: manually prepare dmcrypt osd disk(s) with collocated osd data and journal (filestore)
   command: ceph-disk prepare --dmcrypt --cluster "{{ cluster }}" "{{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -32,6 +45,22 @@
     - item.0.get("rc", 0) != 0
     - item.1.get("rc", 0) != 0
     - dmcrypt_journal_collocation
+    - osd_objectstore == 'filestore'
     - not osd_auto_discovery

+- name: manually prepare dmcrypt osd disk(s) with collocated osd data and journal (bluestore)
+  command: ceph-disk prepare --bluestore --dmcrypt --cluster "{{ cluster }}" "{{ item.2 }}"
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ ispartition_results.results }}"
+    - "{{ devices }}"
+  when:
+    - not item.0.get("skipped")
+    - not item.1.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - item.1.get("rc", 0) != 0
+    - dmcrypt_journal_collocation
+    - osd_objectstore == 'bluestore'
+    - not osd_auto_discovery
+
 - include: ../activate_osds.yml
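Taken together, the filestore and bluestore task variants above reduce to four ceph-disk invocations, summarized here with illustrative device paths (/dev/sda as data disk, /dev/sdb as journal device):

    # dedicated journal, filestore:
    ceph-disk prepare --dmcrypt --cluster ceph /dev/sda /dev/sdb
    # dedicated journal, bluestore:
    ceph-disk prepare --bluestore --dmcrypt --cluster ceph /dev/sda /dev/sdb
    # collocated data and journal, filestore:
    ceph-disk prepare --dmcrypt --cluster ceph /dev/sda
    # collocated data and journal, bluestore:
    ceph-disk prepare --bluestore --dmcrypt --cluster ceph /dev/sda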
@@ -11,7 +11,7 @@ function create_dev_list {
   local disks
   regex="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
   # we use the prepare container to find the partitions to expose
-  disks=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-dev${1} |& grep -Eo /dev/disk/by-partuuid/${regex} | uniq)
+  disks=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo /dev/disk/by-partuuid/${regex} | uniq)
   for disk in $disks; do
     DEVICES="--device $disk "
   done
@@ -27,12 +27,7 @@ create_dev_list $1
 /usr/bin/docker run \
   --rm \
   --net=host \
-  {% if 'OSD_DMCRYPT=1' in ceph_osd_docker_extra_env -%}
   --privileged=true \
-  -v /dev:/dev \
-  {% else -%}
-  --cap-add SYS_ADMIN \
-  {% endif -%}
   {% if ansible_distribution == 'Ubuntu' -%}
   --security-opt apparmor:unconfined \
   {% endif -%}
@@ -45,6 +40,7 @@ create_dev_list $1
   -e KV_IP={{kv_endpoint}} \
   -e KV_PORT={{kv_port}} \
   {% endif -%}
+  -v /dev:/dev \
   -v /etc/localtime:/etc/localtime:ro \
   --device=/dev/${1} \
   --device=/dev/${1}1 \
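For context, create_dev_list greps the prepare container's logs for /dev/disk/by-partuuid paths and turns them into --device flags for the docker run call that follows. Note that, as written, each loop iteration overwrites DEVICES rather than appending, so only the last matched partition is kept; an accumulating sketch (with `$(hostname -s)` standing in for the templated `{{ ansible_hostname }}`):

    function create_dev_list {
        local disks
        regex="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
        # the prepare container logged which partitions it created
        disks=$(docker logs "ceph-osd-prepare-$(hostname -s)-devdev${1}" |&
            grep -Eo "/dev/disk/by-partuuid/${regex}" | uniq)
        for disk in $disks; do
            DEVICES="${DEVICES} --device ${disk}"   # append instead of overwrite
        done
    }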
@@ -21,3 +21,6 @@ ceph_conf_overrides:
   global:
     osd_pool_default_pg_num: 8
     osd_pool_default_size: 1
+  osd:
+    bluestore block db size = 67108864
+    bluestore block wal size = 1048576000
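The two values added under osd: are byte counts: 67108864 bytes is 64 MiB for the bluestore block.db, and 1048576000 bytes is 1000 MiB for the block.wal, which a quick shell check confirms:

    echo $((64 * 1024 * 1024))      # 67108864, bluestore block db size
    echo $((1000 * 1024 * 1024))    # 1048576000, bluestore block wal size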
@@ -0,0 +1,23 @@
+---
+
+ceph_stable: True
+public_network: "192.168.11.0/24"
+cluster_network: "192.168.12.0/24"
+journal_size: 100
+monitor_interface: eth1
+dmcrypt_dedicated_journal: true
+osd_objectstore: "bluestore"
+devices:
+  - '/dev/sda'
+raw_journal_devices:
+  - '/dev/sdb'
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
+  osd:
+    bluestore block db size = 67108864
+    bluestore block wal size = 1048576000
@@ -0,0 +1,22 @@
+---
+
+ceph_stable: True
+public_network: "192.168.13.0/24"
+cluster_network: "192.168.14.0/24"
+journal_size: 100
+monitor_interface: eth1
+dmcrypt_journal_collocation: true
+osd_objectstore: "bluestore"
+devices:
+  - '/dev/sda'
+  - '/dev/sdb'
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
+  osd:
+    bluestore block db size = 67108864
+    bluestore block wal size = 1048576000
@@ -0,0 +1,26 @@
+---
+docker: true
+containerized_deployment: True
+ceph_stable: True
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.23.0/24"
+cluster_network: "192.168.24.0/24"
+journal_size: 100
+monitor_interface: eth1
+dmcrypt_journal_collocation: true
+osd_objectstore: "bluestore"
+devices:
+  - '/dev/sda'
+  - '/dev/sdb'
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
+  osd:
+    bluestore block db size = 67108864
+    bluestore block wal size = 1048576000
+ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
+ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
@@ -0,0 +1,74 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.23
+cluster_subnet: 192.168.24
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
@@ -0,0 +1,23 @@
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+ceph_stable: True
+containerized_deployment: True
+cluster: test
+monitor_interface: eth1
+ceph_mon_docker_subnet: "{{ public_network }}"
+journal_size: 100
+ceph_docker_on_openstack: False
+public_network: "192.168.15.0/24"
+cluster_network: "192.168.16.0/24"
+ceph_rgw_civetweb_port: 8080
+ceph_osd_docker_devices: "{{ devices }}"
+devices:
+  - /dev/sda
+raw_journal_devices:
+  - /dev/sdb
+ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_BLUESTORE=1
+ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
+ceph_osd_docker_run_script_path: /var/tmp
@@ -15,6 +15,8 @@ cluster_network: "192.168.16.0/24"
 journal_collocation: true
 ceph_rgw_civetweb_port: 8080
 ceph_osd_docker_devices: "{{ devices }}"
+ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1
+ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
 devices:
   - /dev/sda
   - /dev/sdb
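In the containerized scenarios, prepare_env and extra_env are spliced verbatim into docker invocations. Roughly, the prepare step above expands to something like the following (a sketch only: the `ceph/daemon` image, the `OSD_DEVICE` variable, and the `OSD_CEPH_DISK_PREPARE` daemon type are assumptions based on the ceph-docker conventions of this era, not part of this commit, and `CLUSTER=ceph` assumes the default cluster name):

    docker run --rm --privileged=true --net=host -v /dev:/dev \
        -e CLUSTER=ceph -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1 \
        -e OSD_DEVICE=/dev/sda -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
        ceph/daemon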
@@ -0,0 +1 @@
+../../../../../Vagrantfile
@@ -14,3 +14,10 @@ osd_objectstore: "bluestore"
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
   - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
+  osd:
+    bluestore block db size = 67108864
+    bluestore block wal size = 1048576000
@@ -38,6 +38,9 @@ eth: 'eth1'
 # For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
 disks: "[ '/dev/sdb', '/dev/sdc' ]"

+# VM disk size in MB
+disk_size: 41000
+
 # VAGRANT BOX
 # Ceph boxes are *strongly* suggested. They are under better control and will
 # not get updated frequently unless required for build systems. These are (for
@@ -6,6 +6,7 @@ public_network: "192.168.1.0/24"
 cluster_network: "192.168.2.0/24"
 monitor_interface: eth1
 journal_size: 100
+osd_objectstore: "filestore"
 devices:
   - '/dev/sda'
 raw_journal_devices:
@@ -0,0 +1 @@
+../../../../../Vagrantfile
@@ -6,6 +6,7 @@ cluster_network: "192.168.12.0/24"
 journal_size: 100
 monitor_interface: eth1
 dmcrypt_dedicated_journal: true
+osd_objectstore: "filestore"
 devices:
   - '/dev/sda'
 raw_journal_devices:
@@ -0,0 +1,4 @@
+---
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mgr0
@@ -0,0 +1,74 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 1
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.11
+cluster_subnet: 192.168.12
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
@@ -0,0 +1 @@
+../../../../../Vagrantfile
@@ -6,6 +6,7 @@ cluster_network: "192.168.14.0/24"
 journal_size: 100
 monitor_interface: eth1
 dmcrypt_journal_collocation: true
+osd_objectstore: "filestore"
 devices:
   - '/dev/sda'
   - '/dev/sdb'
@@ -0,0 +1,4 @@
+---
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
@@ -0,0 +1,74 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.13
+cluster_subnet: 192.168.14
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
@@ -1,492 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-require 'yaml'
-require 'time'
-VAGRANTFILE_API_VERSION = '2'
-
-DEBUG = false
-
-config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))
-settings=YAML.load_file(config_file)
-
-LABEL_PREFIX = settings['label_prefix'] ? settings['label_prefix'] + "-" : ""
-NMONS = settings['mon_vms']
-NOSDS = settings['osd_vms']
-NMDSS = settings['mds_vms']
-NRGWS = settings['rgw_vms']
-NNFSS = settings['nfs_vms']
-RESTAPI = settings['restapi']
-NRBD_MIRRORS = settings['rbd_mirror_vms']
-CLIENTS = settings['client_vms']
-NISCSI_GWS = settings['iscsi_gw_vms']
-PUBLIC_SUBNET = settings['public_subnet']
-CLUSTER_SUBNET = settings['cluster_subnet']
-BOX = settings['vagrant_box']
-BOX_URL = settings['vagrant_box_url']
-SYNC_DIR = settings['vagrant_sync_dir']
-MEMORY = settings['memory']
-ETH = settings['eth']
-DOCKER = settings['docker']
-USER = settings['ssh_username']
-
-ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode')
-DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false)
-DISK_UUID = Time.now.utc.to_i
-
-
-ansible_provision = proc do |ansible|
-  if DOCKER then
-    ansible.playbook = 'site-docker.yml'
-    if settings['skip_tags']
-      ansible.skip_tags = settings['skip_tags']
-    end
-  else
-    ansible.playbook = 'site.yml'
-  end
-
-  # Note: Can't do ranges like mon[0-2] in groups because
-  # these aren't supported by Vagrant, see
-  # https://github.com/mitchellh/vagrant/issues/3539
-  ansible.groups = {
-    'mons' => (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" },
-    'osds' => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" },
-    'mdss' => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" },
-    'rgws' => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" },
-    'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" },
-    'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" },
-    'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" },
-    'iscsi_gw' => (0..NISCSI_GWS - 1).map { |j| "#{LABEL_PREFIX}iscsi_gw#{j}" }
-  }
-
-  if RESTAPI then
-    ansible.groups['restapis'] = (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" }
-  end
-
-  ansible.extra_vars = {
-    cluster_network: "#{CLUSTER_SUBNET}.0/24",
-    journal_size: 100,
-    public_network: "#{PUBLIC_SUBNET}.0/24",
-  }
-
-  # In a production deployment, these should be secret
-  if DOCKER then
-    ansible.extra_vars = ansible.extra_vars.merge({
-      containerized_deployment: 'true',
-      monitor_interface: ETH,
-      ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
-      ceph_osd_docker_devices: settings['disks'],
-      devices: settings['disks'],
-      ceph_docker_on_openstack: BOX == 'openstack',
-      ceph_rgw_civetweb_port: 8080,
-      generate_fsid: 'true',
-    })
-  else
-    ansible.extra_vars = ansible.extra_vars.merge({
-      devices: settings['disks'],
-      journal_collocation: 'true',
-      monitor_interface: ETH,
-      os_tuning_params: settings['os_tuning_params'],
-      pool_default_size: '2',
-    })
-  end
-
-  if BOX == 'linode' then
-    ansible.sudo = true
-    # Use monitor_address_block instead of monitor_interface:
-    ansible.extra_vars.delete(:monitor_interface)
-    ansible.extra_vars = ansible.extra_vars.merge({
-      cluster_network: "#{CLUSTER_SUBNET}.0/16",
-      devices: ['/dev/sdc'], # hardcode leftover disk
-      journal_collocation: 'true',
-      monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
-      public_network: "#{PUBLIC_SUBNET}.0/16",
-    })
-  end
-
-  if DEBUG then
-    ansible.verbose = '-vvv'
-  end
-  ansible.limit = 'all'
-end
-
-def create_vmdk(name, size)
-  dir = Pathname.new(__FILE__).expand_path.dirname
-  path = File.join(dir, '.vagrant', name + '.vmdk')
-  `vmware-vdiskmanager -c -s #{size} -t 0 -a scsi #{path} \
-   2>&1 > /dev/null` unless File.exist?(path)
-end
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-  config.vm.box = BOX
-  config.vm.box_url = BOX_URL
-  config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048
-  config.ssh.private_key_path = settings['ssh_private_key_path']
-  config.ssh.username = USER
-
-  # Faster bootup. Disables mounting the sync folder for libvirt and virtualbox
-  if DISABLE_SYNCED_FOLDER
-    config.vm.provider :virtualbox do |v,override|
-      override.vm.synced_folder '.', SYNC_DIR, disabled: true
-    end
-    config.vm.provider :libvirt do |v,override|
-      override.vm.synced_folder '.', SYNC_DIR, disabled: true
-    end
-  end
-
-  if BOX == 'openstack'
-    # OpenStack VMs
-    config.vm.provider :openstack do |os|
-      config.vm.synced_folder ".", "/home/#{USER}/vagrant", disabled: true
-      config.ssh.pty = true
-      os.openstack_auth_url = settings['os_openstack_auth_url']
-      os.username = settings['os_username']
-      os.password = settings['os_password']
-      os.tenant_name = settings['os_tenant_name']
-      os.region = settings['os_region']
-      os.flavor = settings['os_flavor']
-      os.image = settings['os_image']
-      os.keypair_name = settings['os_keypair_name']
-      os.security_groups = ['default']
-
-      if settings['os.networks'] then
-        os.networks = settings['os_networks']
-      end
-
-      if settings['os.floating_ip_pool'] then
-        os.floating_ip_pool = settings['os_floating_ip_pool']
-      end
-
-      config.vm.provision "shell", inline: "true", upload_path: "/home/#{USER}/vagrant-shell"
-    end
-  elsif BOX == 'linode'
-    config.vm.provider :linode do |provider, override|
-      provider.token = ENV['LINODE_API_KEY']
-      provider.distribution = settings['cloud_distribution'] # 'Ubuntu 16.04 LTS'
-      provider.datacenter = settings['cloud_datacenter']
-      provider.plan = MEMORY.to_s
-      provider.private_networking = true
-      # root install generally takes <1GB
-      provider.xvda_size = 4*1024
-      # add some swap as the Linode distros require it
-      provider.swap_size = 128
-    end
-  end
-
-  (0..CLIENTS - 1).each do |i|
-    config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
-      client.vm.hostname = "#{LABEL_PREFIX}ceph-client#{i}"
-      if ASSIGN_STATIC_IP
-        client.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.4#{i}"
-      end
-      # Virtualbox
-      client.vm.provider :virtualbox do |vb|
-        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
-      end
-
-      # VMware
-      client.vm.provider :vmware_fusion do |v|
-        v.vmx['memsize'] = "#{MEMORY}"
-      end
-
-      # Libvirt
-      client.vm.provider :libvirt do |lv|
-        lv.memory = MEMORY
-        lv.random_hostname = true
-      end
-
-      # Parallels
-      client.vm.provider "parallels" do |prl|
-        prl.name = "ceph-client#{i}"
-        prl.memory = "#{MEMORY}"
-      end
-
-      client.vm.provider :linode do |provider|
-        provider.label = client.vm.hostname
-      end
-    end
-  end
-
-  (0..NRGWS - 1).each do |i|
-    config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
-      rgw.vm.hostname = "#{LABEL_PREFIX}ceph-rgw#{i}"
-      if ASSIGN_STATIC_IP
-        rgw.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.5#{i}"
-      end
-
-      # Virtualbox
-      rgw.vm.provider :virtualbox do |vb|
-        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
-      end
-
-      # VMware
-      rgw.vm.provider :vmware_fusion do |v|
-        v.vmx['memsize'] = "#{MEMORY}"
-      end
-
-      # Libvirt
-      rgw.vm.provider :libvirt do |lv|
-        lv.memory = MEMORY
-        lv.random_hostname = true
-      end
-
-      # Parallels
-      rgw.vm.provider "parallels" do |prl|
-        prl.name = "ceph-rgw#{i}"
-        prl.memory = "#{MEMORY}"
-      end
-
-      rgw.vm.provider :linode do |provider|
-        provider.label = rgw.vm.hostname
-      end
-    end
-  end
-
-  (0..NNFSS - 1).each do |i|
-    config.vm.define "nfs#{i}" do |nfs|
-      nfs.vm.hostname = "ceph-nfs#{i}"
-      if ASSIGN_STATIC_IP
-        nfs.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.6#{i}"
-      end
-
-      # Virtualbox
-      nfs.vm.provider :virtualbox do |vb|
-        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
-      end
-
-      # VMware
-      nfs.vm.provider :vmware_fusion do |v|
-        v.vmx['memsize'] = "#{MEMORY}"
-      end
-
-      # Libvirt
-      nfs.vm.provider :libvirt do |lv|
-        lv.memory = MEMORY
-        lv.random_hostname = true
-      end
-
-      # Parallels
-      nfs.vm.provider "parallels" do |prl|
-        prl.name = "ceph-nfs#{i}"
-        prl.memory = "#{MEMORY}"
-      end
-
-      nfs.vm.provider :linode do |provider|
-        provider.label = nfs.vm.hostname
-      end
-    end
-  end
-
-  (0..NMDSS - 1).each do |i|
-    config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
-      mds.vm.hostname = "#{LABEL_PREFIX}ceph-mds#{i}"
-      if ASSIGN_STATIC_IP
-        mds.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.7#{i}"
-      end
-      # Virtualbox
-      mds.vm.provider :virtualbox do |vb|
-        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
-      end
-
-      # VMware
-      mds.vm.provider :vmware_fusion do |v|
-        v.vmx['memsize'] = "#{MEMORY}"
-      end
-
-      # Libvirt
-      mds.vm.provider :libvirt do |lv|
-        lv.memory = MEMORY
-        lv.random_hostname = true
-      end
-      # Parallels
-      mds.vm.provider "parallels" do |prl|
-        prl.name = "ceph-mds#{i}"
-        prl.memory = "#{MEMORY}"
-      end
-
-      mds.vm.provider :linode do |provider|
-        provider.label = mds.vm.hostname
-      end
-    end
-  end
-
-  (0..NRBD_MIRRORS - 1).each do |i|
-    config.vm.define "#{LABEL_PREFIX}rbd_mirror#{i}" do |rbd_mirror|
-      rbd_mirror.vm.hostname = "#{LABEL_PREFIX}ceph-rbd-mirror#{i}"
-      if ASSIGN_STATIC_IP
-        rbd_mirror.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.8#{i}"
-      end
-      # Virtualbox
-      rbd_mirror.vm.provider :virtualbox do |vb|
-        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
-      end
-
-      # VMware
-      rbd_mirror.vm.provider :vmware_fusion do |v|
-        v.vmx['memsize'] = "#{MEMORY}"
-      end
-
-      # Libvirt
-      rbd_mirror.vm.provider :libvirt do |lv|
-        lv.memory = MEMORY
-        lv.random_hostname = true
-      end
-      # Parallels
-      rbd_mirror.vm.provider "parallels" do |prl|
-        prl.name = "ceph-rbd-mirror#{i}"
-        prl.memory = "#{MEMORY}"
-      end
-
-      rbd_mirror.vm.provider :linode do |provider|
-        provider.label = rbd_mirror.vm.hostname
-      end
-    end
-  end
-
-  (0..NISCSI_GWS - 1).each do |i|
-    config.vm.define "#{LABEL_PREFIX}iscsi_gw#{i}" do |iscsi_gw|
-      iscsi_gw.vm.hostname = "#{LABEL_PREFIX}ceph-iscsi-gw#{i}"
-      if ASSIGN_STATIC_IP
-        iscsi_gw.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.9#{i}"
-      end
-      # Virtualbox
-      iscsi_gw.vm.provider :virtualbox do |vb|
-        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
-      end
-
-      # VMware
-      iscsi_gw.vm.provider :vmware_fusion do |v|
-        v.vmx['memsize'] = "#{MEMORY}"
-      end
-
-      # Libvirt
-      iscsi_gw.vm.provider :libvirt do |lv|
-        lv.memory = MEMORY
-        lv.random_hostname = true
-      end
-      # Parallels
-      iscsi_gw.vm.provider "parallels" do |prl|
-        prl.name = "ceph-iscsi-gw#{i}"
-        prl.memory = "#{MEMORY}"
-      end
-
-      iscsi_gw.vm.provider :linode do |provider|
-        provider.label = iscsi_gw.vm.hostname
-      end
-    end
-  end
-
-  (0..NMONS - 1).each do |i|
-    config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
-      mon.vm.hostname = "#{LABEL_PREFIX}ceph-mon#{i}"
-      if ASSIGN_STATIC_IP
-        mon.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.1#{i}"
-      end
-      # Virtualbox
-      mon.vm.provider :virtualbox do |vb|
-        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
-      end
-
-      # VMware
-      mon.vm.provider :vmware_fusion do |v|
-        v.vmx['memsize'] = "#{MEMORY}"
-      end
-
-      # Libvirt
-      mon.vm.provider :libvirt do |lv|
-        lv.memory = MEMORY
-        lv.random_hostname = false
-      end
-
-      # Parallels
-      mon.vm.provider "parallels" do |prl|
-        prl.name = "ceph-mon#{i}"
-        prl.memory = "#{MEMORY}"
-      end
-
-      mon.vm.provider :linode do |provider|
-        provider.label = mon.vm.hostname
-      end
-    end
-  end
-
-  (0..NOSDS - 1).each do |i|
-    config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
-      osd.vm.hostname = "#{LABEL_PREFIX}ceph-osd#{i}"
-      if ASSIGN_STATIC_IP
-        osd.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.10#{i}"
-        osd.vm.network :private_network,
-          ip: "#{CLUSTER_SUBNET}.20#{i}"
-      end
-      # Virtualbox
-      osd.vm.provider :virtualbox do |vb|
-        # Create our own controller for consistency and to remove VM dependency
-        vb.customize ['storagectl', :id,
-                      '--name', 'OSD Controller',
-                      '--add', 'scsi']
-        (0..1).each do |d|
-          vb.customize ['createhd',
-                        '--filename', "disk-#{i}-#{d}",
-                        '--size', '11000'] unless File.exist?("disk-#{i}-#{d}.vdi")
-          vb.customize ['storageattach', :id,
-                        '--storagectl', 'OSD Controller',
-                        '--port', 3 + d,
-                        '--device', 0,
-                        '--type', 'hdd',
-                        '--medium', "disk-#{i}-#{d}.vdi"]
-        end
-        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
-      end
-
-      # VMware
-      osd.vm.provider :vmware_fusion do |v|
-        (0..1).each do |d|
-          v.vmx["scsi0:#{d + 1}.present"] = 'TRUE'
-          v.vmx["scsi0:#{d + 1}.fileName"] =
-            create_vmdk("disk-#{i}-#{d}", '11000MB')
-        end
-        v.vmx['memsize'] = "#{MEMORY}"
-      end
-
-      # Libvirt
-      driverletters = ('a'..'z').to_a
-      osd.vm.provider :libvirt do |lv|
-        # always make /dev/sd{a/b/c} so that CI can ensure that
-        # virtualbox and libvirt will have the same devices to use for OSDs
-        (0..2).each do |d|
-          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '12G', :bus => "ide"
-        end
-        lv.memory = MEMORY
-        lv.random_hostname = false
-      end
-
-      # Parallels
-      osd.vm.provider "parallels" do |prl|
-        prl.name = "ceph-osd#{i}"
-        prl.memory = "#{MEMORY}"
-        (0..1).each do |d|
-          prl.customize ["set", :id,
-                         "--device-add",
-                         "hdd",
-                         "--iface",
-                         "sata"]
-        end
-      end
-
-      osd.vm.provider :linode do |provider|
-        provider.label = osd.vm.hostname
-      end
-
-      # Run the provisioner after the last machine comes up
-      osd.vm.provision 'ansible', &ansible_provision if i == (NOSDS - 1)
-    end
-  end
-end
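The per-scenario copy of this Vagrantfile is deleted here and replaced by the `../../../../../Vagrantfile` symlinks added throughout this commit, so every test scenario now shares the single definition at the repository root. Usage is unchanged; a sketch, assuming a scenario directory and the libvirt provider (the path below is hypothetical):

    cd tests/functional/centos/7/cluster      # hypothetical scenario path
    vagrant up --no-provision --provider=libvirt
    vagrant provision    # runs the ansible provisioner defined in the shared Vagrantfile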
@@ -0,0 +1 @@
+../../../../../Vagrantfile
@ -0,0 +1,5 @@
|
|||
[mons]
|
||||
mon0
|
||||
|
||||
[osds]
|
||||
osd0
|

@ -0,0 +1 @@
../../../../../Vagrantfile

@ -0,0 +1,5 @@
[mons]
mon0

[osds]
osd0

@ -0,0 +1,52 @@
---

# DEPLOY CONTAINERIZED DAEMONS
docker: True

# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0

# Deploy RESTAPI on each of the Monitors
restapi: true

# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.15
cluster_subnet: 192.168.16

# MEMORY
# set 1024 for CentOS
memory: 1024

# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"

# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
#   * ceph/ubuntu-xenial
#
# NOTE(leseb): we use centos for this scenario since we need at least Docker
# version 1.12.5, which is not available in Atomic Host.
# There are bugs like this one: https://github.com/docker/docker/issues/12694
vagrant_box: centos/7

#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on the vagrant box.
# Set to /home/vagrant/sync for CentOS/7, /home/{ user }/vagrant for openstack; defaults to /vagrant.
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing; will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true

@ -0,0 +1 @@
../../../../../Vagrantfile

@ -0,0 +1,29 @@
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True

ceph_stable: True
containerized_deployment: True
cluster: test
monitor_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
journal_size: 100
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
journal_collocation: true
ceph_rgw_civetweb_port: 8080
ceph_osd_docker_devices: "{{ devices }}"
ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE
devices:
  - /dev/sda
  - /dev/sdb
ceph_osd_docker_run_script_path: /var/tmp
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
  global:
    osd_pool_default_pg_num: 8
    osd_pool_default_size: 1
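For context on the ceph_osd_docker_prepare_env value above: once Jinja substitutes {{ cluster }} and {{ journal_size }}, the string is nothing more than a run of -e flags handed to the container runtime. A hedged sketch (Python; the "ceph/daemon" image name is an illustrative assumption, not taken from this file):

# Sketch only: shows how the rendered env string becomes docker CLI arguments.
cluster = "test"        # from the group_vars above
journal_size = 100      # from the group_vars above
prepare_env = f"-e CLUSTER={cluster} -e OSD_JOURNAL_SIZE={journal_size} -e OSD_FORCE_ZAP=1"
docker_cmd = ["docker", "run"] + prepare_env.split() + ["ceph/daemon"]  # image name assumed
print(" ".join(docker_cmd))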

@ -0,0 +1,16 @@
[mons]
mon0
mon1
mon2

[osds]
osd0

[mdss]
mds0

[rgws]
rgw0

[mgrs]
mgr0

@ -0,0 +1,62 @@
---

# DEPLOY CONTAINERIZED DAEMONS
docker: True

# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 3
osd_vms: 1
mds_vms: 1
rgw_vms: 1
nfs_vms: 0
rbd_mirror_vms: 1
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 1

# Deploy RESTAPI on each of the Monitors
restapi: true

# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.15
cluster_subnet: 192.168.16

# MEMORY
# set 1024 for CentOS
memory: 1024

# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sda', '/dev/sdb' ]"

# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
#   * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful, the storage controller is named 'SATA Controller'
# For more boxes have a look at:
#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/atomic-host
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on the vagrant box.
# Set to /home/vagrant/sync for CentOS/7, /home/{ user }/vagrant for openstack; defaults to /vagrant.
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing; will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box

@ -0,0 +1 @@
../../../../../Vagrantfile

@ -6,6 +6,7 @@ public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
monitor_interface: eth1
journal_size: 100
osd_objectstore: "filestore"
devices:
  - '/dev/sda'
  - '/dev/sdb'

@ -0,0 +1,4 @@
---
os_tuning_params:
  - { name: kernel.pid_max, value: 4194303 }
  - { name: fs.file-max, value: 26234859 }
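The os_tuning_params entries above are plain kernel sysctls, which ceph-ansible applies on the target hosts (via Ansible's sysctl handling). A rough, illustrative Python sketch of what the two entries amount to at the OS level:

# Rough equivalent of the two entries on the target host:
#   sysctl -w kernel.pid_max=4194303
#   sysctl -w fs.file-max=26234859
import subprocess

os_tuning_params = [
    {"name": "kernel.pid_max", "value": 4194303},
    {"name": "fs.file-max", "value": 26234859},
]

for param in os_tuning_params:
    subprocess.run(["sysctl", "-w", f"{param['name']}={param['value']}"], check=True)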

@ -0,0 +1,5 @@
[mons]
mon0

[osds]
osd0

@ -4,4 +4,4 @@ class TestOSD(object):
    def test_osds_are_all_collocated(self, node, Command):
        # TODO: figure out a way to parameterize node['vars']['devices'] for this test
        for device in node["vars"]["devices"]:
            assert Command.check_output("sudo blkid -s PARTLABEL -o value %s2" % device) == "ceph journal"
            assert Command.check_output("sudo blkid -s PARTLABEL -o value %s2" % device) in ["ceph journal", "ceph block"]
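The change above widens the collocation test from filestore-only to filestore or bluestore: the second partition of each OSD device may now carry either PARTLABEL. A minimal sketch of the check (Python; the helper name is illustrative, not repo code):

import subprocess

def partition_label(device, partition=2):
    # Same query the test issues: blkid -s PARTLABEL -o value /dev/sdX2
    out = subprocess.check_output(
        ["sudo", "blkid", "-s", "PARTLABEL", "-o", "value", f"{device}{partition}"]
    )
    return out.decode().strip()

# "ceph journal" -> filestore; "ceph block" -> bluestore
assert partition_label("/dev/sda") in ["ceph journal", "ceph block"]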

tox.ini

@ -1,5 +1,7 @@
[tox]
envlist = {jewel,kraken,rhcs}-{ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster,bluestore_journal_collocation,bluestore_cluster}
envlist = {jewel,luminous,rhcs}-{ansible2.2}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster}
  {luminous}-{ansible2.2}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation}

skipsdist = True

# extra commands for purging clusters
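The envlist above uses tox's generative syntax: each brace group is a factor set, and the environment names are their cross-product. An illustrative expansion (Python, over a small subset of the factors):

from itertools import product

releases = ["jewel", "luminous", "rhcs"]
ansible_versions = ["ansible2.2"]
scenarios = ["xenial_cluster", "journal_collocation", "docker_cluster"]  # subset for brevity

for parts in product(releases, ansible_versions, scenarios):
    print("-".join(parts))  # e.g. jewel-ansible2.2-xenial_cluster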

@ -64,11 +66,18 @@ setenv=
  purge_docker_cluster: PURGE_PLAYBOOK = purge-docker-cluster.yml
  docker_dedicated_journal: PLAYBOOK = site-docker.yml.sample
  docker_dmcrypt_journal_collocation: PLAYBOOK = site-docker.yml.sample
  bluestore_docker_cluster: PLAYBOOK = site-docker.yml.sample
  bluestore_docker_dedicated_journal: PLAYBOOK = site-docker.yml.sample
  bluestore_docker_dmcrypt_journal_collocation: PLAYBOOK = site-docker.yml.sample

  rhcs: CEPH_STABLE_RELEASE = jewel
  jewel: CEPH_STABLE_RELEASE = jewel
  jewel: CEPH_DOCKER_IMAGE_TAG = tag-build-master-jewel-ubuntu-16.04
  kraken: CEPH_STABLE_RELEASE = kraken
  kraken: UPDATE_CEPH_STABLE_RELEASE = luminous
  jewel: UPDATE_CEPH_STABLE_RELEASE = kraken
  luminous: CEPH_STABLE_RELEASE = luminous
  luminous: CEPH_DOCKER_IMAGE_TAG = tag-build-master-luminous-ubuntu-16.04
  luminous: UPDATE_CEPH_STABLE_RELEASE = luminous
  luminous: UPDATE_CEPH_DOCKER_IMAGE_TAG = tag-build-master-luminous-ubuntu-16.04
deps=
  ansible1.9: ansible==1.9.4
  ansible2.1: ansible==2.1

@ -78,27 +87,32 @@ changedir=
  # tests a 1 mon, 1 osd, 1 mds and 1 rgw xenial cluster using raw_multi_journal OSD scenario
  xenial_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
  # tests a 1 mon 1 osd centos7 cluster using journal_collocation OSD scenario
  journal_collocation: {toxinidir}/tests/functional/centos/7/journal-collocation
  journal_collocation: {toxinidir}/tests/functional/centos/7/jrn-col
  # tests a 1 mon 1 osd centos7 cluster using dmcrypt_dedicated_journal OSD scenario
  dmcrypt_journal: {toxinidir}/tests/functional/centos/7/dmcrypt-dedicated-journal
  dmcrypt_journal: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
  # tests a 1 mon 1 osd centos7 cluster using dmcrypt_journal_collocation OSD scenario
  dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/dmcrypt-journal-collocation
  dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/crypt-jrn-col
  # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using raw_multi_journal OSD scenario
  centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster
  # an alias for centos7_cluster; this name is better suited for rhcs testing
  cluster: {toxinidir}/tests/functional/centos/7/cluster
  # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
  docker_cluster: {toxinidir}/tests/functional/centos/7/docker-cluster
  update_docker_cluster: {toxinidir}/tests/functional/centos/7/docker-cluster
  purge_docker_cluster: {toxinidir}/tests/functional/centos/7/docker-cluster
  docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/docker-cluster-dedicated-journal
  docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/docker-cluster-dmcrypt-journal-collocation
  docker_cluster: {toxinidir}/tests/functional/centos/7/docker
  update_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
  purge_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
  docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/docker-ded-jrn
  docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/docker-crypt-jrn-col
  purge_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
  purge_dmcrypt: {toxinidir}/tests/functional/centos/7/dmcrypt-dedicated-journal
  update_dmcrypt: {toxinidir}/tests/functional/centos/7/dmcrypt-dedicated-journal
  purge_dmcrypt: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
  update_dmcrypt: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
  update_cluster: {toxinidir}/tests/functional/centos/7/cluster
  bluestore_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-journal-collocation
  bluestore_cluster: {toxinidir}/tests/functional/centos/7/bs-cluster
  bluestore_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-jrn-col
  bluestore_cluster: {toxinidir}/tests/functional/centos/7/bluestore
  bluestore_dmcrypt_journal: {toxinidir}/tests/functional/centos/7/bs-crypt-ded-jrn
  bluestore_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-crypt-jrn-col
  bluestore_docker_cluster: {toxinidir}/tests/functional/centos/7/bs-docker
  bluestore_docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/bs-dock-ded-jrn
  bluestore_docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-dock-crypt-jrn-col

commands=
  rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"