update osd pool set size command

Since [1] we can't use an OSD pool without replicas (size: 1) by default.
We now need to set the mon_allow_pool_size_one flag to true in the ceph
configuration and add the --yes-i-really-mean-it flag to the osd pool
set size CLI command.

[1] https://github.com/ceph/ceph/commit/21508bd

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
pull/5142/head
Dimitri Savineau 2020-03-10 20:50:55 -04:00 committed by Guillaume Abrioux
parent b3bbd6bb77
commit e62532de46
54 changed files with 54 additions and 5 deletions

View File

@ -116,7 +116,7 @@
- name: customize pool size
command: >
{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}
osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.size | default(osd_pool_default_size) | int == 1 else '' }}
with_items: "{{ pools | unique }}"
delegate_to: "{{ delegated_node }}"
changed_when: false

View File

@ -72,7 +72,7 @@
run_once: True
- name: customize pool size
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ iscsi_pool_name }} size {{ iscsi_pool_size | default(osd_pool_default_size) }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ iscsi_pool_name }} size {{ iscsi_pool_size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if iscsi_pool_size | default(osd_pool_default_size) | int == 1 else '' }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: iscsi_pool_size | default(osd_pool_default_size) != ceph_osd_pool_default_size

View File

@ -51,7 +51,7 @@
with_items: "{{ cephfs_pools | unique }}"
- name: customize pool size
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.size | default(osd_pool_default_size) | int == 1 else '' }}"
with_items: "{{ cephfs_pools | unique }}"
changed_when: false
when:

View File

@ -46,7 +46,7 @@
- name: customize pool size
command: >
{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}
osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.size | default(osd_pool_default_size) | int == 1 else '' }}
with_items: "{{ openstack_pools | unique }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false

View File

@ -48,7 +48,7 @@
when: item.value.type is not defined or item.value.type == 'replicated'
- name: customize replicated pool size
command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default(osd_pool_default_size) }}"
command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default(osd_pool_default_size) }} {{ '--yes-i-really-mean-it' if item.value.size | default(osd_pool_default_size) | int == 1 else '' }}"
register: result
retries: 60
delay: 3

View File

@ -23,6 +23,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -21,6 +21,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -23,6 +23,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -21,6 +21,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -23,6 +23,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -21,6 +21,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -23,6 +23,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -21,6 +21,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -23,6 +23,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -21,6 +21,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -23,6 +23,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -21,6 +21,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -18,6 +18,7 @@ dashboard_enabled: false
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
handler_health_mon_check_delay: 10

View File

@ -15,6 +15,7 @@ rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
handler_health_mon_check_delay: 10

View File

@ -3,6 +3,7 @@
"global": {
"osd_pool_default_pg_num": 12,
"osd_pool_default_size": 1,
"mon_allow_pool_size_one": true,
"mon_warn_on_pool_no_redundancy": false
}
},

View File

@ -14,6 +14,7 @@ rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: True

View File

@ -6,6 +6,7 @@ cluster_network: "192.168.2.0/24"
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: True

View File

@ -16,6 +16,7 @@ rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
handler_health_mon_check_delay: 10

View File

@ -13,6 +13,7 @@ rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
handler_health_mon_check_delay: 10

View File

@ -3,6 +3,7 @@
"global": {
"osd_pool_default_pg_num": 12,
"osd_pool_default_size": 1,
"mon_allow_pool_size_one": true,
"mon_warn_on_pool_no_redundancy": false
}
},

View File

@ -14,6 +14,7 @@ rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: False

View File

@ -18,6 +18,7 @@ dashboard_enabled: false
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
handler_health_mon_check_delay: 10

View File

@ -15,6 +15,7 @@ rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
handler_health_mon_check_delay: 10

View File

@ -17,6 +17,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -12,6 +12,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -21,6 +21,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -15,6 +15,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -23,6 +23,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -17,6 +17,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -17,6 +17,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -12,6 +12,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -19,6 +19,7 @@ os_tuning_params:
ceph_conf_overrides:
global:
osd_pool_default_pg_num: 8
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_admin_password: $sX!cD$rYU6qR^B!

View File

@ -3,6 +3,7 @@ all:
admin_secret: AQBSV4xaAAAAABAA3VUTiOZTHecau2SnAEVPYQ==
ceph_conf_overrides:
global: {osd_pool_default_pg_num: 8, osd_pool_default_pgp_num: 8, osd_pool_default_size: 1,
mon_allow_pool_size_one: true,
mon_warn_on_pool_no_redundancy: false,
rgw_keystone_accepted_roles: 'Member, admin', rgw_keystone_admin_domain: default,
rgw_keystone_admin_password: RtYPg7AUdsZCGv4Z4rF8FvnaR, rgw_keystone_admin_project: service,

View File

@ -14,6 +14,7 @@ rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: True

View File

@ -23,6 +23,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -23,6 +23,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -21,6 +21,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -21,6 +21,7 @@ os_tuning_params:
- { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
ceph_osd_docker_run_script_path: /var/tmp

View File

@ -11,6 +11,7 @@ public_network: "192.168.79.0/24"
cluster_network: "192.168.80.0/24"
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: False

View File

@ -10,6 +10,7 @@ osd_objectstore: "bluestore"
copy_admin_key: true
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -11,6 +11,7 @@ public_network: "192.168.83.0/24"
cluster_network: "192.168.84.0/24"
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: False

View File

@ -7,6 +7,7 @@ monitor_interface: eth1
radosgw_interface: eth1
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -11,6 +11,7 @@ public_network: "192.168.17.0/24"
cluster_network: "192.168.18.0/24"
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: False

View File

@ -5,6 +5,7 @@ public_network: "192.168.1.0/24"
cluster_network: "192.168.2.0/24"
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -11,6 +11,7 @@ public_network: "192.168.73.0/24"
cluster_network: "192.168.74.0/24"
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: False

View File

@ -10,6 +10,7 @@ ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: False

View File

@ -8,6 +8,7 @@ osd_objectstore: "bluestore"
copy_admin_key: true
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False

View File

@ -12,6 +12,7 @@ ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: False

View File

@ -9,6 +9,7 @@ osd_objectstore: "bluestore"
copy_admin_key: true
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
dashboard_enabled: False