dashboard: align the way containers are managed

This commit aligns the way the different dashboard containers are managed with how
it is currently done for the other ceph daemons, as sketched below.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit cc285c417a)
pull/3999/head
Guillaume Abrioux 2019-05-16 10:56:06 +02:00
parent 567c6ceb43
commit aa80895d19
10 changed files with 89 additions and 145 deletions
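The pattern every role converges on here (a minimal sketch under assumptions, not the literal role content; the "example-exporter" names are hypothetical): the playbook no longer creates, removes or starts containers with shell/command tasks. It only ships a systemd unit and lets systemd start it, and the unit's ExecStart invokes `{{ container_binary }} run ...` directly instead of attaching to a pre-created container.

    # Hypothetical illustration of the pattern applied to each dashboard container.
    # "example-exporter" is a placeholder, not one of the real role/unit names.
    - name: ship systemd service
      template:
        src: example-exporter.service.j2          # unit whose ExecStart runs the container
        dest: /etc/systemd/system/example-exporter.service
        owner: root
        group: root
        mode: 0644

    - name: start the example-exporter service
      systemd:
        name: example-exporter
        state: started
        enabled: yes
        daemon_reload: yes

Tying the container lifecycle to the unit means a plain `systemctl restart` (or the existing Ansible handlers) is enough to recreate the container from the current image and options, which is how the other ceph daemons already behave.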


@@ -1,8 +0,0 @@
---
- name: enable service
# We use the systemd module here so we can use the daemon_reload feature,
# since we're shipping the .service file ourselves
systemd:
name: grafana-server
daemon_reload: true
enabled: true


@@ -16,31 +16,6 @@
- /etc/grafana
- /var/lib/grafana
- name: make sure the grafana-server service is down
service:
name: grafana-server
state: stopped
failed_when: false
# Make sure we re-create the container
- name: remove old grafana-server container
command: "{{ container_binary }} rm -f grafana-server"
changed_when: false
failed_when: false
- name: create grafana-server container
shell: |
{{ container_binary }} create --name grafana-server \
-v "/etc/grafana:/etc/grafana:Z" \
-v "/var/lib/grafana:/var/lib/grafana:Z" \
"--net=host" \
"--cpu-period={{ grafana_container_cpu_period }}" \
"--cpu-quota={{ grafana_container_cpu_period * grafana_container_cpu_cores }}" \
"--memory={{ grafana_container_memory }}GB" \
"--memory-swap={{ grafana_container_memory * 2 }}GB" \
-e "GF_INSTALL_PLUGINS={{ grafana_plugins|join(',') }}" \
"{{ grafana_container_image }}"
- name: ship systemd service
template:
src: grafana-server.service.j2
@@ -48,4 +23,11 @@
owner: root
group: root
mode: 0644
notify: enable service
- name: start the grafana-server service
systemd:
name: grafana-server
state: started
enabled: yes
daemon_reload: yes
failed_when: false


@@ -6,14 +6,25 @@ Description=grafana-server
After=docker.service
{% endif %}
[Service]
EnvironmentFile=-/etc/environment
ExecStart=/usr/bin/{{ container_binary }} start --attach grafana-server
ExecStartPre=-/usr/bin/{{ container_binary }} stop grafana-server
ExecStartPre=-/usr/bin/{{ container_binary }} rm grafana-server
ExecStart=/usr/bin/{{ container_binary }} run --rm --name=grafana-server \
-v /etc/grafana:/etc/grafana:Z \
-v /var/lib/grafana:/var/lib/grafana:Z \
--net=host \
--cpu-period={{ grafana_container_cpu_period }} \
--cpu-quota={{ grafana_container_cpu_period * grafana_container_cpu_cores }} \
--memory={{ grafana_container_memory }}GB \
--memory-swap={{ grafana_container_memory * 2 }}GB \
-e GF_INSTALL_PLUGINS={{ grafana_plugins|join(',') }} \
{{ grafana_container_image }}
ExecStop=-/usr/bin/{{ container_binary }} stop grafana-server
Restart=always
RestartSec=10s
TimeoutStartSec=120
TimeoutStopSec=15
[Install]
WantedBy=multi-user.target


@@ -458,13 +458,3 @@
set_fact:
_rbd_target_api_handler_called: False
listen: "restart ceph rbd-target-api"
- name: restart node-exporter service
listen: "restart node-exporter service"
# We use the systemd module here so we can use the daemon_reload feature,
# since we're shipping the .service file ourselves
systemd:
name: 'node_exporter'
daemon_reload: true
enabled: true
state: restarted


@@ -1,27 +1,4 @@
---
- name: make sure the node_exporter service is down
service:
name: node_exporter
state: stopped
failed_when: false
# Make sure we re-create the container
- name: remove old node-exporter container
command: "{{ container_binary }} rm -f node-exporter"
changed_when: false
failed_when: false
- name: start node-exporter container
shell: |
{{ container_binary }} run --detach --name node-exporter \
-v /proc:/host/proc:ro -v /sys:/host/sys:ro \
--net=host \
{{ node_exporter_container_image }} \
'--path.procfs=/host/proc' \
'--path.sysfs=/host/sys' \
'--no-collector.timex'
notify: restart node-exporter service
- name: ship systemd service
template:
src: node_exporter.service.j2
@@ -29,4 +6,11 @@
owner: root
group: root
mode: 0644
notify: restart node-exporter service
- name: start the node_exporter service
systemd:
name: node_exporter
state: started
enabled: yes
daemon_reload: yes
failed_when: false


@@ -8,7 +8,14 @@ After=docker.service
[Service]
EnvironmentFile=-/etc/environment
ExecStart=/usr/bin/{{ container_binary }} start --attach node-exporter
ExecStartPre=-/usr/bin/{{ container_binary }} rm -f node-exporter
ExecStart=/usr/bin/{{ container_binary }} run --name=node-exporter \
-v /proc:/host/proc:ro -v /sys:/host/sys:ro \
--net=host \
{{ node_exporter_container_image }} \
--path.procfs=/host/proc \
--path.sysfs=/host/sys \
--no-collector.timex
# Make sure the cfg80211 is loaded before running the container, the node
# exporter needs this module loaded to test for presence of wi-fi devices
ExecStartPre=/usr/sbin/modprobe cfg80211


@@ -1,60 +1,4 @@
---
- name: make sure the alertmanager service is down
service:
name: alertmanager
state: stopped
failed_when: false
# Make sure we re-create the container
- name: remove old alertmanager container
command: "{{ container_binary }} rm -f alertmanager"
changed_when: false
failed_when: false
- name: start alertmanager container
shell: |
{{ container_binary }} run --detach --name alertmanager \
-v "{{ alertmanager_conf_dir }}:/etc/alertmanager:Z" \
-v "{{ alertmanager_data_dir }}:/alertmanager:Z" \
"--net=host" \
"--cpu-period={{ alertmanager_container_cpu_period }}" \
"--cpu-quota={{ alertmanager_container_cpu_period * alertmanager_container_cpu_cores }}" \
"--memory={{ alertmanager_container_memory }}GB" \
"--memory-swap={{ alertmanager_container_memory * 2 }}GB" \
"{{ alertmanager_container_image }}" \
"--config.file=/etc/alertmanager/alertmanager.yml" \
"--storage.path=/alertmanager"
notify: service handler
- name: make sure the prometheus service is down
service:
name: prometheus
state: stopped
failed_when: false
# Make sure we re-create the container
- name: remove old prometheus container
command: "{{ container_binary }} rm -f prometheus"
changed_when: false
failed_when: false
- name: start prometheus container
shell: |
{{ container_binary }} run --detach --name prometheus \
-v "{{ prometheus_conf_dir }}:/etc/prometheus:Z" \
-v "{{ prometheus_data_dir }}:/prometheus:Z" \
"--net=host" \
"--user={{ prometheus_user_id }}" \
"--cpu-period={{ prometheus_container_cpu_period }}" \
"--cpu-quota={{ prometheus_container_cpu_period * prometheus_container_cpu_cores }}" \
"--memory={{ prometheus_container_memory }}GB" \
"--memory-swap={{ prometheus_container_memory * 2 }}GB" \
"{{ prometheus_container_image }}" \
"--config.file=/etc/prometheus/prometheus.yml" \
"--storage.tsdb.path=/prometheus" \
"--web.external-url=http://{{ inventory_hostname }}:9090/"
notify: service handler
- name: ship systemd services
template:
src: "{{ item }}.j2"
@@ -66,3 +10,13 @@
- 'alertmanager.service'
- 'prometheus.service'
notify: service handler
- name: start prometheus services
systemd:
name: "{{ item }}"
daemon_reload: true
enabled: true
state: started
with_items:
- prometheus
- alertmanager


@@ -8,7 +8,18 @@ After=docker.service
[Service]
EnvironmentFile=-/etc/environment
ExecStart=/usr/bin/{{ container_binary }} start --attach alertmanager
ExecStartPre=-/usr/bin/{{ container_binary }} rm -f alertmanager
ExecStart=/usr/bin/{{ container_binary }} run --name=alertmanager \
-v "{{ alertmanager_conf_dir }}:/etc/alertmanager:Z" \
-v "{{ alertmanager_data_dir }}:/alertmanager:Z" \
--net=host \
--cpu-period={{ alertmanager_container_cpu_period }} \
--cpu-quota={{ alertmanager_container_cpu_period * alertmanager_container_cpu_cores }} \
--memory={{ alertmanager_container_memory }}GB \
--memory-swap={{ alertmanager_container_memory * 2 }}GB \
{{ alertmanager_container_image }} \
--config.file=/etc/alertmanager/alertmanager.yml \
--storage.path=/alertmanager
ExecStop=/usr/bin/{{ container_binary }} stop alertmanager
Restart=always
RestartSec=10s


@@ -8,7 +8,20 @@ After=docker.service
[Service]
EnvironmentFile=-/etc/environment
ExecStart=/usr/bin/{{ container_binary }} start --attach prometheus
ExecStartPre=-/usr/bin/{{ container_binary }} rm -f prometheus
ExecStart=/usr/bin/{{ container_binary }} run --name=prometheus \
-v "{{ prometheus_conf_dir }}:/etc/prometheus:Z" \
-v "{{ prometheus_data_dir }}:/prometheus:Z" \
--net=host \
--user={{ prometheus_user_id }} \
--cpu-period={{ prometheus_container_cpu_period }} \
--cpu-quota={{ prometheus_container_cpu_period * prometheus_container_cpu_cores }} \
--memory={{ prometheus_container_memory }}GB \
--memory-swap={{ prometheus_container_memory * 2 }}GB \
{{ prometheus_container_image }} \
--config.file=/etc/prometheus/prometheus.yml \
--storage.tsdb.path=/prometheus \
--web.external-url=http://{{ inventory_hostname }}:9090/
ExecStop=/usr/bin/{{ container_binary }} stop prometheus
Restart=always
RestartSec=10s