Add etcd proxy support

* Enforce an etcd-proxy role for all k8s-cluster group members. This
provides an HA layout for all of the k8s cluster's internal clients.
* Proxies run on each node in the group as separate etcd instances
in readwrite proxy mode, listening on the given endpoint, which is
either access_ip:2379 or localhost:2379.
* The notion of 'etcd_multiaccess' is: ignore endpoints and load
balancers and use the etcd members' IPs as a comma-separated list.
Otherwise, clients use the local endpoint provided by the etcd-proxy
instance on each node. Networking plugins always use that access
mode (see the endpoint sketch after this list).
* Fix apiserver's etcd servers args to use the etcd_access_endpoint.
* Fix networking plugins flannel/calico to use the etcd_endpoint.
* Fix the name env var so it is set for non-masters as well.
* Fix etcd_client_url, which was not used anywhere, and deduplicate
the etcd_* facts evaluation, which was repeated in a few places.
* Define proxy modes only in the env file, if not a master. Drop the
automatic proxy mode decisions for etcd nodes from init/unit scripts.
* Use Wants= instead of Requires= as "This is the recommended way to
hook start-up of one unit to the start-up of another unit"
* Make apiserver/calico Wants= etcd-proxy to keep it always up
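
For illustration, a sketch of the resolved client endpoint in the two
access modes, assuming a hypothetical three-member etcd cluster at
10.0.0.1-10.0.0.3 and no access_ip override:

  # etcd_multiaccess: true  -> clients talk to all members directly:
  #   etcd_access_endpoint = http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379
  # etcd_multiaccess: false -> clients use the local etcd-proxy endpoint:
  #   etcd_access_endpoint = http://127.0.0.1:2379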

Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
Co-authored-by: Matthew Mosesohn <mmosesohn@mirantis.com>
pull/355/head
Bogdan Dobrelya 2016-07-11 16:05:05 +02:00
parent a2540e3318
commit 32cd6e99b2
26 changed files with 396 additions and 56 deletions

@@ -51,6 +51,23 @@ cluster_name: cluster.local
# but don't know about that address themselves.
# access_ip: 1.1.1.1
# Service endpoints. May be a VIP or a load balanced frontend IP, like one
# that a HAProxy or Nginx provides, or just a local service endpoint.
#
# Etcd endpoints use local etcd-proxies to reach the etcd cluster via
# auto-evaluated endpoints. Those reuse the access_ip for the etcd cluster,
# if specified, or fall back to localhost:2379.
# Etcd access modes:
# Enable multiaccess to configure clients to access all of the etcd members directly
# as "http://hostX:port,http://hostY:port,..." and ignore the proxy load balancers.
# This may be the case if clients support and load balance multiple etcd servers natively.
etcd_multiaccess: false
#
# TODO apiserver localhost:8080 and localhost:443 endpoints for kubelets and
# (hyper)kube-* and networking components.
# Choose network plugin (calico, weave or flannel)
kube_network_plugin: flannel

@@ -6,6 +6,13 @@
- start etcd
- reload etcd
- name: restart etcd-proxy
command: /bin/true
notify:
- reload systemd
- start etcd-proxy
- reload etcd-proxy
- name: reload systemd
command: systemctl daemon-reload
when: ansible_service_mgr == "systemd"
@@ -15,8 +22,23 @@
name: etcd
enabled: yes
state: started
when: is_etcd_master
- name: start etcd-proxy
service:
name: etcd-proxy
enabled: yes
state: started
when: is_etcd_proxy
- name: reload etcd
service:
name: etcd
state: "{{ 'restarted' if etcd_deployment_type == 'host' else 'reloaded' }}"
when: is_etcd_master
- name: reload etcd-proxy
service:
name: etcd-proxy
state: "{{ 'restarted' if etcd_deployment_type == 'host' else 'reloaded' }}"
when: is_etcd_proxy

@@ -15,7 +15,7 @@
src: "etcd-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd.service
backup: yes
when: ansible_service_mgr == "systemd"
when: ansible_service_mgr == "systemd" and is_etcd_master
notify: restart etcd
- name: Configure | Write etcd initd script
@@ -24,5 +24,21 @@
dest: /etc/init.d/etcd
owner: root
mode: 0755
when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian"
when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian" and is_etcd_master
notify: restart etcd
- name: Configure | Copy etcd-proxy.service systemd file
template:
src: "etcd-proxy-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd-proxy.service
backup: yes
when: ansible_service_mgr == "systemd" and is_etcd_proxy
notify: restart etcd-proxy
- name: Configure | Write etcd-proxy initd script
template:
src: "deb-etcd-proxy-{{ etcd_deployment_type }}.initd.j2"
dest: /etc/init.d/etcd-proxy
owner: root
mode: 0755
when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian" and is_etcd_proxy
notify: restart etcd-proxy

@@ -8,7 +8,12 @@
- name: Restart etcd if binary changed
command: /bin/true
notify: restart etcd
when: etcd_deployment_type == "host" and etcd_copy.stdout_lines
when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_master
- name: Restart etcd-proxy if binary changed
command: /bin/true
notify: restart etcd-proxy
when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_proxy
# Reload systemd before starting service
- meta: flush_handlers
@@ -18,6 +23,14 @@
name: etcd
state: started
enabled: yes
when: is_etcd_master
- name: Ensure etcd-proxy is running
service:
name: etcd-proxy
state: started
enabled: yes
when: is_etcd_proxy
# After etcd cluster is assembled, make sure that
# initial state of the cluster is in `existing`

@@ -4,3 +4,11 @@
src: etcd.j2
dest: /etc/etcd.env
notify: restart etcd
when: is_etcd_master
- name: Refresh config | Create etcd-proxy config file
template:
src: etcd-proxy.j2
dest: /etc/etcd-proxy.env
notify: restart etcd-proxy
when: is_etcd_proxy

@@ -1,17 +1,28 @@
---
- set_fact: etcd_access_address="{{ access_ip | default(ip | default(ansible_default_ipv4['address'])) }}"
- set_fact: etcd_address="{{ ip | default(ansible_default_ipv4['address']) }}"
- set_fact: etcd_access_address="{{ access_ip | default(etcd_address) }}"
- set_fact: etcd_peer_url="http://{{ etcd_access_address }}:2380"
- set_fact: etcd_client_url="http://{{ etcd_access_address }}:2379"
- set_fact: etcd_authority="{{ access_ip|default('127.0.0.1') }}:2379"
- set_fact: etcd_endpoint="http://{{ etcd_authority }}"
- set_fact:
etcd_access_addresses: |-
{% for item in groups['etcd'] -%}
http://{{ hostvars[item].etcd_access_address }}:2379{% if not loop.last %},{% endif %}
{%- endfor %}
- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
- set_fact:
etcd_member_name: |-
{% for host in groups['etcd'] %}
{% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
{% endfor %}
- set_fact:
etcd_proxy_member_name: |-
{% for host in groups['k8s-cluster'] %}
{% if inventory_hostname == host %}{{"etcd-proxy"+loop.index|string }}{% endif %}
{% endfor %}
- set_fact:
is_etcd_proxy: "{{ inventory_hostname in groups['k8s-cluster'] }}"
- set_fact:
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
- set_fact:
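
To make the derived facts concrete, a sketch of their evaluated values
on a hypothetical node that is the first host in both the etcd and
k8s-cluster groups, with ip=10.0.0.1, no access_ip, and
etcd_multiaccess left at false:

  # etcd_address           = 10.0.0.1
  # etcd_access_address    = 10.0.0.1
  # etcd_peer_url          = http://10.0.0.1:2380
  # etcd_client_url        = http://10.0.0.1:2379
  # etcd_authority         = 127.0.0.1:2379
  # etcd_endpoint          = http://127.0.0.1:2379
  # etcd_access_endpoint   = http://127.0.0.1:2379
  # etcd_member_name       = etcd1
  # etcd_proxy_member_name = etcd-proxy1
  # is_etcd_master         = true
  # is_etcd_proxy          = true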

@@ -16,23 +16,16 @@ PATH=/sbin:/usr/sbin:/bin/:/usr/bin
DESC="etcd k/v store"
NAME=etcd
DAEMON={{ docker_bin_dir | default("/usr/bin") }}/docker
{% if is_etcd_master %}
DAEMON_ARGS='--restart=always --env-file=/etc/etcd.env \
--net=host \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
-v /var/lib/etcd:/var/lib/etcd:rw \
--name={{ etcd_member_name | default("etcd-proxy") }} \
--name={{ etcd_member_name | default("etcd") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd \
{% endif %}
{% if is_etcd_master %}
--proxy off
{% else %}
--proxy on
{{ etcd_container_bin_dir }}etcd
{% endif %}'
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=etcd
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
@@ -41,9 +34,6 @@ PID=/var/run/etcd.pid
# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -f /etc/etcd.env ] && . /etc/etcd.env
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.

@@ -16,11 +16,6 @@ PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="etcd k/v store"
NAME=etcd
DAEMON={{ bin_dir }}/etcd
{% if is_etcd_master %}
DAEMON_ARGS=""
{% else %}
DAEMON_ARGS="--proxy on"
{% endif %}
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=etcd
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"

@@ -0,0 +1,117 @@
#!/bin/sh
set -a
### BEGIN INIT INFO
# Provides: etcd-proxy
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: etcd-proxy
# Description:
# etcd-proxy is a proxy for etcd: distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO
PATH=/sbin:/usr/sbin:/bin/:/usr/bin
DESC="etcd-proxy"
NAME=etcd-proxy
DAEMON={{ docker_bin_dir | default("/usr/bin") }}/docker
DAEMON_ARGS='--restart=always --env-file=/etc/etcd-proxy.env \
--net=host \
--stop-signal=SIGKILL \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
--name={{ etcd_proxy_member_name | default("etcd-proxy") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd
{% endif %}'
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=etcd-proxy
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd-proxy.pid
# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
do_status()
{
status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}
# Function that starts the daemon/service
#
do_start()
{
start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
$DAEMON_ARGS \
|| return 2
}
#
# Function that stops the daemon/service
#
do_stop()
{
start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $NAME
RETVAL="$?"
sleep 1
return "$RETVAL"
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
if do_stop; then
log_end_msg 0
else
log_failure_msg "Can't stop etcd-proxy"
log_end_msg 1
fi
;;
status)
if do_status; then
log_end_msg 0
else
log_failure_msg "etcd-proxy is not running"
log_end_msg 1
fi
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
if do_stop; then
if do_start; then
log_end_msg 0
exit 0
else
rc="$?"
fi
else
rc="$?"
fi
log_failure_msg "Can't restart etcd-proxy"
log_end_msg ${rc}
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac

@@ -0,0 +1,110 @@
#!/bin/sh
set -a
### BEGIN INIT INFO
# Provides: etcd-proxy
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: etcd-proxy
# Description:
# etcd-proxy is a proxy for etcd: distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="etcd-proxy"
NAME=etcd-proxy
DAEMON={{ bin_dir }}/etcd
DAEMON_ARGS=""
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=etcd
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd-proxy.pid
# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -f /etc/etcd-proxy.env ] && . /etc/etcd-proxy.env
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
do_status()
{
status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}
# Function that starts the daemon/service
#
do_start()
{
start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
$DAEMON_ARGS \
|| return 2
}
#
# Function that stops the daemon/service
#
do_stop()
{
start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $NAME
RETVAL="$?"
sleep 1
return "$RETVAL"
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
if do_stop; then
log_end_msg 0
else
log_failure_msg "Can't stop etcd-proxy"
log_end_msg 1
fi
;;
status)
if do_status; then
log_end_msg 0
else
log_failure_msg "etcd-proxy is not running"
log_end_msg 1
fi
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
if do_stop; then
if do_start; then
log_end_msg 0
exit 0
else
rc="$?"
fi
else
rc="$?"
fi
log_failure_msg "Can't restart etcd-proxy"
log_end_msg ${rc}
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac

@@ -1,6 +1,6 @@
[Unit]
Description=etcd docker wrapper
Requires=docker.service
Wants=docker.service
After=docker.service
[Service]
@@ -13,15 +13,10 @@ ExecStart={{ docker_bin_dir | default("/usr/bin") }}/docker run --restart=always
--net=host \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
-v /var/lib/etcd:/var/lib/etcd:rw \
--name={{ etcd_member_name | default("etcd-proxy") }} \
--name={{ etcd_member_name | default("etcd") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd \
{% endif %}
{% if is_etcd_master %}
--proxy off
{% else %}
--proxy on
{{ etcd_container_bin_dir }}etcd
{% endif %}
ExecStartPre=-{{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_member_name | default("etcd-proxy") }}
ExecReload={{ docker_bin_dir | default("/usr/bin") }}/docker restart {{ etcd_member_name | default("etcd-proxy") }}

@@ -6,11 +6,7 @@ After=network.target
Type=notify
User=etcd
EnvironmentFile=/etc/etcd.env
{% if inventory_hostname in groups['etcd'] %}
ExecStart={{ bin_dir }}/etcd
{% else %}
ExecStart={{ bin_dir }}/etcd -proxy on
{% endif %}
NotifyAccess=all
Restart=always
RestartSec=10s

@@ -0,0 +1,28 @@
[Unit]
Description=etcd-proxy docker wrapper
Wants=docker.service
After=docker.service
[Service]
User=root
PermissionsStartOnly=true
ExecStart={{ docker_bin_dir | default("/usr/bin") }}/docker run --restart=always \
--env-file=/etc/etcd-proxy.env \
{# TODO(mattymo): Allow docker IP binding and disable in envfile
-p 2380:2380 -p 2379:2379 #}
--net=host \
--stop-signal=SIGKILL \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
--name={{ etcd_proxy_member_name | default("etcd-proxy") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd
{% endif %}
ExecStartPre=-{{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_proxy_member_name | default("etcd-proxy") }}
ExecReload={{ docker_bin_dir | default("/usr/bin") }}/docker restart {{ etcd_proxy_member_name | default("etcd-proxy") }}
ExecStop={{ docker_bin_dir | default("/usr/bin") }}/docker stop {{ etcd_proxy_member_name | default("etcd-proxy") }}
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,19 @@
[Unit]
Description=etcd-proxy
After=network.target
[Service]
Type=notify
User=etcd
PermissionsStartOnly=true
EnvironmentFile=/etc/etcd-proxy.env
ExecStart={{ bin_dir }}/etcd
ExecStartPre=/bin/mkdir -p /var/lib/etcd-proxy
ExecStartPre=/bin/chown -R etcd: /var/lib/etcd-proxy
NotifyAccess=all
Restart=always
RestartSec=10s
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,5 @@
ETCD_DATA_DIR=/var/lib/etcd-proxy
ETCD_PROXY=on
ETCD_LISTEN_CLIENT_URLS={{ etcd_access_endpoint }}
ETCD_NAME={{ etcd_proxy_member_name | default("etcd-proxy") }}
ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
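
A sketch of what this template might render to on the first
k8s-cluster node, assuming etcd_multiaccess: false, no access_ip, and
three hypothetical etcd members at 10.0.0.1-10.0.0.3:

  ETCD_DATA_DIR=/var/lib/etcd-proxy
  ETCD_PROXY=on
  ETCD_LISTEN_CLIENT_URLS=http://127.0.0.1:2379
  ETCD_NAME=etcd-proxy1
  ETCD_INITIAL_CLUSTER=etcd1=http://10.0.0.1:2380,etcd2=http://10.0.0.2:2380,etcd3=http://10.0.0.3:2380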

@@ -1,14 +1,12 @@
ETCD_DATA_DIR=/var/lib/etcd
{% if is_etcd_master %}
ETCD_ADVERTISE_CLIENT_URLS=http://{{ hostvars[inventory_hostname]['access_ip'] | default(hostvars[inventory_hostname]['ip'] | default( hostvars[inventory_hostname]['ansible_default_ipv4']['address'])) }}:2379
ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ hostvars[inventory_hostname]['access_ip'] | default(hostvars[inventory_hostname]['ip'] | default( hostvars[inventory_hostname]['ansible_default_ipv4']['address'])) }}:2380
ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_url }}
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_url }}
ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %}
ETCD_LISTEN_CLIENT_URLS=http://{{ etcd_address }}:2379
ETCD_ELECTION_TIMEOUT=10000
ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}:2380
ETCD_LISTEN_PEER_URLS=http://{{ etcd_address }}:2380
ETCD_NAME={{ etcd_member_name }}
{% endif %}
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
ETCD_LISTEN_CLIENT_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}:2379,http://127.0.0.1:2379

@@ -27,7 +27,7 @@ KUBE_API_INSECURE_BIND="--insecure-bind-address={{ kube_apiserver_insecure_bind_
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
# Location of the etcd cluster
KUBE_ETCD_SERVERS="--etcd_servers={% for host in groups['etcd'] %}http://{{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
KUBE_ETCD_SERVERS="--etcd_servers={{ etcd_access_endpoint }}"
# Bind address for secure endpoint
KUBE_API_ADDRESS="--bind-address={{ ip | default(ansible_default_ipv4.address) }}"

@@ -1,8 +1,8 @@
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd.service
After=etcd.service
Wants=etcd-proxy.service
After=etcd-proxy.service
[Service]
EnvironmentFile=/etc/kubernetes/kube-apiserver.env

@@ -11,7 +11,7 @@ spec:
- /hyperkube
- apiserver
- --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
- --etcd-servers={% for srv in groups['etcd'] %}http://{{ hostvars[srv]['access_ip'] | default(hostvars[srv]['ip']|default(hostvars[srv]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %}{% endfor %}
- --etcd-servers={{ etcd_access_endpoint }}
- --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
- --service-cluster-ip-range={{ kube_service_addresses }}
- --client-ca-file={{ kube_cert_dir }}/ca.pem

@@ -1,7 +1,7 @@
{
"name": "calico-k8s-network",
"type": "calico",
"etcd_authority": "127.0.0.1:2379",
"etcd_authority": "{{ etcd_authority }}",
"log_level": "info",
"ipam": {
"type": "calico-ipam"

@@ -44,7 +44,7 @@
- name: Calico | Check if calico network pool has already been configured
uri:
url: "http://127.0.0.1:2379/v2/keys/calico/v1/ipam/v4/pool"
url: "{{ etcd_endpoint }}/v2/keys/calico/v1/ipam/v4/pool"
return_content: yes
status_code: 200,404
register: calico_conf
@@ -70,7 +70,7 @@
- name: Calico | Get calico configuration from etcd
uri:
url: "http://127.0.0.1:2379/v2/keys/calico/v1/ipam/v4/pool"
url: "{{ etcd_endpoint }}/v2/keys/calico/v1/ipam/v4/pool"
return_content: yes
register: calico_pools
run_once: true

@@ -1,8 +1,8 @@
[Unit]
Description=Calico per-node agent
Documentation=https://github.com/projectcalico/calico-docker
Requires=docker.service
After=docker.service etcd.service
Wants=docker.service etcd-proxy.service
After=docker.service etcd-proxy.service
[Service]
User=root

@@ -1,6 +1,6 @@
#!/bin/bash
/usr/bin/docker run --privileged --rm \
--net=host -e ETCD_AUTHORITY=127.0.0.1:2379 \
--net=host -e ETCD_AUTHORITY={{ etcd_authority }} \
-v /usr/bin/docker:/usr/bin/docker \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /var/run/calico:/var/run/calico \

@@ -6,4 +6,4 @@ DEFAULT_IPV4={{ip | default(ansible_default_ipv4.address) }}
KUBERNETES_MASTER={{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}
# IP and port of etcd instance used by Calico
ETCD_AUTHORITY=127.0.0.1:2379
ETCD_AUTHORITY={{ etcd_authority }}

@@ -21,7 +21,7 @@
args:
- "--network-config=/etc/flannel-network.json"
- "--etcd-prefix=/{{ cluster_name }}/network"
- "--etcd-server=http://{{ groups['etcd'][0] }}:2379"
- "--etcd-server={{ etcd_endpoint }}"
volumeMounts:
- name: "networkconfig"
mountPath: "/etc/flannel-network.json"
@@ -31,7 +31,7 @@
command:
- "/bin/sh"
- "-c"
- "/opt/bin/flanneld -etcd-endpoints {% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %} -etcd-prefix /{{ cluster_name }}/network {% if flannel_interface is defined %}-iface {{ flannel_interface }}{% endif %} {% if flannel_public_ip is defined %}-public-ip {{ flannel_public_ip }}{% endif %}"
- "/opt/bin/flanneld -etcd-endpoints {{ etcd_access_endpoint }} -etcd-prefix /{{ cluster_name }}/network {% if flannel_interface is defined %}-iface {{ flannel_interface }}{% endif %} {% if flannel_public_ip is defined %}-public-ip {{ flannel_public_ip }}{% endif %}"
ports:
- hostPort: 10253
containerPort: 10253