Derek Lemon 2018-06-19 15:13:19 +00:00
commit 4bceaf77ee
45 changed files with 404 additions and 221 deletions


@@ -40,7 +40,7 @@ To deploy the cluster you can use :
 For Vagrant we need to install python dependencies for provisioning tasks.
 Check if Python and pip are installed:
-python -v && pip -v
+python -V && pip -V
 If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
 Install the necessary requirements
@@ -103,6 +103,7 @@ Supported Components
 - [weave](https://github.com/weaveworks/weave) v2.3.0
 - Application
 - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.15.0
+- [cert-manager](https://github.com/jetstack/cert-manager/releases) v0.3.0
 Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
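For operators who want to act on that note, one way to freeze the Docker package from an Ansible play on Debian-family hosts is the `dpkg_selections` module. This is only a hedged sketch, and the `docker-ce` package name is an assumption that depends on which Docker repository was configured:

```yaml
# Illustration only: hold the Docker package so unattended upgrades cannot
# move it to a version that kubelet does not support.
- name: Hold the Docker package at its installed version
  dpkg_selections:
    name: docker-ce        # assumed package name; may be docker-engine instead
    selection: hold
  when: ansible_os_family == "Debian"
```

On RHEL-family hosts the equivalent is the yum versionlock plugin mentioned above.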


@@ -51,7 +51,7 @@
 any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
 roles:
 - { role: kubespray-defaults}
-- { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: true }
+- { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
 - hosts: k8s-cluster:calico-rr
 any_errors_fatal: "{{ any_errors_fatal | default(true) }}"


@@ -52,13 +52,13 @@ You can modify how Kubespray sets up DNS for your cluster with the variables ``d
 ## dns_mode
 ``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
-#### dnsmasq_kubedns (default)
+#### dnsmasq_kubedns
 This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
 limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
 It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
 other queries are forwardet to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``
-#### kubedns
+#### kubedns (default)
 This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
 all queries.
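As a usage sketch for the section above (not part of this change), the mode is chosen with the `dns_mode` variable, typically in the cluster group vars; the file path and upstream resolver addresses below are illustrative assumptions:

```yaml
# e.g. inventory/mycluster/group_vars/k8s-cluster.yml (assumed location)
dns_mode: kubedns            # or dnsmasq_kubedns to keep the dnsmasq DaemonSet
upstream_dns_servers:        # forwarded to for non-cluster queries
  - 8.8.8.8
  - 1.1.1.1
```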


@@ -140,6 +140,9 @@ dns_domain: "{{ cluster_name }}"
 # Path used to store Docker data
 docker_daemon_graph: "/var/lib/docker"
+## Used to set docker daemon iptables options to true
+#docker_iptables_enabled: "true"
 ## A string of extra options to pass to the docker daemon.
 ## This string should be exactly as you wish it to appear.
 ## An obvious use case is allowing insecure-registry access


@@ -17,7 +17,7 @@ dockerproject_repo_key_info:
 dockerproject_repo_info:
 repos:
-docker_dns_servers_strict: yes
+docker_dns_servers_strict: true
 docker_container_storage_setup: false
@@ -40,3 +40,6 @@ dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/
 dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
 dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
 dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
+# Used to set docker daemon iptables options
+docker_iptables_enabled: "false"


@@ -56,7 +56,7 @@
 - name: check number of nameservers
 fail:
-msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3."
+msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in all.yml and we will only use the first 3."
 when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
 - name: rtrim number of nameservers to 3


@@ -1,5 +1,5 @@
 [Service]
-Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables=false"
+Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }}"
 {% if docker_mount_flags is defined and docker_mount_flags != "" %}
 MountFlags={{ docker_mount_flags }}
 {% endif %}


@@ -132,14 +132,14 @@ kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-aut
 kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
 test_image_repo: busybox
 test_image_tag: latest
-elasticsearch_version: "v2.4.1"
+elasticsearch_version: "v5.6.4"
-elasticsearch_image_repo: "gcr.io/google_containers/elasticsearch"
+elasticsearch_image_repo: "k8s.gcr.io/elasticsearch"
 elasticsearch_image_tag: "{{ elasticsearch_version }}"
-fluentd_version: "1.22"
+fluentd_version: "v2.0.4"
-fluentd_image_repo: "gcr.io/google_containers/fluentd-elasticsearch"
+fluentd_image_repo: "k8s.gcr.io/fluentd-elasticsearch"
 fluentd_image_tag: "{{ fluentd_version }}"
-kibana_version: "v4.6.1"
+kibana_version: "5.6.4"
-kibana_image_repo: "gcr.io/google_containers/kibana"
+kibana_image_repo: "docker.elastic.co/kibana/kibana"
 kibana_image_tag: "{{ kibana_version }}"
 helm_version: "v2.9.1"
 helm_image_repo: "lachlanevenson/k8s-helm"
@@ -160,11 +160,9 @@ ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/ngin
 ingress_nginx_controller_image_tag: "0.15.0"
 ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
 ingress_nginx_default_backend_image_tag: "1.4"
-cert_manager_version: "v0.2.4"
+cert_manager_version: "v0.3.0"
 cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
 cert_manager_controller_image_tag: "{{ cert_manager_version }}"
-cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
-cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"
 downloads:
 netcheck_server:
@@ -583,14 +581,6 @@ downloads:
 sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
 groups:
 - kube-node
-cert_manager_ingress_shim:
-enabled: "{{ cert_manager_enabled }}"
-container: true
-repo: "{{ cert_manager_ingress_shim_image_repo }}"
-tag: "{{ cert_manager_ingress_shim_image_tag }}"
-sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
-groups:
-- kube-node
 download_defaults:
 container: false


@@ -3,6 +3,9 @@
 etcd_cluster_setup: true
 etcd_events_cluster_setup: false
+# Set to true to separate k8s events to a different etcd cluster
+etcd_events_cluster_enabled: false
 etcd_backup_prefix: "/var/backups"
 etcd_data_dir: "/var/lib/etcd"
 etcd_events_data_dir: "/var/lib/etcd-events"
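A hedged example of how these new defaults might be overridden from inventory group vars to actually split events into their own etcd cluster (variable names are the ones defined above; the values are illustrative):

```yaml
# Run a separate etcd cluster for Kubernetes events.
etcd_events_cluster_enabled: true
# Keep the events members' data apart from the main cluster (default shown).
etcd_events_data_dir: "/var/lib/etcd-events"
```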


@@ -1,9 +1,12 @@
 ---
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
 name: efk
 namespace: kube-system
+labels:
+kubernetes.io/cluster-service: "true"
+addonmanager.kubernetes.io/mode: Reconcile
 subjects:
 - kind: ServiceAccount
 name: efk


@@ -6,3 +6,4 @@ metadata:
 namespace: kube-system
 labels:
 kubernetes.io/cluster-service: "true"
+addonmanager.kubernetes.io/mode: Reconcile


@@ -1,15 +1,17 @@
 ---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
+# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
-kind: Deployment
+kind: StatefulSet
 metadata:
-name: elasticsearch-logging-v1
+name: elasticsearch-logging
 namespace: kube-system
 labels:
 k8s-app: elasticsearch-logging
 version: "{{ elasticsearch_image_tag }}"
 kubernetes.io/cluster-service: "true"
+addonmanager.kubernetes.io/mode: Reconcile
 spec:
+serviceName: elasticsearch-logging
 replicas: 2
 selector:
 matchLabels:
@@ -53,4 +55,10 @@ spec:
 {% if rbac_enabled %}
 serviceAccountName: efk
 {% endif %}
+initContainers:
+- image: alpine:3.6
+command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
+name: elasticsearch-logging-init
+securityContext:
+privileged: true


@@ -1,7 +1,7 @@
 ---
 fluentd_cpu_limit: 0m
-fluentd_mem_limit: 200Mi
+fluentd_mem_limit: 500Mi
 fluentd_cpu_requests: 100m
 fluentd_mem_requests: 200Mi
-fluentd_config_dir: /etc/kubernetes/fluentd
+fluentd_config_dir: /etc/fluent/config.d
-fluentd_config_file: fluentd.conf
+# fluentd_config_file: fluentd.conf


@@ -1,10 +1,19 @@
+---
+# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
 apiVersion: v1
 kind: ConfigMap
 metadata:
 name: fluentd-config
 namespace: "kube-system"
+labels:
+addonmanager.kubernetes.io/mode: Reconcile
 data:
-{{ fluentd_config_file }}: |
+system.conf: |-
+<system>
+root_dir /tmp/fluentd-buffers/
+</system>
+containers.input.conf: |-
 # This configuration file for Fluentd / td-agent is used
 # to watch changes to Docker log files. The kubelet creates symlinks that
 # capture the pod name, namespace, container name & Docker container ID
@@ -18,7 +27,6 @@ data:
 # See https://github.com/uken/fluent-plugin-elasticsearch &
 # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
 # more information about the plugins.
-# Maintainer: Jimmi Dyson <jimmidyson@gmail.com>
 #
 # Example
 # =======
@@ -99,63 +107,87 @@ data:
 # This makes it easier for users to search for logs by pod name or by
 # the name of the Kubernetes container regardless of how many times the
 # Kubernetes pod has been restarted (resulting in a several Docker container IDs).
 #
-# TODO: Propagate the labels associated with a container along with its logs
-# so users can query logs using labels as well as or instead of the pod name
-# and container name. This is simply done via configuration of the Kubernetes
-# fluentd plugin but requires secrets to be enabled in the fluent pod. This is a
-# problem yet to be solved as secrets are not usable in static pods which the fluentd
-# pod must be until a per-node controller is available in Kubernetes.
-# Prevent fluentd from handling records containing its own logs. Otherwise
-# it can lead to an infinite loop, when error in sending one message generates
-# another message which also fails to be sent and so on.
-<match fluent.**>
-type null
-</match>
-# Example:
+# Json Log Example:
 # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
+# CRI Log Example:
+# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
 <source>
-type tail
+@id fluentd-containers.log
+@type tail
 path /var/log/containers/*.log
 pos_file /var/log/es-containers.log.pos
 time_format %Y-%m-%dT%H:%M:%S.%NZ
-tag kubernetes.*
+tag raw.kubernetes.*
-format json
 read_from_head true
+<parse>
+@type multi_format
+<pattern>
+format json
+time_key time
+time_format %Y-%m-%dT%H:%M:%S.%NZ
+</pattern>
+<pattern>
+format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
+time_format %Y-%m-%dT%H:%M:%S.%N%:z
+</pattern>
+</parse>
 </source>
+# Detect exceptions in the log output and forward them as one log entry.
+<match raw.kubernetes.**>
+@id raw.kubernetes
+@type detect_exceptions
+remove_tag_prefix raw
+message log
+stream stream
+multiline_flush_interval 5
+max_bytes 500000
+max_lines 1000
+</match>
+system.input.conf: |-
 # Example:
 # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
 <source>
-type tail
+@id minion
+@type tail
 format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
 time_format %Y-%m-%d %H:%M:%S
 path /var/log/salt/minion
-pos_file /var/log/es-salt.pos
+pos_file /var/log/salt.pos
 tag salt
 </source>
 # Example:
 # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
 <source>
-type tail
+@id startupscript.log
+@type tail
 format syslog
 path /var/log/startupscript.log
 pos_file /var/log/es-startupscript.log.pos
 tag startupscript
 </source>
 # Examples:
 # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
 # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
+# TODO(random-liu): Remove this after cri container runtime rolls out.
 <source>
-type tail
+@id docker.log
+@type tail
 format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
 path /var/log/docker.log
 pos_file /var/log/es-docker.log.pos
 tag docker
 </source>
 # Example:
 # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
 <source>
-type tail
+@id etcd.log
+@type tail
 # Not parsing this, because it doesn't have anything particularly useful to
 # parse out of it (like severities).
 format none
@@ -163,13 +195,16 @@ data:
 pos_file /var/log/es-etcd.log.pos
 tag etcd
 </source>
 # Multi-line parsing is required for all the kube logs because very large log
 # statements, such as those that include entire object bodies, get split into
 # multiple lines by glog.
 # Example:
 # I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
 <source>
-type tail
+@id kubelet.log
+@type tail
 format multiline
 multiline_flush_interval 5s
 format_firstline /^\w\d{4}/
@@ -179,10 +214,12 @@ data:
 pos_file /var/log/es-kubelet.log.pos
 tag kubelet
 </source>
 # Example:
 # I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
 <source>
-type tail
+@id kube-proxy.log
+@type tail
 format multiline
 multiline_flush_interval 5s
 format_firstline /^\w\d{4}/
@@ -192,10 +229,12 @@ data:
 pos_file /var/log/es-kube-proxy.log.pos
 tag kube-proxy
 </source>
 # Example:
 # I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
 <source>
-type tail
+@id kube-apiserver.log
+@type tail
 format multiline
 multiline_flush_interval 5s
 format_firstline /^\w\d{4}/
@@ -205,10 +244,12 @@ data:
 pos_file /var/log/es-kube-apiserver.log.pos
 tag kube-apiserver
 </source>
 # Example:
 # I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
 <source>
-type tail
+@id kube-controller-manager.log
+@type tail
 format multiline
 multiline_flush_interval 5s
 format_firstline /^\w\d{4}/
@@ -218,10 +259,12 @@ data:
 pos_file /var/log/es-kube-controller-manager.log.pos
 tag kube-controller-manager
 </source>
 # Example:
 # W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
 <source>
-type tail
+@id kube-scheduler.log
+@type tail
 format multiline
 multiline_flush_interval 5s
 format_firstline /^\w\d{4}/
@@ -231,10 +274,12 @@ data:
 pos_file /var/log/es-kube-scheduler.log.pos
 tag kube-scheduler
 </source>
 # Example:
 # I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
 <source>
-type tail
+@id rescheduler.log
+@type tail
 format multiline
 multiline_flush_interval 5s
 format_firstline /^\w\d{4}/
@@ -244,10 +289,12 @@ data:
 pos_file /var/log/es-rescheduler.log.pos
 tag rescheduler
 </source>
 # Example:
 # I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
 <source>
-type tail
+@id glbc.log
+@type tail
 format multiline
 multiline_flush_interval 5s
 format_firstline /^\w\d{4}/
@@ -257,10 +304,12 @@ data:
 pos_file /var/log/es-glbc.log.pos
 tag glbc
 </source>
 # Example:
 # I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
 <source>
-type tail
+@id cluster-autoscaler.log
+@type tail
 format multiline
 multiline_flush_interval 5s
 format_firstline /^\w\d{4}/
@@ -270,59 +319,123 @@ data:
 pos_file /var/log/es-cluster-autoscaler.log.pos
 tag cluster-autoscaler
 </source>
+# Logs from systemd-journal for interesting services.
+# TODO(random-liu): Remove this after cri container runtime rolls out.
+<source>
+@id journald-docker
+@type systemd
+filters [{ "_SYSTEMD_UNIT": "docker.service" }]
+<storage>
+@type local
+persistent true
+</storage>
+read_from_head true
+tag docker
+</source>
+# <source>
+# @id journald-container-runtime
+# @type systemd
+# filters [{ "_SYSTEMD_UNIT": "{% raw %}{{ container_runtime }} {% endraw %}.service" }]
+# <storage>
+# @type local
+# persistent true
+# </storage>
+# read_from_head true
+# tag container-runtime
+# </source>
+<source>
+@id journald-kubelet
+@type systemd
+filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
+<storage>
+@type local
+persistent true
+</storage>
+read_from_head true
+tag kubelet
+</source>
+<source>
+@id journald-node-problem-detector
+@type systemd
+filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
+<storage>
+@type local
+persistent true
+</storage>
+read_from_head true
+tag node-problem-detector
+</source>
+forward.input.conf: |-
+# Takes the messages sent over TCP
+<source>
+@type forward
+</source>
+monitoring.conf: |-
+# Prometheus Exporter Plugin
+# input plugin that exports metrics
+<source>
+@type prometheus
+</source>
+<source>
+@type monitor_agent
+</source>
+# input plugin that collects metrics from MonitorAgent
+<source>
+@type prometheus_monitor
+<labels>
+host ${hostname}
+</labels>
+</source>
+# input plugin that collects metrics for output plugin
+<source>
+@type prometheus_output_monitor
+<labels>
+host ${hostname}
+</labels>
+</source>
+# input plugin that collects metrics for in_tail plugin
+<source>
+@type prometheus_tail_monitor
+<labels>
+host ${hostname}
+</labels>
+</source>
+output.conf: |-
+# Enriches records with Kubernetes metadata
 <filter kubernetes.**>
-type kubernetes_metadata
+@type kubernetes_metadata
 </filter>
-## Prometheus Exporter Plugin
-## input plugin that exports metrics
-#<source>
-# type prometheus
-#</source>
-#<source>
-# type monitor_agent
-#</source>
-#<source>
-# type forward
-#</source>
-## input plugin that collects metrics from MonitorAgent
-#<source>
-# @type prometheus_monitor
-# <labels>
-# host ${hostname}
-# </labels>
-#</source>
-## input plugin that collects metrics for output plugin
-#<source>
-# @type prometheus_output_monitor
-# <labels>
-# host ${hostname}
-# </labels>
-#</source>
-## input plugin that collects metrics for in_tail plugin
-#<source>
-# @type prometheus_tail_monitor
-# <labels>
-# host ${hostname}
-# </labels>
-#</source>
 <match **>
-type elasticsearch
-user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
-password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
-log_level info
-include_tag_key true
-host elasticsearch-logging
-port 9200
-logstash_format true
-# Set the chunk limit the same as for fluentd-gcp.
-buffer_chunk_limit 2M
-# Cap buffer memory usage to 2MiB/chunk * 32 chunks = 64 MiB
-buffer_queue_limit 32
-flush_interval 5s
-# Never wait longer than 5 minutes between retries.
-max_retry_wait 30
-# Disable the limit on the number of retries (retry forever).
-disable_retry_limit
-# Use multiple threads for processing.
-num_threads 8
-</match>
+@id elasticsearch
+@type elasticsearch
+@log_level info
+include_tag_key true
+host elasticsearch-logging
+port 9200
+logstash_format true
+<buffer>
+@type file
+path /var/log/fluentd-buffers/kubernetes.system.buffer
+flush_mode interval
+retry_type exponential_backoff
+flush_thread_count 2
+flush_interval 5s
+retry_forever
+retry_max_interval 30
+chunk_limit_size 2M
+queue_limit_length 8
+overflow_action block
+</buffer>
+</match>


@@ -1,32 +1,42 @@
 ---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
+# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-name: "fluentd-es-v{{ fluentd_version }}"
+name: "fluentd-es-{{ fluentd_version }}"
 namespace: "kube-system"
 labels:
 k8s-app: fluentd-es
+version: "{{ fluentd_version }}"
 kubernetes.io/cluster-service: "true"
-version: "v{{ fluentd_version }}"
+addonmanager.kubernetes.io/mode: Reconcile
 spec:
+selector:
+matchLabels:
+k8s-app: fluentd-es
+version: "{{ fluentd_version }}"
 template:
 metadata:
 labels:
 k8s-app: fluentd-es
 kubernetes.io/cluster-service: "true"
-version: "v{{ fluentd_version }}"
+version: "{{ fluentd_version }}"
+# This annotation ensures that fluentd does not get evicted if the node
+# supports critical pod annotation based priority scheme.
+# Note that this does not guarantee admission on the nodes (#40573).
+annotations:
+scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-tolerations:
-- effect: NoSchedule
-operator: Exists
+priorityClassName: system-node-critical
+{% if rbac_enabled %}
+serviceAccountName: efk
+{% endif %}
 containers:
 - name: fluentd-es
 image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"
-command:
-- '/bin/sh'
-- '-c'
-- '/usr/sbin/td-agent -c {{ fluentd_config_dir }}/{{ fluentd_config_file}} 2>&1 >> /var/log/fluentd.log'
+env:
+- name: FLUENTD_ARGS
+value: "--no-supervisor -q"
 resources:
 limits:
 {% if fluentd_cpu_limit is defined and fluentd_cpu_limit != "0m" %}
@@ -34,27 +44,26 @@ spec:
 {% endif %}
 memory: {{ fluentd_mem_limit }}
 requests:
 cpu: {{ fluentd_cpu_requests }}
 memory: {{ fluentd_mem_requests }}
 volumeMounts:
 - name: varlog
 mountPath: /var/log
-- name: dockercontainers
+- name: varlibdockercontainers
 mountPath: "{{ docker_daemon_graph }}/containers"
 readOnly: true
-- name: config
+- name: config-volume
 mountPath: "{{ fluentd_config_dir }}"
+nodeSelector:
+beta.kubernetes.io/fluentd-ds-ready: "true"
 terminationGracePeriodSeconds: 30
 volumes:
 - name: varlog
 hostPath:
 path: /var/log
-- name: dockercontainers
+- name: varlibdockercontainers
 hostPath:
 path: {{ docker_daemon_graph }}/containers
-- name: config
+- name: config-volume
 configMap:
 name: fluentd-config
-{% if rbac_enabled %}
-serviceAccountName: efk
-{% endif %}


@@ -4,3 +4,4 @@ kibana_mem_limit: 0M
 kibana_cpu_requests: 100m
 kibana_mem_requests: 0M
 kibana_service_port: 5601
+kibana_base_url: "/api/v1/namespaces/kube-system/services/kibana-logging/proxy"


@@ -1,6 +1,6 @@
 ---
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-kibana/kibana-controller.yaml
+# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
 name: kibana-logging
@@ -36,10 +36,12 @@ spec:
 env:
 - name: "ELASTICSEARCH_URL"
 value: "http://elasticsearch-logging:{{ elasticsearch_service_port }}"
-{% if kibana_base_url is defined and kibana_base_url != "" %}
-- name: "KIBANA_BASE_URL"
+- name: "SERVER_BASEPATH"
 value: "{{ kibana_base_url }}"
-{% endif %}
+- name: XPACK_MONITORING_ENABLED
+value: "false"
+- name: XPACK_SECURITY_ENABLED
+value: "false"
 ports:
 - containerPort: 5601
 name: ui


@@ -18,3 +18,6 @@ helm_skip_refresh: false
 # Override values for the Tiller Deployment manifest.
 # tiller_override: "key1=val1,key2=val2"
+# Limit the maximum number of revisions saved per release. Use 0 for no limit.
+# tiller_max_history: 0
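Uncommented, the new variable caps how many revisions Tiller keeps per release and is passed to `helm init` as `--history-max` by the task change further below; the value here is purely illustrative:

```yaml
# Keep at most five stored revisions per Helm release.
tiller_max_history: 5
```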


@@ -34,6 +34,7 @@
 {% if rbac_enabled %} --service-account=tiller{% endif %}
 {% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
 {% if tiller_override is defined %} --override {{ tiller_override }}{% endif %}
+{% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
 when: (helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)
 - name: Helm | Set up bash completion


@@ -5,7 +5,7 @@ metadata:
 name: certificates.certmanager.k8s.io
 labels:
 app: cert-manager
-chart: cert-manager-0.2.8
+chart: cert-manager-v0.3.2
 release: cert-manager
 heritage: Tiller
 spec:


@@ -5,7 +5,7 @@ metadata:
 name: clusterissuers.certmanager.k8s.io
 labels:
 app: cert-manager
-chart: cert-manager-0.2.8
+chart: cert-manager-v0.3.2
 release: cert-manager
 heritage: Tiller
 spec:


@@ -5,7 +5,7 @@ metadata:
 name: cert-manager
 labels:
 app: cert-manager
-chart: cert-manager-0.2.8
+chart: cert-manager-v0.3.2
 release: cert-manager
 heritage: Tiller
 rules:


@@ -5,7 +5,7 @@ metadata:
 name: cert-manager
 labels:
 app: cert-manager
-chart: cert-manager-0.2.8
+chart: cert-manager-v0.3.2
 release: cert-manager
 heritage: Tiller
 roleRef:


@@ -6,11 +6,15 @@ metadata:
 namespace: {{ cert_manager_namespace }}
 labels:
 app: cert-manager
-chart: cert-manager-0.2.8
+chart: cert-manager-v0.3.2
 release: cert-manager
 heritage: Tiller
 spec:
 replicas: 1
+selector:
+matchLabels:
+k8s-app: cert-manager
+release: cert-manager
 template:
 metadata:
 labels:
@@ -25,6 +29,7 @@ spec:
 imagePullPolicy: {{ k8s_image_pull_policy }}
 args:
 - --cluster-resource-namespace=$(POD_NAMESPACE)
+- --leader-election-namespace=$(POD_NAMESPACE)
 env:
 - name: POD_NAMESPACE
 valueFrom:
@@ -37,15 +42,3 @@ spec:
 limits:
 cpu: {{ cert_manager_cpu_limits }}
 memory: {{ cert_manager_memory_limits }}
-- name: ingress-shim
-image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }}
-imagePullPolicy: {{ k8s_image_pull_policy }}
-resources:
-requests:
-cpu: {{ cert_manager_cpu_requests }}
-memory: {{ cert_manager_memory_requests }}
-limits:
-cpu: {{ cert_manager_cpu_limits }}
-memory: {{ cert_manager_memory_limits }}


@@ -5,7 +5,7 @@ metadata:
 name: issuers.certmanager.k8s.io
 labels:
 app: cert-manager
-chart: cert-manager-0.2.8
+chart: cert-manager-v0.3.2
 release: cert-manager
 heritage: Tiller
 spec:


@@ -6,6 +6,6 @@ metadata:
 namespace: {{ cert_manager_namespace }}
 labels:
 app: cert-manager
-chart: cert-manager-0.2.8
+chart: cert-manager-v0.3.2
 release: cert-manager
 heritage: Tiller


@@ -1,2 +1,7 @@
 ---
 persistent_volumes_enabled: false
+storage_classes:
+- name: standard
+is_default: true
+parameters:
+availability: nova
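Because `storage_classes` is now a list, several Cinder-backed classes can be declared at once; in this hedged example the second class and its `type` parameter are invented for illustration:

```yaml
storage_classes:
  - name: standard
    is_default: true
    parameters:
      availability: nova
  - name: fast            # hypothetical extra class
    is_default: false
    parameters:
      availability: nova
      type: ssd           # hypothetical Cinder volume type
```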


@@ -1,21 +1,19 @@
 ---
 - name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template
 template:
-src: "{{item.file}}"
+src: "openstack-storage-class.yml.j2"
-dest: "{{kube_config_dir}}/{{item.file}}"
+dest: "{{kube_config_dir}}/openstack-storage-class.yml"
-with_items:
-- {file: openstack-storage-class.yml, type: StorageClass, name: storage-class }
 register: manifests
 when:
 - inventory_hostname == groups['kube-master'][0]
 - name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
 kube:
-name: "{{item.item.name}}"
+name: storage-class
 kubectl: "{{bin_dir}}/kubectl"
-resource: "{{item.item.type}}"
+resource: StorageClass
-filename: "{{kube_config_dir}}/{{item.item.file}}"
+filename: "{{kube_config_dir}}/openstack-storage-class.yml"
 state: "latest"
-with_items: "{{ manifests.results }}"
 when:
 - inventory_hostname == groups['kube-master'][0]
+- manifests.changed


@ -1,10 +0,0 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: standard
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/cinder
parameters:
availability: nova


@@ -0,0 +1,14 @@
+{% for class in storage_classes %}
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+name: "{{ class.name }}"
+annotations:
+storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}"
+provisioner: kubernetes.io/cinder
+parameters:
+{% for key, value in (class.parameters | default({})).items() %}
+"{{ key }}": "{{ value }}"
+{% endfor %}
+{% endfor %}
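With the default `storage_classes` value shown earlier, this template should render to essentially the same manifest as the static file it replaces:

```yaml
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: "standard"
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/cinder
parameters:
  "availability": "nova"
```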


@@ -2,17 +2,27 @@
 "kind" : "Policy",
 "apiVersion" : "v1",
 "predicates" : [
-{"name" : "PodFitsHostPorts"},
-{"name" : "PodFitsResources"},
+{"name" : "MaxEBSVolumeCount"},
+{"name" : "MaxGCEPDVolumeCount"},
+{"name" : "MaxAzureDiskVolumeCount"},
+{"name" : "MatchInterPodAffinity"},
 {"name" : "NoDiskConflict"},
-{"name" : "MatchNodeSelector"},
-{"name" : "HostName"}
+{"name" : "GeneralPredicates"},
+{"name" : "CheckNodeMemoryPressure"},
+{"name" : "CheckNodeDiskPressure"},
+{"name" : "CheckNodePIDPressure"},
+{"name" : "CheckNodeCondition"},
+{"name" : "PodToleratesNodeTaints"},
+{"name" : "CheckVolumeBinding"}
 ],
 "priorities" : [
+{"name" : "SelectorSpreadPriority", "weight" : 1},
+{"name" : "InterPodAffinityPriority", "weight" : 1},
 {"name" : "LeastRequestedPriority", "weight" : 1},
 {"name" : "BalancedResourceAllocation", "weight" : 1},
-{"name" : "ServiceSpreadingPriority", "weight" : 1},
-{"name" : "EqualPriority", "weight" : 1}
+{"name" : "NodePreferAvoidPodsPriority", "weight" : 1},
+{"name" : "NodeAffinityPriority", "weight" : 1},
+{"name" : "TaintTolerationPriority", "weight" : 1}
 ],
 "hardPodAffinitySymmetricWeight" : 10
 }


@@ -39,7 +39,7 @@ apiServerExtraArgs:
 {% if kube_version | version_compare('v1.9', '>=') %}
 endpoint-reconciler-type: lease
 {% endif %}
-{% if etcd_events_cluster_setup %}
+{% if etcd_events_cluster_enabled %}
 etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
 {% endif %}
 service-node-port-range: {{ kube_apiserver_node_port_range }}


@@ -30,7 +30,7 @@ spec:
 - apiserver
 - --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
 - --etcd-servers={{ etcd_access_addresses }}
-{% if etcd_events_cluster_setup %}
+{% if etcd_events_cluster_enabled %}
 - --etcd-servers-overrides=/events#{{ etcd_events_access_addresses }}
 {% endif %}
 {% if kube_version | version_compare('v1.9', '<') %}


@@ -14,6 +14,9 @@ server = "{{ vsphere_vcenter_ip }}"
 {% if vsphere_vm_uuid is defined and vsphere_vm_uuid != "" %}
 vm-uuid = "{{ vsphere_vm_uuid }}"
 {% endif %}
+{% if vsphere_vm_name is defined and vsphere_vm_name != "" %}
+vm-name = "{{ vsphere_vm_name }}"
+{% endif %}
 {% endif %}
 {% if kube_version | version_compare('v1.9.2', '>=') %}


@@ -1,4 +1,8 @@
 ---
+# Disable swap
+- import_tasks: swapoff.yml
+when: disable_swap
 - import_tasks: verify-settings.yml
 tags:
 - asserts


@@ -0,0 +1,10 @@
+---
+- name: Remove swapfile from /etc/fstab
+mount:
+name: swap
+fstype: swap
+state: absent
+- name: Disable swap
+command: swapoff -a
+when: ansible_swaptotal_mb > 0


@@ -17,13 +17,13 @@
 - name: Stop if unknown network plugin
 assert:
-that: network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud']
+that: kube_network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud', 'cilium', 'contiv']
-when: network_plugin is defined
+when: kube_network_plugin is defined
 ignore_errors: "{{ ignore_assert_errors }}"
 - name: Stop if incompatible network plugin and cloudprovider
 assert:
-that: network_plugin != 'calico'
+that: kube_network_plugin != 'calico'
 msg: "Azure and Calico are not compatible. See https://github.com/projectcalico/calicoctl/issues/949 for details."
 when: cloud_provider is defined and cloud_provider == 'azure'
 ignore_errors: "{{ ignore_assert_errors }}"


@@ -33,14 +33,14 @@
 '{{ kube_cert_dir }}/front-proxy-client-key.pem',
 '{{ kube_cert_dir }}/service-account-key.pem',
 {% for host in groups['kube-master'] %}
-'{{ kube_cert_dir }}/admin-{{ host }}.pem'
+'{{ kube_cert_dir }}/admin-{{ host }}.pem',
 '{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
 {% if not loop.last %}{{','}}{% endif %}
-{% endfor %}]
+{% endfor %},
 {% for host in groups['k8s-cluster'] %}
-'{{ kube_cert_dir }}/node-{{ host }}.pem'
+'{{ kube_cert_dir }}/node-{{ host }}.pem',
-'{{ kube_cert_dir }}/node-{{ host }}-key.pem'
+'{{ kube_cert_dir }}/node-{{ host }}-key.pem',
-'{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem'
+'{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem',
 '{{ kube_cert_dir }}/kube-proxy-{{ host }}-key.pem'
 {% if not loop.last %}{{','}}{% endif %}
 {% endfor %}]


@@ -12,6 +12,8 @@ kube_api_anonymous_auth: false
 # Default value, but will be set to true automatically if detected
 is_atomic: false
+# optional disable the swap
+disable_swap: false
 ## Change this to use another Kubernetes version, e.g. a current beta release
 kube_version: v1.10.4
@@ -210,7 +212,7 @@ authorization_modes: ['Node', 'RBAC']
 rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
 # When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelets HTTPS endpoint
-kubelet_authentication_token_webhook: false
+kubelet_authentication_token_webhook: true
 # When enabled, access to the kubelet API requires authorization by delegation to the API server
 kubelet_authorization_mode_webhook: false
@@ -314,7 +316,7 @@ kube_apiserver_client_key: |-
 {%- endif %}
 # Set to true to deploy etcd-events cluster
-etcd_events_cluster_setup: false
+etcd_events_cluster_enabled: false
 # Vars for pointing to etcd endpoints
 is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"


@@ -51,3 +51,5 @@ rbac_resources:
 # * interface=INTERFACE-REGEX
 # see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods
 # calico_ip_auto_method: "interface=eth.*"
+calico_baremetal_nodename: "{{ inventory_hostname }}"


@@ -6,7 +6,7 @@
 {% if cloud_provider is defined %}
 "nodename": "{{ calico_kubelet_name.stdout }}",
 {% else %}
-"nodename": "{{ inventory_hostname }}",
+"nodename": "{{ calico_baremetal_nodename }}",
 {% endif %}
 "type": "calico",
 "etcd_endpoints": "{{ etcd_access_addresses }}",


@@ -4,5 +4,6 @@
 command: kubectl delete node {{ item }}
 with_items:
 - "{{ groups['kube-node'] }}"
-delegate_to: "{{ groups['kube-master'][0] }}"
+delegate_to: "{{ groups['kube-master']|first }}"
+run_once: true
 ignore_errors: yes


@@ -11,5 +11,6 @@
 with_items:
 - "{{ groups['kube-node'] }}"
 failed_when: false
-delegate_to: "{{ groups['kube-master'][0] }}"
+delegate_to: "{{ groups['kube-master']|first }}"
+run_once: true
 ignore_errors: yes


@@ -2,3 +2,4 @@
 - name: Install rkt
 import_tasks: install.yml
+when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]


@@ -12,26 +12,34 @@ LimitNOFILE=40000
 # Container has the following internal mount points:
 # /vault/file/ # File backend storage location
 # /vault/logs/ # Log files
+ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/vault.uuid
 ExecStart=/usr/bin/rkt run \
 --insecure-options=image \
 --volume hosts,kind=host,source=/etc/hosts,readOnly=true \
 --mount volume=hosts,target=/etc/hosts \
 --volume=volume-vault-file,kind=host,source=/var/lib/vault \
 --volume=volume-vault-logs,kind=host,source={{ vault_log_dir }} \
 --volume=vault-cert-dir,kind=host,source={{ vault_cert_dir }} \
 --mount=volume=vault-cert-dir,target={{ vault_cert_dir }} \
 --volume=vault-conf-dir,kind=host,source={{ vault_config_dir }} \
 --mount=volume=vault-conf-dir,target={{ vault_config_dir }} \
 --volume=vault-secrets-dir,kind=host,source={{ vault_secrets_dir }} \
 --mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \
 --volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \
 --mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \
 --volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }} \
 --mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
 docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
---name={{ vault_container_name }} --net=host \
---caps-retain=CAP_IPC_LOCK \
---exec vault -- server --config={{ vault_config_dir }}/config.json
+--uuid-file-save=/var/run/vault.uuid \
+--name={{ vault_container_name }} \
+--net=host \
+--caps-retain=CAP_IPC_LOCK \
+--exec vault -- \
+server \
+--config={{ vault_config_dir }}/config.json
+ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/vault.uuid
 [Install]
 WantedBy=multi-user.target