Derek Lemon 2018-06-19 15:13:19 +00:00
commit 4bceaf77ee
45 changed files with 404 additions and 221 deletions

View File

@ -40,7 +40,7 @@ To deploy the cluster you can use :
For Vagrant, we need to install Python dependencies for the provisioning tasks.
Check if Python and pip are installed:
python -v && pip -v
python -V && pip -V
If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
Install the necessary requirements
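
A minimal sketch of that step as an Ansible task, assuming the provisioning dependencies are listed in a requirements.txt at the repository root (the exact install command sits outside this hunk):

# Sketch only: install the Python provisioning dependencies locally
# with Ansible's pip module; the requirements.txt path is an assumption.
- name: Install provisioning dependencies
  hosts: localhost
  connection: local
  tasks:
    - name: Install requirements with pip
      pip:
        requirements: "{{ playbook_dir }}/requirements.txt"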
@ -103,6 +103,7 @@ Supported Components
- [weave](https://github.com/weaveworks/weave) v2.3.0
- Application
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.15.0
- [cert-manager](https://github.com/jetstack/cert-manager/releases) v0.3.0
Note: kubernetes doesn't support newer docker versions. Among other things, kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
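
A hedged way to act on that note with Ansible itself; the docker-ce package name and the presence of the yum versionlock plugin are assumptions:

# Sketch only: hold/pin the Docker package so automatic updates cannot move it.
- name: Hold docker package on Debian/Ubuntu
  dpkg_selections:
    name: docker-ce              # assumed package name
    selection: hold
  when: ansible_os_family == "Debian"

- name: Lock docker version on RHEL/CentOS
  command: yum versionlock add docker-ce   # requires yum-plugin-versionlock
  when: ansible_os_family == "RedHat"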

View File

@ -51,7 +51,7 @@
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: true }
- { role: etcd, tags: etcd, etcd_cluster_setup: true, etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" }
- hosts: k8s-cluster:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

View File

@ -52,13 +52,13 @@ You can modify how Kubespray sets up DNS for your cluster with the variables ``d
## dns_mode
``dns_mode`` configures how Kubespray will set up cluster DNS. There are four modes available:
#### dnsmasq_kubedns (default)
#### dnsmasq_kubedns
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
limitations (e.g. number of nameservers). Kubelet is instructed to use dnsmasq instead of kubedns/skydns.
It is configured to forward all DNS queries belonging to cluster services to kubedns/skydns. All
other queries are forwarded to the nameservers found in ``upstream_dns_servers`` or ``default_resolver``.
#### kubedns
#### kubedns (default)
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
all queries.
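
Selecting between these modes is a group_vars override; a minimal sketch, where the upstream resolvers are illustrative values only:

# group_vars/k8s-cluster.yml (sketch)
dns_mode: dnsmasq_kubedns      # or kubedns, the default
upstream_dns_servers:          # resolvers used for non-cluster queries
  - 8.8.8.8                    # example values, replace with your own
  - 1.1.1.1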

View File

@ -140,6 +140,9 @@ dns_domain: "{{ cluster_name }}"
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## Used to set docker daemon iptables options to true
#docker_iptables_enabled: "true"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
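
As a hedged illustration of the two knobs documented above (the registry CIDR is made up):

# group_vars/all.yml (sketch)
docker_iptables_enabled: "true"                    # let dockerd manage its iptables rules
docker_options: "--insecure-registry=10.0.0.0/8"   # extra daemon flags, passed verbatim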

View File

@ -17,7 +17,7 @@ dockerproject_repo_key_info:
dockerproject_repo_info:
repos:
docker_dns_servers_strict: yes
docker_dns_servers_strict: true
docker_container_storage_setup: false
@ -40,3 +40,6 @@ dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/
dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
# Used to set docker daemon iptables options
docker_iptables_enabled: "false"

View File

@ -56,7 +56,7 @@
- name: check number of nameservers
fail:
msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=no and we will only use the first 3."
msg: "Too many nameservers. You can relax this check by set docker_dns_servers_strict=false in all.yml and we will only use the first 3."
when: docker_dns_servers|length > 3 and docker_dns_servers_strict|bool
- name: rtrim number of nameservers to 3
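
Relaxing the check as the message suggests is a single override; a minimal sketch:

# group_vars/all.yml (sketch): accept longer resolver lists;
# only the first 3 nameservers are passed to the Docker daemon.
docker_dns_servers_strict: false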

View File

@ -1,5 +1,5 @@
[Service]
Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables=false"
Environment="DOCKER_OPTS={{ docker_options|default('') }} --iptables={{ docker_iptables_enabled | default('false') }}"
{% if docker_mount_flags is defined and docker_mount_flags != "" %}
MountFlags={{ docker_mount_flags }}
{% endif %}

View File

@ -132,14 +132,14 @@ kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-aut
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
test_image_repo: busybox
test_image_tag: latest
elasticsearch_version: "v2.4.1"
elasticsearch_image_repo: "gcr.io/google_containers/elasticsearch"
elasticsearch_version: "v5.6.4"
elasticsearch_image_repo: "k8s.gcr.io/elasticsearch"
elasticsearch_image_tag: "{{ elasticsearch_version }}"
fluentd_version: "1.22"
fluentd_image_repo: "gcr.io/google_containers/fluentd-elasticsearch"
fluentd_version: "v2.0.4"
fluentd_image_repo: "k8s.gcr.io/fluentd-elasticsearch"
fluentd_image_tag: "{{ fluentd_version }}"
kibana_version: "v4.6.1"
kibana_image_repo: "gcr.io/google_containers/kibana"
kibana_version: "5.6.4"
kibana_image_repo: "docker.elastic.co/kibana/kibana"
kibana_image_tag: "{{ kibana_version }}"
helm_version: "v2.9.1"
helm_image_repo: "lachlanevenson/k8s-helm"
@ -160,11 +160,9 @@ ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/ngin
ingress_nginx_controller_image_tag: "0.15.0"
ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
ingress_nginx_default_backend_image_tag: "1.4"
cert_manager_version: "v0.2.4"
cert_manager_version: "v0.3.0"
cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"
downloads:
netcheck_server:
@ -583,14 +581,6 @@ downloads:
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
groups:
- kube-node
cert_manager_ingress_shim:
enabled: "{{ cert_manager_enabled }}"
container: true
repo: "{{ cert_manager_ingress_shim_image_repo }}"
tag: "{{ cert_manager_ingress_shim_image_tag }}"
sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
groups:
- kube-node
download_defaults:
container: false
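
Since all of these image coordinates are plain role defaults, an inventory can pin or mirror them; a hedged sketch using the kibana variables shown above (the registry host is an assumption):

# group_vars/k8s-cluster.yml (sketch)
kibana_image_repo: "registry.example.local/kibana/kibana"   # internal mirror
kibana_image_tag: "5.6.4"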

View File

@ -3,6 +3,9 @@
etcd_cluster_setup: true
etcd_events_cluster_setup: false
# Set to true to separate k8s events into a different etcd cluster
etcd_events_cluster_enabled: false
etcd_backup_prefix: "/var/backups"
etcd_data_dir: "/var/lib/etcd"
etcd_events_data_dir: "/var/lib/etcd-events"
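
Turning on the separate events cluster introduced here is then a single inventory flag, which cluster.yml and the apiserver templates further down both key off:

# group_vars/all.yml (sketch): store Kubernetes events in their own etcd cluster.
etcd_events_cluster_enabled: true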

View File

@ -1,9 +1,12 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: efk
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: efk

View File

@ -6,3 +6,4 @@ metadata:
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile

View File

@ -1,15 +1,17 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
apiVersion: extensions/v1beta1
kind: Deployment
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch-logging-v1
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: "{{ elasticsearch_image_tag }}"
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
serviceName: elasticsearch-logging
replicas: 2
selector:
matchLabels:
@ -53,4 +55,10 @@ spec:
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}
initContainers:
- image: alpine:3.6
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
name: elasticsearch-logging-init
securityContext:
privileged: true
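
The init container raises vm.max_map_count because Elasticsearch 5.x refuses to start below 262144. If you would rather set it on the hosts, a hedged Ansible alternative (not part of this role) could look like:

# Sketch only: persist the same kernel setting on the nodes.
- name: Raise vm.max_map_count for Elasticsearch
  sysctl:
    name: vm.max_map_count
    value: "262144"
    state: present
    reload: yes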

View File

@ -1,7 +1,7 @@
---
fluentd_cpu_limit: 0m
fluentd_mem_limit: 200Mi
fluentd_mem_limit: 500Mi
fluentd_cpu_requests: 100m
fluentd_mem_requests: 200Mi
fluentd_config_dir: /etc/kubernetes/fluentd
fluentd_config_file: fluentd.conf
fluentd_config_dir: /etc/fluent/config.d
# fluentd_config_file: fluentd.conf

View File

@ -1,10 +1,19 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: fluentd-config
namespace: "kube-system"
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
{{ fluentd_config_file }}: |
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
</system>
containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
@ -18,7 +27,6 @@ data:
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
# Maintainer: Jimmi Dyson <jimmidyson@gmail.com>
#
# Example
# =======
@ -99,63 +107,87 @@ data:
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
# Kubernetes pod has been restarted (resulting in a several Docker container IDs).
#
# TODO: Propagate the labels associated with a container along with its logs
# so users can query logs using labels as well as or instead of the pod name
# and container name. This is simply done via configuration of the Kubernetes
# fluentd plugin but requires secrets to be enabled in the fluent pod. This is a
# problem yet to be solved as secrets are not usable in static pods which the fluentd
# pod must be until a per-node controller is available in Kubernetes.
# Prevent fluentd from handling records containing its own logs. Otherwise
# it can lead to an infinite loop, when error in sending one message generates
# another message which also fails to be sent and so on.
<match fluent.**>
type null
</match>
# Example:
# Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
type tail
@id fluentd-containers.log
@type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag kubernetes.*
format json
tag raw.kubernetes.*
read_from_head true
<parse>
@type multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</parse>
</source>
# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@id raw.kubernetes
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
type tail
@id minion
@type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/es-salt.pos
pos_file /var/log/salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
type tail
@id startupscript.log
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
tag startupscript
</source>
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
type tail
@id docker.log
@type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/es-docker.log.pos
tag docker
</source>
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
type tail
@id etcd.log
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
@ -163,13 +195,16 @@ data:
pos_file /var/log/es-etcd.log.pos
tag etcd
</source>
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
type tail
@id kubelet.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -179,10 +214,12 @@ data:
pos_file /var/log/es-kubelet.log.pos
tag kubelet
</source>
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
type tail
@id kube-proxy.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -192,10 +229,12 @@ data:
pos_file /var/log/es-kube-proxy.log.pos
tag kube-proxy
</source>
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
type tail
@id kube-apiserver.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -205,10 +244,12 @@ data:
pos_file /var/log/es-kube-apiserver.log.pos
tag kube-apiserver
</source>
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
type tail
@id kube-controller-manager.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -218,10 +259,12 @@ data:
pos_file /var/log/es-kube-controller-manager.log.pos
tag kube-controller-manager
</source>
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
type tail
@id kube-scheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -231,10 +274,12 @@ data:
pos_file /var/log/es-kube-scheduler.log.pos
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
type tail
@id rescheduler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -244,10 +289,12 @@ data:
pos_file /var/log/es-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@id glbc.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -257,10 +304,12 @@ data:
pos_file /var/log/es-glbc.log.pos
tag glbc
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
@id cluster-autoscaler.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
@ -270,59 +319,123 @@ data:
pos_file /var/log/es-cluster-autoscaler.log.pos
tag cluster-autoscaler
</source>
# Logs from systemd-journal for interesting services.
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
@id journald-docker
@type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag docker
</source>
# <source>
# @id journald-container-runtime
# @type systemd
# filters [{ "_SYSTEMD_UNIT": "{% raw %}{{ container_runtime }} {% endraw %}.service" }]
# <storage>
# @type local
# persistent true
# </storage>
# read_from_head true
# tag container-runtime
# </source>
<source>
@id journald-kubelet
@type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag kubelet
</source>
<source>
@id journald-node-problem-detector
@type systemd
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
<storage>
@type local
persistent true
</storage>
read_from_head true
tag node-problem-detector
</source>
forward.input.conf: |-
# Takes the messages sent over TCP
<source>
@type forward
</source>
monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
<source>
@type prometheus
</source>
<source>
@type monitor_agent
</source>
# input plugin that collects metrics from MonitorAgent
<source>
@type prometheus_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for output plugin
<source>
@type prometheus_output_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for in_tail plugin
<source>
@type prometheus_tail_monitor
<labels>
host ${hostname}
</labels>
</source>
output.conf: |-
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
type kubernetes_metadata
@type kubernetes_metadata
</filter>
## Prometheus Exporter Plugin
## input plugin that exports metrics
#<source>
# type prometheus
#</source>
#<source>
# type monitor_agent
#</source>
#<source>
# type forward
#</source>
## input plugin that collects metrics from MonitorAgent
#<source>
# @type prometheus_monitor
# <labels>
# host ${hostname}
# </labels>
#</source>
## input plugin that collects metrics for output plugin
#<source>
# @type prometheus_output_monitor
# <labels>
# host ${hostname}
# </labels>
#</source>
## input plugin that collects metrics for in_tail plugin
#<source>
# @type prometheus_tail_monitor
# <labels>
# host ${hostname}
# </labels>
#</source>
<match **>
type elasticsearch
user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
log_level info
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
host elasticsearch-logging
port 9200
logstash_format true
# Set the chunk limit the same as for fluentd-gcp.
buffer_chunk_limit 2M
# Cap buffer memory usage to 2MiB/chunk * 32 chunks = 64 MiB
buffer_queue_limit 32
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
# Never wait longer than 5 minutes between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 8
retry_forever
retry_max_interval 30
chunk_limit_size 2M
queue_limit_length 8
overflow_action block
</buffer>
</match>

View File

@ -1,32 +1,42 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-elasticsearch/es-controller.yaml
apiVersion: extensions/v1beta1
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: "fluentd-es-v{{ fluentd_version }}"
name: "fluentd-es-{{ fluentd_version }}"
namespace: "kube-system"
labels:
k8s-app: fluentd-es
version: "{{ fluentd_version }}"
kubernetes.io/cluster-service: "true"
version: "v{{ fluentd_version }}"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: "{{ fluentd_version }}"
template:
metadata:
labels:
k8s-app: fluentd-es
kubernetes.io/cluster-service: "true"
version: "v{{ fluentd_version }}"
version: "{{ fluentd_version }}"
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- effect: NoSchedule
operator: Exists
priorityClassName: system-node-critical
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}
containers:
- name: fluentd-es
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"
command:
- '/bin/sh'
- '-c'
- '/usr/sbin/td-agent -c {{ fluentd_config_dir }}/{{ fluentd_config_file}} 2>&1 >> /var/log/fluentd.log'
env:
- name: FLUENTD_ARGS
value: "--no-supervisor -q"
resources:
limits:
{% if fluentd_cpu_limit is defined and fluentd_cpu_limit != "0m" %}
@ -39,22 +49,21 @@ spec:
volumeMounts:
- name: varlog
mountPath: /var/log
- name: dockercontainers
- name: varlibdockercontainers
mountPath: "{{ docker_daemon_graph }}/containers"
readOnly: true
- name: config
- name: config-volume
mountPath: "{{ fluentd_config_dir }}"
nodeSelector:
beta.kubernetes.io/fluentd-ds-ready: "true"
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: dockercontainers
- name: varlibdockercontainers
hostPath:
path: {{ docker_daemon_graph }}/containers
- name: config
- name: config-volume
configMap:
name: fluentd-config
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}

View File

@ -4,3 +4,4 @@ kibana_mem_limit: 0M
kibana_cpu_requests: 100m
kibana_mem_requests: 0M
kibana_service_port: 5601
kibana_base_url: "/api/v1/namespaces/kube-system/services/kibana-logging/proxy"

View File

@ -1,6 +1,6 @@
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.5.2/cluster/addons/fluentd-kibana/kibana-controller.yaml
apiVersion: extensions/v1beta1
# https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.10/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
@ -36,10 +36,12 @@ spec:
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:{{ elasticsearch_service_port }}"
{% if kibana_base_url is defined and kibana_base_url != "" %}
- name: "KIBANA_BASE_URL"
- name: "SERVER_BASEPATH"
value: "{{ kibana_base_url }}"
{% endif %}
- name: XPACK_MONITORING_ENABLED
value: "false"
- name: XPACK_SECURITY_ENABLED
value: "false"
ports:
- containerPort: 5601
name: ui

View File

@ -18,3 +18,6 @@ helm_skip_refresh: false
# Override values for the Tiller Deployment manifest.
# tiller_override: "key1=val1,key2=val2"
# Limit the maximum number of revisions saved per release. Use 0 for no limit.
# tiller_max_history: 0
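
Uncommenting that default feeds the new --history-max flag added to the Tiller install command below; for example:

# group_vars/k8s-cluster.yml (sketch): keep at most 10 revisions per release.
tiller_max_history: 10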

View File

@ -34,6 +34,7 @@
{% if rbac_enabled %} --service-account=tiller{% endif %}
{% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
{% if tiller_override is defined %} --override {{ tiller_override }}{% endif %}
{% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
when: (helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed)
- name: Helm | Set up bash completion

View File

@ -5,7 +5,7 @@ metadata:
name: certificates.certmanager.k8s.io
labels:
app: cert-manager
chart: cert-manager-0.2.8
chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
spec:

View File

@ -5,7 +5,7 @@ metadata:
name: clusterissuers.certmanager.k8s.io
labels:
app: cert-manager
chart: cert-manager-0.2.8
chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
spec:

View File

@ -5,7 +5,7 @@ metadata:
name: cert-manager
labels:
app: cert-manager
chart: cert-manager-0.2.8
chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
rules:

View File

@ -5,7 +5,7 @@ metadata:
name: cert-manager
labels:
app: cert-manager
chart: cert-manager-0.2.8
chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
roleRef:

View File

@ -6,11 +6,15 @@ metadata:
namespace: {{ cert_manager_namespace }}
labels:
app: cert-manager
chart: cert-manager-0.2.8
chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
spec:
replicas: 1
selector:
matchLabels:
k8s-app: cert-manager
release: cert-manager
template:
metadata:
labels:
@ -25,6 +29,7 @@ spec:
imagePullPolicy: {{ k8s_image_pull_policy }}
args:
- --cluster-resource-namespace=$(POD_NAMESPACE)
- --leader-election-namespace=$(POD_NAMESPACE)
env:
- name: POD_NAMESPACE
valueFrom:
@ -37,15 +42,3 @@ spec:
limits:
cpu: {{ cert_manager_cpu_limits }}
memory: {{ cert_manager_memory_limits }}
- name: ingress-shim
image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
requests:
cpu: {{ cert_manager_cpu_requests }}
memory: {{ cert_manager_memory_requests }}
limits:
cpu: {{ cert_manager_cpu_limits }}
memory: {{ cert_manager_memory_limits }}

View File

@ -5,7 +5,7 @@ metadata:
name: issuers.certmanager.k8s.io
labels:
app: cert-manager
chart: cert-manager-0.2.8
chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller
spec:

View File

@ -6,6 +6,6 @@ metadata:
namespace: {{ cert_manager_namespace }}
labels:
app: cert-manager
chart: cert-manager-0.2.8
chart: cert-manager-v0.3.2
release: cert-manager
heritage: Tiller

View File

@ -1,2 +1,7 @@
---
persistent_volumes_enabled: false
storage_classes:
- name: standard
is_default: true
parameters:
availability: nova

View File

@ -1,21 +1,19 @@
---
- name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template
template:
src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {file: openstack-storage-class.yml, type: StorageClass, name: storage-class }
src: "openstack-storage-class.yml.j2"
dest: "{{kube_config_dir}}/openstack-storage-class.yml"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
kube:
name: "{{item.item.name}}"
name: storage-class
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
resource: StorageClass
filename: "{{kube_config_dir}}/openstack-storage-class.yml"
state: "latest"
with_items: "{{ manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- manifests.changed

View File

@ -1,10 +0,0 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: standard
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/cinder
parameters:
availability: nova

View File

@ -0,0 +1,14 @@
{% for class in storage_classes %}
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: "{{ class.name }}"
annotations:
storageclass.kubernetes.io/is-default-class: "{{ class.is_default | default(false) | ternary("true","false") }}"
provisioner: kubernetes.io/cinder
parameters:
{% for key, value in (class.parameters | default({})).items() %}
"{{ key }}": "{{ value }}"
{% endfor %}
{% endfor %}
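
With the role defaults above, this loop renders a single manifest equivalent to the static file it replaces:

---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: "standard"
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/cinder
parameters:
  "availability": "nova"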

View File

@ -2,17 +2,27 @@
"kind" : "Policy",
"apiVersion" : "v1",
"predicates" : [
{"name" : "PodFitsHostPorts"},
{"name" : "PodFitsResources"},
{"name" : "MaxEBSVolumeCount"},
{"name" : "MaxGCEPDVolumeCount"},
{"name" : "MaxAzureDiskVolumeCount"},
{"name" : "MatchInterPodAffinity"},
{"name" : "NoDiskConflict"},
{"name" : "MatchNodeSelector"},
{"name" : "HostName"}
{"name" : "GeneralPredicates"},
{"name" : "CheckNodeMemoryPressure"},
{"name" : "CheckNodeDiskPressure"},
{"name" : "CheckNodePIDPressure"},
{"name" : "CheckNodeCondition"},
{"name" : "PodToleratesNodeTaints"},
{"name" : "CheckVolumeBinding"}
],
"priorities" : [
{"name" : "SelectorSpreadPriority", "weight" : 1},
{"name" : "InterPodAffinityPriority", "weight" : 1},
{"name" : "LeastRequestedPriority", "weight" : 1},
{"name" : "BalancedResourceAllocation", "weight" : 1},
{"name" : "ServiceSpreadingPriority", "weight" : 1},
{"name" : "EqualPriority", "weight" : 1}
{"name" : "NodePreferAvoidPodsPriority", "weight" : 1},
{"name" : "NodeAffinityPriority", "weight" : 1},
{"name" : "TaintTolerationPriority", "weight" : 1}
],
"hardPodAffinitySymmetricWeight" : 10
}

View File

@ -39,7 +39,7 @@ apiServerExtraArgs:
{% if kube_version | version_compare('v1.9', '>=') %}
endpoint-reconciler-type: lease
{% endif %}
{% if etcd_events_cluster_setup %}
{% if etcd_events_cluster_enabled %}
etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
{% endif %}
service-node-port-range: {{ kube_apiserver_node_port_range }}

View File

@ -30,7 +30,7 @@ spec:
- apiserver
- --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
- --etcd-servers={{ etcd_access_addresses }}
{% if etcd_events_cluster_setup %}
{% if etcd_events_cluster_enabled %}
- --etcd-servers-overrides=/events#{{ etcd_events_access_addresses }}
{% endif %}
{% if kube_version | version_compare('v1.9', '<') %}

View File

@ -14,6 +14,9 @@ server = "{{ vsphere_vcenter_ip }}"
{% if vsphere_vm_uuid is defined and vsphere_vm_uuid != "" %}
vm-uuid = "{{ vsphere_vm_uuid }}"
{% endif %}
{% if vsphere_vm_name is defined and vsphere_vm_name != "" %}
vm-name = "{{ vsphere_vm_name }}"
{% endif %}
{% endif %}
{% if kube_version | version_compare('v1.9.2', '>=') %}
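
The new vm-name entry is only emitted when the variable is set, for example per host; the value below is an assumption and should match the VM's name in vCenter:

# host_vars/node1.yml (sketch)
vsphere_vm_name: "k8s-node1"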

View File

@ -1,4 +1,8 @@
---
# Disable swap
- import_tasks: swapoff.yml
when: disable_swap
- import_tasks: verify-settings.yml
tags:
- asserts

View File

@ -0,0 +1,10 @@
---
- name: Remove swapfile from /etc/fstab
mount:
name: swap
fstype: swap
state: absent
- name: Disable swap
command: swapoff -a
when: ansible_swaptotal_mb > 0
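
These tasks only run when disable_swap is set (it defaults to false in the k8s-cluster defaults further down); enabling it is a one-line override, which recent kubelet versions effectively expect since they refuse to start with swap enabled by default:

# group_vars/k8s-cluster.yml (sketch)
disable_swap: true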

View File

@ -17,13 +17,13 @@
- name: Stop if unknown network plugin
assert:
that: network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud']
when: network_plugin is defined
that: kube_network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud', 'cilium', 'contiv']
when: kube_network_plugin is defined
ignore_errors: "{{ ignore_assert_errors }}"
- name: Stop if incompatible network plugin and cloudprovider
assert:
that: network_plugin != 'calico'
that: kube_network_plugin != 'calico'
msg: "Azure and Calico are not compatible. See https://github.com/projectcalico/calicoctl/issues/949 for details."
when: cloud_provider is defined and cloud_provider == 'azure'
ignore_errors: "{{ ignore_assert_errors }}"

View File

@ -33,14 +33,14 @@
'{{ kube_cert_dir }}/front-proxy-client-key.pem',
'{{ kube_cert_dir }}/service-account-key.pem',
{% for host in groups['kube-master'] %}
'{{ kube_cert_dir }}/admin-{{ host }}.pem'
'{{ kube_cert_dir }}/admin-{{ host }}.pem',
'{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% endfor %}]
{% endfor %},
{% for host in groups['k8s-cluster'] %}
'{{ kube_cert_dir }}/node-{{ host }}.pem'
'{{ kube_cert_dir }}/node-{{ host }}-key.pem'
'{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem'
'{{ kube_cert_dir }}/node-{{ host }}.pem',
'{{ kube_cert_dir }}/node-{{ host }}-key.pem',
'{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem',
'{{ kube_cert_dir }}/kube-proxy-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% endfor %}]

View File

@ -12,6 +12,8 @@ kube_api_anonymous_auth: false
# Default value, but will be set to true automatically if detected
is_atomic: false
# Optionally disable swap
disable_swap: false
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.10.4
@ -210,7 +212,7 @@ authorization_modes: ['Node', 'RBAC']
rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelets HTTPS endpoint
kubelet_authentication_token_webhook: false
kubelet_authentication_token_webhook: true
# When enabled, access to the kubelet API requires authorization by delegation to the API server
kubelet_authorization_mode_webhook: false
@ -314,7 +316,7 @@ kube_apiserver_client_key: |-
{%- endif %}
# Set to true to deploy etcd-events cluster
etcd_events_cluster_setup: false
etcd_events_cluster_enabled: false
# Vars for pointing to etcd endpoints
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"

View File

@ -51,3 +51,5 @@ rbac_resources:
# * interface=INTERFACE-REGEX
# see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods
# calico_ip_auto_method: "interface=eth.*"
calico_baremetal_nodename: "{{ inventory_hostname }}"

View File

@ -6,7 +6,7 @@
{% if cloud_provider is defined %}
"nodename": "{{ calico_kubelet_name.stdout }}",
{% else %}
"nodename": "{{ inventory_hostname }}",
"nodename": "{{ calico_baremetal_nodename }}",
{% endif %}
"type": "calico",
"etcd_endpoints": "{{ etcd_access_addresses }}",

View File

@ -4,5 +4,6 @@
command: kubectl delete node {{ item }}
with_items:
- "{{ groups['kube-node'] }}"
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube-master']|first }}"
run_once: true
ignore_errors: yes

View File

@ -11,5 +11,6 @@
with_items:
- "{{ groups['kube-node'] }}"
failed_when: false
delegate_to: "{{ groups['kube-master'][0] }}"
delegate_to: "{{ groups['kube-master']|first }}"
run_once: true
ignore_errors: yes

View File

@ -2,3 +2,4 @@
- name: Install rkt
import_tasks: install.yml
when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

View File

@ -12,6 +12,8 @@ LimitNOFILE=40000
# Container has the following internal mount points:
# /vault/file/ # File backend storage location
# /vault/logs/ # Log files
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/vault.uuid
ExecStart=/usr/bin/rkt run \
--insecure-options=image \
--volume hosts,kind=host,source=/etc/hosts,readOnly=true \
@ -29,9 +31,15 @@ ExecStart=/usr/bin/rkt run \
--volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }} \
--mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
--name={{ vault_container_name }} --net=host \
--uuid-file-save=/var/run/vault.uuid \
--name={{ vault_container_name }} \
--net=host \
--caps-retain=CAP_IPC_LOCK \
--exec vault -- server --config={{ vault_config_dir }}/config.json
--exec vault -- \
server \
--config={{ vault_config_dir }}/config.json
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/vault.uuid
[Install]
WantedBy=multi-user.target