kubespray/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2

66 lines
2.1 KiB
Django/Jinja

# This manifest deploys the Contiv API Proxy Server on Kubernetes.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: contiv-api-proxy
  namespace: kube-system
  labels:
    k8s-app: contiv-api-proxy
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      k8s-app: contiv-api-proxy
  template:
    metadata:
      name: contiv-api-proxy
      namespace: kube-system
      labels:
        k8s-app: contiv-api-proxy
    spec:
      priorityClassName: system-node-critical
      # The API proxy must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      hostPID: true
      # Pin the proxy to control-plane (master) nodes.
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
        # Tolerate all taints so the pod can schedule onto tainted masters.
        - operator: Exists
        # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
        - key: CriticalAddonsOnly
          operator: "Exists"
      serviceAccountName: contiv-netmaster
      containers:
        - name: contiv-api-proxy
          # Quoted so the rendered template value is always a plain string scalar.
          image: "{{ contiv_auth_proxy_image_repo }}:{{ contiv_auth_proxy_image_tag }}"
          args:
            - --listen-address=0.0.0.0:{{ contiv_api_proxy_port }}
            - --tls-key-file=/var/contiv/auth_proxy_key.pem
            - --tls-certificate=/var/contiv/auth_proxy_cert.pem
            # $(VAR) references are expanded by Kubernetes from the env section below.
            - --data-store-driver=$(STORE_DRIVER)
            - --data-store-address=$(CONTIV_ETCD)
            - --netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}
          env:
            - name: NO_NETMASTER_STARTUP_CHECK
              value: "0"
            - name: STORE_DRIVER
              value: etcd
            # Etcd endpoint is sourced from the shared contiv-config ConfigMap.
            - name: CONTIV_ETCD
              valueFrom:
                configMapKeyRef:
                  name: contiv-config
                  key: contiv_etcd
          securityContext:
            privileged: false
          volumeMounts:
            # Host directory holding the TLS key/cert referenced in args above.
            - mountPath: /var/contiv
              name: var-contiv
              readOnly: false
      volumes:
        - name: var-contiv
          hostPath:
            path: /var/contiv