mirror of https://github.com/easzlab/kubeasz.git
Adjust the network/cluster-addon installation flow
parent 78cc26db8c
commit d17d938dfc

90.setup.yml — 23 changes
@@ -1,10 +1,10 @@
 # [optional] to synchronize time of nodes with 'chrony'
 - hosts: all
   roles:
-  - { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
+  - { role: chrony, when: "groups['chrony']|length > 0" }
 
-# to create CA, kubeconfig, kube-proxy.kubeconfig etc. on 'deploy' node
-- hosts: deploy
+# to create CA, kubeconfig, kube-proxy.kubeconfig etc.
+- hosts: localhost
   roles:
   - deploy
 
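Two things change in this first play: the chrony role is now enabled by inventory group membership rather than an NTP_ENABLED variable read from the deploy host, and CA/kubeconfig generation moves from the dedicated 'deploy' node to localhost, i.e. the ansible control node itself. A minimal inventory sketch of the new convention (addresses hypothetical):

# example hosts file fragment: listing any host under [chrony] enables the
# role; leaving the group empty skips it, because the condition
# "groups['chrony']|length > 0" then evaluates to false
[chrony]
192.168.1.1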
@@ -12,17 +12,10 @@
 - hosts:
   - kube-master
   - kube-node
-  - deploy
   - etcd
-  - lb
   roles:
   - prepare
 
-# [optional] to install loadbalance service, only needed by multi-master cluster
-- hosts: lb
-  roles:
-  - lb
-
 # to install etcd cluster
 - hosts: etcd
   roles:
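With the deploy and lb groups dropped from the prepare play and the standalone lb play deleted outright, the play after this hunk reads simply:

# new state of the prepare play (post-image of the hunk above):
- hosts:
  - kube-master
  - kube-node
  - etcd
  roles:
  - prepare

Load balancing is not removed, it is decentralized: as the kube-node changes further down show, each node now runs its own local haproxy to reach multiple apiservers.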
@@ -41,23 +34,20 @@
   roles:
   - kube-master
   - kube-node
-  #
   tasks:
   - name: Making master nodes SchedulingDisabled
     shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
-    delegate_to: "{{ groups.deploy[0] }}"
-    when: DEPLOY_MODE != "allinone"
+    when: "inventory_hostname not in groups['kube-node']"
     ignore_errors: true
 
   - name: Setting master role name
     shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
     ignore_errors: true
-    delegate_to: "{{ groups.deploy[0] }}"
 
 # to set up 'kube-node' nodes
 - hosts: kube-node
   roles:
-  - { role: kube-node, when: "DEPLOY_MODE != 'allinone'" }
+  - { role: kube-node, when: "inventory_hostname not in groups['kube-master']" }
 
 # to install network plugin, only one can be choosen
 - hosts:
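Master handling is likewise rewritten in terms of inventory groups instead of the old DEPLOY_MODE flag, and with delegate_to removed, kubectl now runs on each master directly. How the conditions work out (sketch, hypothetical host):

# cordon task ('- hosts: kube-master' play):
#   host only in [kube-master]           -> condition true  -> cordoned
#   host also in [kube-node] (allinone)  -> condition false -> stays schedulable
# kube-node role ('- hosts: kube-node' play):
#   host also in [kube-master]           -> role skipped on it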
@@ -74,5 +64,4 @@
 - hosts:
   - kube-node
   roles:
   - cluster-addon
-
@@ -361,13 +361,6 @@ spec:
             # Choose which controllers to run.
             - name: ENABLED_CONTROLLERS
               value: policy,profile,workloadendpoint,node
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-            # if hosts have multiple net interfaces, set following two ENVs
-            - name: KUBERNETES_SERVICE_HOST
-              value: "{{ MASTER_IP }}"
-            - name: KUBERNETES_SERVICE_PORT
-              value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
           volumeMounts:
             # Mount in the etcd TLS secrets.
             - mountPath: /calico-secrets
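This hunk, and the nearly identical ones that follow for the other calico template versions, cilium, flannel, and metallb, delete the NODE_WITH_MULTIPLE_NETWORKS block: pods no longer get KUBERNETES_SERVICE_HOST/PORT pinned to a single MASTER_IP. For reference, the removed Jinja relied on plain string splitting of the apiserver URL (example value assumed):

# KUBE_APISERVER = "https://192.168.1.10:6443"
# KUBE_APISERVER.split(':')                  -> ['https', '//192.168.1.10', '6443']
# KUBE_APISERVER.split(':')[2]               -> '6443'          (the port)
# KUBE_APISERVER.split(':')[1].lstrip('/')   -> '192.168.1.10'  (the host)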
@@ -370,13 +370,6 @@ spec:
             # Choose which controllers to run.
             - name: ENABLED_CONTROLLERS
               value: policy,namespace,serviceaccount,workloadendpoint,node
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-            # if hosts have multiple net interfaces, set following two ENVs
-            - name: KUBERNETES_SERVICE_HOST
-              value: "{{ MASTER_IP }}"
-            - name: KUBERNETES_SERVICE_PORT
-              value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
           volumeMounts:
             # Mount in the etcd TLS secrets.
             - mountPath: /calico-secrets
@@ -374,13 +374,6 @@ spec:
             # Choose which controllers to run.
             - name: ENABLED_CONTROLLERS
               value: policy,namespace,serviceaccount,workloadendpoint,node
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-            # if hosts have multiple net interfaces, set following two ENVs
-            - name: KUBERNETES_SERVICE_HOST
-              value: "{{ MASTER_IP }}"
-            - name: KUBERNETES_SERVICE_PORT
-              value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
           volumeMounts:
             # Mount in the etcd TLS secrets.
             - mountPath: /calico-secrets
@@ -346,14 +346,6 @@ spec:
               key: tofqdns-pre-cache
               name: cilium-config
               optional: true
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-        # if hosts have multiple net interfaces, set following two ENVs
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         image: docker.io/cilium/cilium:{{ cilium_ver }}
         imagePullPolicy: IfNotPresent
         lifecycle:
@@ -886,14 +878,6 @@ spec:
             fieldRef:
               apiVersion: v1
               fieldPath: metadata.uid
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-        # if hosts have multiple net interfaces, set following two ENVs
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         image: docker.io/cilium/cilium-etcd-operator:v2.0.5
         imagePullPolicy: IfNotPresent
         name: cilium-etcd-operator
@@ -1,4 +1,4 @@
-# dns 自动安装,可选"coredns"和“kubedns”
+# dns 自动安装,'dns_backend'可选"coredns"和“kubedns”
 dns_install: "yes"
 dns_backend: "coredns"
 kubednsVer: "1.14.13"
@@ -17,8 +17,8 @@
   when: 'traefik_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: 创建 traefik部署
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/ingress/traefik/traefik-ingress.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/ingress/traefik/traefik-ingress.yaml"
+  connection: local
   run_once: true
   when: 'ingress_backend == "traefik"'
   ignore_errors: true
@@ -42,8 +42,8 @@
   when: 'nginx_ingress_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: 创建 nginx_ingress部署
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/ingress/nginx-ingress/nginx-ingress.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/ingress/nginx-ingress/nginx-ingress.yaml"
+  connection: local
   run_once: true
   when: 'ingress_backend == "nginx-ingress"'
   ignore_errors: true
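Both ingress tasks swap delegate_to the deploy host for connection: local and take kubectl from {{ base_dir }}/bin — manifests are now applied from the ansible control node. The same substitution repeats in the metrics-server, dashboard, and heapster tasks below; the general shape is:

# recurring pattern (sketch; <some-addon> is a placeholder):
- name: apply a manifest from the control node
  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/<some-addon>.yaml"
  connection: local      # execute on the ansible controller itself
  run_once: true         # a cluster-wide apply only needs to happen once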
@@ -1,21 +1,18 @@
-- block:
-  - name: 在deploy 节点创建相关目录
-    file: path={{ item }} state=directory
-    with_items:
-    - /opt/kube/kube-system/kubedns
-    - /opt/kube/kube-system/coredns
-
-  # DNS文件中部分参数根据hosts文件设置而定,因此需要用template模块替换参数
-  - name: 准备 DNS的部署文件
-    template: src={{ item }}.yaml.j2 dest=/opt/kube/kube-system/{{ item }}/{{ item }}.yaml
-    with_items:
-    - kubedns
-    - coredns
-
-  - name: 获取所有已经创建的POD信息
-    command: "{{ bin_dir }}/kubectl get pod --all-namespaces"
-    register: pod_info
-  delegate_to: "{{ groups.deploy[0] }}"
+- name: 在 node 节点创建相关目录
+  file: path={{ item }} state=directory
+  with_items:
+  - /opt/kube/kube-system
+
+# DNS文件中部分参数根据hosts文件设置而定,因此需要用template模块替换参数
+- name: 准备 DNS的部署文件
+  template: src={{ item }}.yaml.j2 dest=/opt/kube/kube-system/{{ item }}.yaml
+  with_items:
+  - kubedns
+  - coredns
+
+- name: 获取所有已经创建的POD信息
+  command: "{{ bin_dir }}/kubectl get pod --all-namespaces"
+  register: pod_info
   run_once: true
 
 - name: 获取已下载离线镜像信息
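The DNS preparation is flattened: manifests are rendered on every node as single files directly under /opt/kube/kube-system, rather than into per-backend subdirectories on a deploy host. Resulting layout (sketch):

# on each kube node after the template task:
#   /opt/kube/kube-system/kubedns.yaml
#   /opt/kube/kube-system/coredns.yaml
# the apply task below then targets /opt/kube/kube-system/{{ dns_backend }}.yaml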
@@ -43,9 +40,8 @@
   when: 'dns_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: 创建{{ dns_backend }}部署
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ dns_backend }}"
+  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ dns_backend }}.yaml"
   run_once: true
-  delegate_to: "{{ groups.deploy[0] }}"
   when:
   - '"kube-dns" not in pod_info.stdout'
   - '"coredns" not in pod_info.stdout'
@@ -71,9 +67,9 @@
   when: 'metricsserver_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: 创建 metrics-server部署
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/metrics-server"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/metrics-server"
   run_once: true
+  connection: local
   when: '"metrics-server" not in pod_info.stdout and metricsserver_install == "yes"'
   ignore_errors: true
 
@@ -97,10 +93,10 @@
   when: 'dashboard_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: 创建 dashboard部署
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/dashboard && \
-        {{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster-only"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/dashboard && \
+        {{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster-only"
   run_once: true
+  connection: local
   when: '"kubernetes-dashboard" not in pod_info.stdout and dashboard_install == "yes"'
   ignore_errors: true
 
@@ -126,8 +122,8 @@
   when: 'heapster_offline in image_info.stdout and CONTAINER_RUNTIME == "containerd"'
 
 - name: 创建 heapster部署
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ base_dir }}/bin/kubectl apply -f {{ base_dir }}/manifests/heapster/heapster.yaml"
+  connection: local
   run_once: true
   when: '"heapster" not in pod_info.stdout and heapster_install == "yes"'
   ignore_errors: true
@@ -155,18 +151,10 @@
   with_items:
   - "metallb.yaml"
   - "{{ metallb_protocol }}.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
-  run_once: true
 
 - name: 创建 metallb controller 部署
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/metallb.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
-  run_once: true
-
-- name: 创建 metallb configmap
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ metallb_protocol }}.yaml"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/metallb.yaml && \
+        {{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ metallb_protocol }}.yaml"
   run_once: true
   when: '"metallb" not in pod_info.stdout and metallb_install == "yes"'
   ignore_errors: true
-
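The separate controller and configmap tasks, each with its own delegate_to/run_once, collapse into one shell task chaining the two kubectl applies with &&, so the configmap is applied only if the controller manifest applied cleanly. An equivalent loop form would be (sketch only — not what the commit uses):

- name: 创建 metallb 部署
  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/{{ item }}"
  with_items:
  - "metallb.yaml"
  - "{{ metallb_protocol }}.yaml"
  run_once: true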
@@ -154,11 +154,6 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
         ports:
         - name: monitoring
           containerPort: 7472
@@ -1,9 +1,5 @@
-# 部分flannel配置,详见roles/flannel/templates/kube-flannel.yaml.j2
+# 部分flannel配置,参考 docs/setup/network-plugin/flannel.md
 
-# 如果 node 节点有多块网卡,请设置 true
-# 另外发现设置为 true 时能够解决v1.10使用ipvs偶尔出现pod内‘dial tcp 10.68.0.1:443: i/o timeout’的 bug
-NODE_WITH_MULTIPLE_NETWORKS: "true"
-
 # 设置flannel 后端
 #FLANNEL_BACKEND: "host-gw"
 FLANNEL_BACKEND: "vxlan"
@@ -1,11 +1,8 @@
-- block:
-  - name: 在deploy 节点创建相关目录
-    file: name=/opt/kube/kube-system/flannel state=directory
+- name: 创建相关目录
+  file: name=/opt/kube/kube-system state=directory
 
-  - name: 配置 flannel DaemonSet yaml文件
-    template: src=kube-flannel.yaml.j2 dest=/opt/kube/kube-system/flannel/kube-flannel.yaml
-  delegate_to: "{{ groups.deploy[0] }}"
-  run_once: true
+- name: 配置 flannel DaemonSet yaml文件
+  template: src=kube-flannel.yaml.j2 dest=/opt/kube/kube-system/flannel.yaml
 
 - name: 创建flannel cni 相关目录
   file: name={{ item }} state=directory
@@ -61,8 +58,7 @@
 
 # 只需单节点执行一次
 - name: 运行 flannel网络
-  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/flannel/ && sleep 5"
-  delegate_to: "{{ groups.deploy[0] }}"
+  shell: "{{ bin_dir }}/kubectl apply -f /opt/kube/kube-system/flannel.yaml && sleep 5"
   run_once: true
 
 # 删除原有cni配置
@@ -74,8 +70,6 @@
   shell: "{{ bin_dir }}/kubectl get pod -n kube-system -o wide|grep 'flannel'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
   register: pod_status
   until: pod_status.stdout == "Running"
-  delegate_to: "{{ groups.deploy[0] }}"
   retries: 15
   delay: 8
   ignore_errors: true
-
@@ -141,14 +141,6 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
-{% if NODE_WITH_MULTIPLE_NETWORKS == 'true' %}
-        # if hosts have multiple net interfaces, set following two ENVs
-        - name: KUBERNETES_SERVICE_HOST
-          value: "{{ MASTER_IP }}"
-          #value: "{{ KUBE_APISERVER.split(':')[1].lstrip('/') }}"
-        - name: KUBERNETES_SERVICE_PORT
-          value: "{{ KUBE_APISERVER.split(':')[2] }}"
-{% endif %}
         volumeMounts:
         - name: run
           mountPath: /run
@@ -24,3 +24,10 @@ HARD_EVICTION: "memory.available<200Mi,nodefs.available<10%"
 # "source": 基于请求源IP地址
 # "uri": 基于请求的URI
 BALANCE_ALG: "roundrobin"
+
+# 设置 APISERVER 地址
+KUBE_APISERVER: "{%- if groups['kube-master']|length > 1 -%} \
+                 https://127.0.0.1:6443 \
+                 {%- else -%} \
+                 https://{{ groups['kube-master'][0] }}:6443 \
+                 {%- endif -%}"
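KUBE_APISERVER is now computed from the inventory in the kube-node defaults: more than one master means nodes go through their local haproxy on 127.0.0.1:6443, while a single master is addressed directly. The {%- ... -%} markers trim surrounding whitespace so the folded lines render as one URL. With hypothetical inventories:

# groups['kube-master'] == ['192.168.1.10', '192.168.1.11']
#   -> KUBE_APISERVER renders to "https://127.0.0.1:6443"     (node-local haproxy)
# groups['kube-master'] == ['192.168.1.10']
#   -> KUBE_APISERVER renders to "https://192.168.1.10:6443"  (direct, no haproxy)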
@@ -1,15 +1,3 @@
-# 每个 node 节点运行 haproxy 连接到多个 apiserver
-- import_tasks: node_lb.yml
-  when: "inventory_hostname not in groups['kube-master']"
-
-- name: 替换 kubeconfig 的 apiserver 地址
-  lineinfile:
-    dest: /root/.kube/config
-    regexp: "^    server"
-    line: "    server: https://127.0.0.1:6443"
-  when: "inventory_hostname not in groups['kube-master']"
-
-# 创建kubelet,kube-proxy工作目录和cni配置目录
 - name: 创建kube-node 相关目录
   file: name={{ item }} state=directory
   with_items:
@@ -28,6 +16,17 @@
   - loopback
   tags: upgrade_k8s
 
+# 每个 node 节点运行 haproxy 连接到多个 apiserver
+- import_tasks: node_lb.yml
+  when: "inventory_hostname not in groups['kube-master']"
+
+- name: 替换 kubeconfig 的 apiserver 地址
+  lineinfile:
+    dest: /root/.kube/config
+    regexp: "^    server"
+    line: "    server: {{ KUBE_APISERVER }}"
+  when: "inventory_hostname not in groups['kube-master']"
+
 ##----------kubelet 配置部分--------------
 
 - name: 准备kubelet 证书签名请求
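The node_lb import and the kubeconfig rewrite are not deleted but relocated after the directory setup, and the rewritten line now uses {{ KUBE_APISERVER }} instead of hard-coding 127.0.0.1:6443. Effect on a non-master node (sketch, assumed addresses):

# /root/.kube/config, multi-master inventory:
#   before:    server: https://192.168.1.10:6443
#   after:     server: https://127.0.0.1:6443      # via the node-local haproxy
# single-master inventory: the line is rewritten to the master's own address,
# bypassing haproxy entirely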
@@ -45,7 +44,7 @@
   shell: "{{ bin_dir }}/kubectl config set-cluster kubernetes \
         --certificate-authority={{ ca_dir }}/ca.pem \
         --embed-certs=true \
-        --server=https://127.0.0.1:6443 \
+        --server={{ KUBE_APISERVER }} \
         --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
   when: "inventory_hostname not in groups['kube-master']"
 
@@ -95,7 +94,7 @@
   lineinfile:
     dest: /etc/kubernetes/kube-proxy.kubeconfig
     regexp: "^    server"
-    line: "    server: https://127.0.0.1:6443"
+    line: "    server: {{ KUBE_APISERVER }}"
   when: "inventory_hostname not in groups['kube-master']"
 
 - name: 替换 kube-proxy.kubeconfig 的 apiserver 地址
@@ -21,6 +21,11 @@
   shell: systemctl enable haproxy
   ignore_errors: true
 
-- name: 重启haproxy服务
-  shell: systemctl restart haproxy
+- name: 停止haproxy服务
+  shell: systemctl stop haproxy
+  tags: restart_lb
+
+- name: 开启haproxy服务
+  shell: systemctl start haproxy
+  when: "groups['kube-master']|length > 1"
   tags: restart_lb
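The single restart task becomes an explicit stop + start pair, both tagged restart_lb, with the start gated on a multi-master inventory — on a single-master cluster haproxy is stopped and left off, matching the KUBE_APISERVER default above. The tag allows re-running just this pair, e.g. (playbook name assumed):

# ansible-playbook 05.kube-node.yml -t restart_lb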