feat: add support to set nodename

pull/1226/head
gjmzj 2023-01-15 21:41:45 +08:00
parent ee750e3a2e
commit 7a2311cffd
18 changed files with 61 additions and 85 deletions

View File

@@ -191,20 +191,7 @@ WantedBy=multi-user.target
The project's master branch installs network plugins via DaemonSet; if the kubelet service is not installed on master nodes, the network plugin cannot be installed there either. And if master nodes have no network plugin, management UIs such as `dashboard` and `kibana` cannot be reached through the `apiserver` proxy, see [ISSUES #130](https://github.com/easzlab/kubeasz/issues/130).
``` bash
# vi 04.kube-master.yml
- hosts: kube_master
  roles:
  - kube_master
  - kube_node
  # prevent business pods from being scheduled onto master nodes
  tasks:
  - name: prevent business pods from being scheduled onto master nodes
    shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
    when: DEPLOY_MODE != "allinone"
    ignore_errors: true
```
Once a master node also becomes a node, business pods are scheduled onto it by default; in a multi-master setup this clearly adds load on the masters, so the `kubectl cordon` command can be used to keep business pods off the master nodes.
Once a master node also becomes a node, business pods are scheduled onto it by default; the `kubectl cordon` command can be used to keep business pods off the master nodes.
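For reference, a minimal manual equivalent (assuming `kubectl` is already configured for the cluster; the node name `master-01` is only an illustration):
``` bash
# mark a master unschedulable so new business pods avoid it
kubectl cordon master-01
# its STATUS now reads Ready,SchedulingDisabled
kubectl get node master-01
# revert if the master should accept business pods again
kubectl uncordon master-01
```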
### Verifying the master cluster

View File

@@ -56,7 +56,7 @@ ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/hugetlb/system.slice
ExecStart={{ bin_dir }}/kubelet \
  --config=/var/lib/kubelet/config.yaml \
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
  --hostname-override={{ inventory_hostname }} \
  --hostname-override={{ K8S_NODENAME }} \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --root-dir={{ KUBELET_ROOT_DIR }} \
  --v=2

View File

@@ -2,13 +2,13 @@
[etcd]
192.168.1.1
# master node(s)
# master node(s); set a unique 'k8s_nodename' for each node
[kube_master]
192.168.1.1
192.168.1.1 k8s_nodename=''
# work node(s)
# work node(s); set a unique 'k8s_nodename' for each node
[kube_node]
192.168.1.1
192.168.1.1 k8s_nodename=''
# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with an existing one
@@ -63,3 +63,10 @@ cluster_dir="{{ base_dir }}/clusters/_cluster_name_"
# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
# set a unique 'k8s_nodename' for each node; if not set (default: ''), the node's IP (inventory_hostname) will be used
K8S_NODENAME: "{%- if k8s_nodename != '' -%} \
                    {{ k8s_nodename }} \
               {%- else -%} \
                    {{ inventory_hostname }} \
               {%- endif -%}"
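To spot-check how the fallback resolves, an ad-hoc `debug` call can render the variable per host. A sketch, assuming the cluster inventory lives at `clusters/_cluster_name_/hosts` and that a host sets e.g. `k8s_nodename='master-01'`:
``` bash
# hosts that set k8s_nodename print that value; hosts that leave it ''
# fall back to inventory_hostname (their IP)
ansible -i clusters/_cluster_name_/hosts kube_master -m debug -a "var=K8S_NODENAME"
```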

View File

@@ -4,15 +4,15 @@
192.168.1.2
192.168.1.3
# master node(s)
# master node(s); set a unique 'k8s_nodename' for each node
[kube_master]
192.168.1.1
192.168.1.2
192.168.1.1 k8s_nodename=''
192.168.1.2 k8s_nodename=''
# work node(s)
# work node(s); set a unique 'k8s_nodename' for each node
[kube_node]
192.168.1.3
192.168.1.4
192.168.1.3 k8s_nodename=''
192.168.1.4 k8s_nodename=''
# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with an existing one
@@ -67,3 +67,10 @@ cluster_dir="{{ base_dir }}/clusters/_cluster_name_"
# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
# set a unique 'k8s_nodename' for each node; if not set (default: ''), the node's IP (inventory_hostname) will be used
K8S_NODENAME: "{%- if k8s_nodename != '' -%} \
                    {{ k8s_nodename }} \
               {%- else -%} \
                    {{ inventory_hostname }} \
               {%- endif -%}"

View File

@@ -4,14 +4,3 @@
  - kube-lb
  - kube-master
  - kube-node
  tasks:
  - name: Making master nodes SchedulingDisabled
    shell: "{{ base_dir }}/bin/kubectl cordon {{ inventory_hostname }} "
    when: "inventory_hostname not in groups['kube_node']"
    ignore_errors: true
    connection: local
  - name: Setting master role name
    shell: "{{ base_dir }}/bin/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
    ignore_errors: true
    connection: local

View File

@@ -15,15 +15,3 @@
  - { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
  - { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
  - { role: kube-ovn, when: "CLUSTER_NETWORK == 'kube-ovn'" }
  #
  tasks:
  - name: Making master nodes SchedulingDisabled
    shell: "{{ base_dir }}/bin/kubectl cordon {{ NODE_TO_ADD }} "
    when: "inventory_hostname not in groups['kube_node']"
    ignore_errors: true
    connection: local
  - name: Setting master role name
    shell: "{{ base_dir }}/bin/kubectl label node {{ NODE_TO_ADD }} kubernetes.io/role=master --overwrite"
    ignore_errors: true
    connection: local

View File

@@ -41,17 +41,6 @@
  - kube-lb
  - kube-master
  - kube-node
  tasks:
  - name: Making master nodes SchedulingDisabled
    shell: "{{ base_dir }}/bin/kubectl cordon {{ inventory_hostname }} "
    when: "inventory_hostname not in groups['kube_node']"
    ignore_errors: true
    connection: local
  - name: Setting master role name
    shell: "{{ base_dir }}/bin/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
    ignore_errors: true
    connection: local
# to set up 'kube_node' nodes
- hosts: kube_node

View File

@@ -59,7 +59,7 @@
  template: src=calicoctl.cfg.j2 dest=/etc/calico/calicoctl.cfg
- name: poll and wait for calico-node to be Running
  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -o wide|grep 'calico-node'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -o wide|grep 'calico-node'|grep ' {{ K8S_NODENAME }} '|awk '{print $3}'"
  register: pod_status
  until: pod_status.stdout == "Running"
  retries: 15

View File

@@ -35,7 +35,7 @@
# wait for the network plugin to finish deploying; time depends on image download speed
- name: poll and wait for cilium pods to be Running
  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -owide -lk8s-app=cilium|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -owide -lk8s-app=cilium|grep ' {{ K8S_NODENAME }} '|awk '{print $3}'"
  register: pod_status
  until: pod_status.stdout == "Running"
  retries: 15

View File

@@ -28,7 +28,7 @@
  file: path=/etc/cni/net.d/10-default.conf state=absent
- name: poll and wait for flannel to be Running; time depends on image download speed
  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -o wide|grep 'flannel'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -o wide|grep 'flannel'|grep ' {{ K8S_NODENAME }} '|awk '{print $3}'"
  register: pod_status
  until: pod_status.stdout == "Running"
  retries: 15

View File

@@ -110,18 +110,18 @@
- block:
  - name: copy kubectl.kubeconfig
    shell: 'cd {{ cluster_dir }} && cp -f kubectl.kubeconfig {{ inventory_hostname }}-kubectl.kubeconfig'
    shell: 'cd {{ cluster_dir }} && cp -f kubectl.kubeconfig {{ K8S_NODENAME }}-kubectl.kubeconfig'
    tags: upgrade_k8s, restart_master, force_change_certs
  - name: replace the apiserver address in the kubeconfig
    lineinfile:
      dest: "{{ cluster_dir }}/{{ inventory_hostname }}-kubectl.kubeconfig"
      dest: "{{ cluster_dir }}/{{ K8S_NODENAME }}-kubectl.kubeconfig"
      regexp: "^ server"
      line: " server: https://{{ inventory_hostname }}:{{ SECURE_PORT }}"
    tags: upgrade_k8s, restart_master, force_change_certs
  - name: poll and wait for the master services to finish starting
    command: "{{ base_dir }}/bin/kubectl --kubeconfig={{ cluster_dir }}/{{ inventory_hostname }}-kubectl.kubeconfig get node"
    command: "{{ base_dir }}/bin/kubectl --kubeconfig={{ cluster_dir }}/{{ K8S_NODENAME }}-kubectl.kubeconfig get node"
    register: result
    until: result.rc == 0
    retries: 5

View File

@@ -1,47 +1,47 @@
- block:
  - name: prepare the kubelet certificate signing request
    template: src=kubelet-csr.json.j2 dest={{ cluster_dir }}/ssl/{{ inventory_hostname }}-kubelet-csr.json
    template: src=kubelet-csr.json.j2 dest={{ cluster_dir }}/ssl/{{ K8S_NODENAME }}-kubelet-csr.json
  - name: create the kubelet certificate and private key
    shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
          -ca=ca.pem \
          -ca-key=ca-key.pem \
          -config=ca-config.json \
          -profile=kubernetes {{ inventory_hostname }}-kubelet-csr.json | {{ base_dir }}/bin/cfssljson -bare {{ inventory_hostname }}-kubelet"
          -profile=kubernetes {{ K8S_NODENAME }}-kubelet-csr.json | {{ base_dir }}/bin/cfssljson -bare {{ K8S_NODENAME }}-kubelet"
  - name: set cluster parameters
    shell: "{{ base_dir }}/bin/kubectl config set-cluster kubernetes \
          --certificate-authority={{ cluster_dir }}/ssl/ca.pem \
          --embed-certs=true \
          --server={{ KUBE_APISERVER }} \
          --kubeconfig={{ cluster_dir }}/{{ inventory_hostname }}-kubelet.kubeconfig"
          --kubeconfig={{ cluster_dir }}/{{ K8S_NODENAME }}-kubelet.kubeconfig"
  - name: set client authentication parameters
    shell: "{{ base_dir }}/bin/kubectl config set-credentials system:node:{{ inventory_hostname }} \
          --client-certificate={{ cluster_dir }}/ssl/{{ inventory_hostname }}-kubelet.pem \
    shell: "{{ base_dir }}/bin/kubectl config set-credentials system:node:{{ K8S_NODENAME }} \
          --client-certificate={{ cluster_dir }}/ssl/{{ K8S_NODENAME }}-kubelet.pem \
          --embed-certs=true \
          --client-key={{ cluster_dir }}/ssl/{{ inventory_hostname }}-kubelet-key.pem \
          --kubeconfig={{ cluster_dir }}/{{ inventory_hostname }}-kubelet.kubeconfig"
          --client-key={{ cluster_dir }}/ssl/{{ K8S_NODENAME }}-kubelet-key.pem \
          --kubeconfig={{ cluster_dir }}/{{ K8S_NODENAME }}-kubelet.kubeconfig"
  - name: set context parameters
    shell: "{{ base_dir }}/bin/kubectl config set-context default \
          --cluster=kubernetes \
          --user=system:node:{{ inventory_hostname }} \
          --kubeconfig={{ cluster_dir }}/{{ inventory_hostname }}-kubelet.kubeconfig"
          --user=system:node:{{ K8S_NODENAME }} \
          --kubeconfig={{ cluster_dir }}/{{ K8S_NODENAME }}-kubelet.kubeconfig"
  - name: select the default context
    shell: "{{ base_dir }}/bin/kubectl config use-context default \
          --kubeconfig={{ cluster_dir }}/{{ inventory_hostname }}-kubelet.kubeconfig"
          --kubeconfig={{ cluster_dir }}/{{ K8S_NODENAME }}-kubelet.kubeconfig"
  connection: local
- name: distribute the CA certificate
  copy: src={{ cluster_dir }}/ssl/ca.pem dest={{ ca_dir }}/ca.pem
- name: distribute the kubelet certificate
  copy: src={{ cluster_dir }}/ssl/{{ inventory_hostname }}-{{ item }} dest={{ ca_dir }}/{{ item }}
  copy: src={{ cluster_dir }}/ssl/{{ K8S_NODENAME }}-{{ item }} dest={{ ca_dir }}/{{ item }}
  with_items:
  - kubelet.pem
  - kubelet-key.pem
- name: distribute the kubeconfig
  copy: src={{ cluster_dir }}/{{ inventory_hostname }}-kubelet.kubeconfig dest=/etc/kubernetes/kubelet.kubeconfig
  copy: src={{ cluster_dir }}/{{ K8S_NODENAME }}-kubelet.kubeconfig dest=/etc/kubernetes/kubelet.kubeconfig

View File

@@ -93,7 +93,7 @@
  tags: reload-kube-proxy, upgrade_k8s, restart_node, force_change_certs
- name: poll and wait for the node to reach Ready state
  shell: "{{ base_dir }}/bin/kubectl get node {{ inventory_hostname }}|awk 'NR>1{print $2}'"
  shell: "{{ base_dir }}/bin/kubectl get node {{ K8S_NODENAME }}|awk 'NR>1{print $2}'"
  register: node_status
  until: node_status.stdout == "Ready" or node_status.stdout == "Ready,SchedulingDisabled"
  retries: 8
@@ -101,7 +101,16 @@
  tags: upgrade_k8s, restart_node, force_change_certs
  connection: local
- name: set the node role
  shell: "{{ base_dir }}/bin/kubectl label node {{ inventory_hostname }} kubernetes.io/role=node --overwrite"
- block:
  - name: Setting worker role name
    shell: "{{ base_dir }}/bin/kubectl label node {{ K8S_NODENAME }} kubernetes.io/role=node --overwrite"
  - name: Setting master role name
    shell: "{{ base_dir }}/bin/kubectl label node {{ K8S_NODENAME }} kubernetes.io/role=master --overwrite"
    when: "inventory_hostname in groups['kube_master']"
  - name: Making master nodes SchedulingDisabled
    shell: "{{ base_dir }}/bin/kubectl cordon {{ K8S_NODENAME }} "
    when: "inventory_hostname not in groups['kube_node']"
  ignore_errors: true
  connection: local
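Once these tasks have run, the result can be confirmed against the live cluster (illustrative `kubectl` checks):
``` bash
# masters show Ready,SchedulingDisabled and role=master;
# pure workers stay Ready with role=node
kubectl get nodes
kubectl get nodes --show-labels | grep kubernetes.io/role
```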

View File

@@ -12,6 +12,6 @@ conntrack:
  tcpEstablishedTimeout: 24h0m0s
healthzBindAddress: 0.0.0.0:10256
# the hostnameOverride value must match the kubelet's; otherwise kube-proxy will not find this Node after it starts and will not create any iptables rules
hostnameOverride: "{{ inventory_hostname }}"
hostnameOverride: "{{ K8S_NODENAME }}"
metricsBindAddress: 0.0.0.0:10249
mode: "{{ PROXY_MODE }}"
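A quick way to check that the two names agree; a sketch that assumes the rendered config is deployed to `/var/lib/kube-proxy/kube-proxy-config.yaml` on each node:
``` bash
# the name kube-proxy overrides to ...
grep hostnameOverride /var/lib/kube-proxy/kube-proxy-config.yaml
# ... must match one of the node names kubelet registered
kubectl get nodes -o name
```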

View File

@@ -1,5 +1,5 @@
{
"CN": "system:node:{{ inventory_hostname }}",
"CN": "system:node:{{ K8S_NODENAME }}",
"hosts": [
"127.0.0.1",
"{{ inventory_hostname }}"

View File

@@ -30,7 +30,7 @@ ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/hugetlb/system.slice
ExecStart={{ bin_dir }}/kubelet \
  --config=/var/lib/kubelet/config.yaml \
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
  --hostname-override={{ inventory_hostname }} \
  --hostname-override={{ K8S_NODENAME }} \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --root-dir={{ KUBELET_ROOT_DIR }} \
  --v=2

View File

@@ -38,7 +38,7 @@
# wait for the network plugin to finish deploying; time depends on image download speed
- name: poll and wait for kube-ovn to be Running; time depends on image download speed
  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-ovn -o wide|grep 'kube-ovn-cni'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-ovn -o wide|grep 'kube-ovn-cni'|grep ' {{ K8S_NODENAME }} '|awk '{print $3}'"
  register: pod_status
  until: pod_status.stdout == "Running"
  retries: 15

View File

@@ -31,7 +31,7 @@
# wait for the network plugin to finish deploying; time depends on image download speed
- name: poll and wait for kube-router to be Running; time depends on image download speed
  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -o wide|grep 'kube-router'|grep ' {{ inventory_hostname }} '|awk '{print $3}'"
  shell: "{{ base_dir }}/bin/kubectl get pod -n kube-system -o wide|grep 'kube-router'|grep ' {{ K8S_NODENAME }} '|awk '{print $3}'"
  register: pod_status
  until: pod_status.stdout == "Running"
  retries: 15