mirror of https://github.com/easzlab/kubeasz.git
更新安装步骤文档
parent
583c79e6cc
commit
d7c6b8a89e
|
@ -45,7 +45,7 @@
|
||||||
roles:
|
roles:
|
||||||
- kube-node
|
- kube-node
|
||||||
|
|
||||||
# 集群网络插件部署
|
# 集群网络插件部署,只能选择一种安装
|
||||||
- hosts: kube-cluster
|
- hosts: kube-cluster
|
||||||
roles:
|
roles:
|
||||||
- { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
|
- { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
|
||||||
|
|
|
@ -81,6 +81,7 @@
|
||||||
ip link del tunl0; \
|
ip link del tunl0; \
|
||||||
ip link del flannel.1; \
|
ip link del flannel.1; \
|
||||||
ip link del cni0; \
|
ip link del cni0; \
|
||||||
|
ip link del mynet0; \
|
||||||
systemctl restart networking; \
|
systemctl restart networking; \
|
||||||
systemctl restart network"
|
systemctl restart network"
|
||||||
ignore_errors: true
|
ignore_errors: true
|
||||||
|
|
|
@ -11,22 +11,35 @@
|
||||||
|
|
||||||
生产环境使用建议一个节点只是一个角色,避免性能瓶颈问题,这里演示环境将节点绑定多个角色。项目预定义了3个例子,请修改后完成适合你的集群规划。
|
生产环境使用建议一个节点只是一个角色,避免性能瓶颈问题,这里演示环境将节点绑定多个角色。项目预定义了3个例子,请修改后完成适合你的集群规划。
|
||||||
|
|
||||||
+ [单节点 AllInOne](../example/hosts.allinone.example)
|
+ [单节点](../example/hosts.allinone.example)
|
||||||
+ [单主多节点](../example/hosts.s-master.example)
|
+ [单主多节点](../example/hosts.s-master.example)
|
||||||
+ [多主多节点](../example/hosts.m-masters.example)
|
+ [多主多节点](../example/hosts.m-masters.example)
|
||||||
|
|
||||||
## 集群所用到的参数举例如下:
|
## 集群所用到的参数举例如下:
|
||||||
``` bash
|
``` bash
|
||||||
|
# ---------集群主要参数---------------
|
||||||
#集群 MASTER IP, 需要负载均衡,一般为VIP地址
|
#集群 MASTER IP, 需要负载均衡,一般为VIP地址
|
||||||
MASTER_IP="192.168.1.10"
|
MASTER_IP="192.168.1.10"
|
||||||
KUBE_APISERVER="https://192.168.1.10:8443"
|
KUBE_APISERVER="https://192.168.1.10:8443"
|
||||||
|
|
||||||
#pause镜像 基础镜像
|
#pause镜像地址
|
||||||
POD_INFRA_CONTAINER_IMAGE=mirrorgooglecontainers/pause-amd64:3.0
|
POD_INFRA_CONTAINER_IMAGE=mirrorgooglecontainers/pause-amd64:3.0
|
||||||
|
|
||||||
#TLS Bootstrapping 使用的 Token,使用 head -c 16 /dev/urandom | od -An -t x | tr -d ' ' 生成
|
#TLS Bootstrapping 使用的 Token,使用 head -c 16 /dev/urandom | od -An -t x | tr -d ' ' 生成
|
||||||
BOOTSTRAP_TOKEN="c30302226d4b810e08731702d3890f50"
|
BOOTSTRAP_TOKEN="c30302226d4b810e08731702d3890f50"
|
||||||
|
|
||||||
|
# 集群网络插件,目前支持calico和flannel
|
||||||
|
CLUSTER_NETWORK="calico"
|
||||||
|
|
||||||
|
# 部分calico相关配置,更全配置可以去roles/calico/templates/calico.yaml.j2自定义
|
||||||
|
# 设置 CALICO_IPV4POOL_IPIP=“off”,可以提高网络性能,条件限制详见 05.安装calico网络组件.md
|
||||||
|
CALICO_IPV4POOL_IPIP="always"
|
||||||
|
# 设置 calico-node使用的host IP,bgp邻居通过该地址建立,可手动指定端口"interface=eth0"或使用>如下自动发现
|
||||||
|
IP_AUTODETECTION_METHOD="can-reach=223.5.5.5"
|
||||||
|
|
||||||
|
# 部分flannel配置,详见roles/flannel/templates/kube-flannel.yaml.j2
|
||||||
|
FLANNEL_BACKEND="vxlan"
|
||||||
|
|
||||||
# 服务网段 (Service CIDR),部署前路由不可达,部署后集群内使用 IP:Port 可达
|
# 服务网段 (Service CIDR),部署前路由不可达,部署后集群内使用 IP:Port 可达
|
||||||
SERVICE_CIDR="10.68.0.0/16"
|
SERVICE_CIDR="10.68.0.0/16"
|
||||||
|
|
||||||
|
@ -42,7 +55,7 @@ CLUSTER_KUBERNETES_SVC_IP="10.68.0.1"
|
||||||
# 集群 DNS 服务 IP (从 SERVICE_CIDR 中预分配)
|
# 集群 DNS 服务 IP (从 SERVICE_CIDR 中预分配)
|
||||||
CLUSTER_DNS_SVC_IP="10.68.0.2"
|
CLUSTER_DNS_SVC_IP="10.68.0.2"
|
||||||
|
|
||||||
# 集群 DNS 域名,后续生成 master节点证书时也会用到这个默认根域名
|
# 集群 DNS 域名
|
||||||
CLUSTER_DNS_DOMAIN="cluster.local."
|
CLUSTER_DNS_DOMAIN="cluster.local."
|
||||||
|
|
||||||
# etcd 集群间通信的IP和端口, **根据实际 etcd 集群成员设置**
|
# etcd 集群间通信的IP和端口, **根据实际 etcd 集群成员设置**
|
||||||
|
@ -51,7 +64,7 @@ ETCD_NODES="etcd1=https://192.168.1.1:2380,etcd2=https://192.168.1.2:2380,etcd3=
|
||||||
# etcd 集群服务地址列表, **根据实际 etcd 集群成员设置**
|
# etcd 集群服务地址列表, **根据实际 etcd 集群成员设置**
|
||||||
ETCD_ENDPOINTS="https://192.168.1.1:2379,https://192.168.1.2:2379,https://192.168.1.3:2379"
|
ETCD_ENDPOINTS="https://192.168.1.1:2379,https://192.168.1.2:2379,https://192.168.1.3:2379"
|
||||||
|
|
||||||
# 集群basic auth 使用的用户名和密码【可选】
|
# 集群basic auth 使用的用户名和密码
|
||||||
BASIC_AUTH_USER="admin"
|
BASIC_AUTH_USER="admin"
|
||||||
BASIC_AUTH_PASS="test1234"
|
BASIC_AUTH_PASS="test1234"
|
||||||
|
|
||||||
|
@ -65,11 +78,13 @@ ca_dir="/etc/kubernetes/ssl"
|
||||||
#部署目录,即 ansible 工作目录,建议不要修改
|
#部署目录,即 ansible 工作目录,建议不要修改
|
||||||
base_dir="/etc/ansible"
|
base_dir="/etc/ansible"
|
||||||
|
|
||||||
#私有仓库 harbor服务器 (域名或者IP) 【可选】
|
#私有仓库 harbor服务器 (域名或者IP)
|
||||||
HARBOR_IP="192.168.1.8"
|
#HARBOR_IP="192.168.1.8"
|
||||||
HARBOR_DOMAIN="harbor.mydomain.com"
|
#HARBOR_DOMAIN="harbor.yourdomain.com"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
+ 请事先规划好使用何种网络插件(calico flannel),并配置对应网络插件的参数
|
||||||
|
|
||||||
## 部署步骤
|
## 部署步骤
|
||||||
|
|
||||||
按照[多主多节点](../example/hosts.m-masters.example)示例的节点配置,至少准备4台虚机,测试搭建一个多主高可用集群。
|
按照[多主多节点](../example/hosts.m-masters.example)示例的节点配置,至少准备4台虚机,测试搭建一个多主高可用集群。
|
||||||
|
@ -136,12 +151,15 @@ mv kubeasz /etc/ansible
|
||||||
# 如果你有合适网络环境也可以按照/down/download.sh自行从官网下载各种tar包到 ./down目录,并执行download.sh
|
# 如果你有合适网络环境也可以按照/down/download.sh自行从官网下载各种tar包到 ./down目录,并执行download.sh
|
||||||
tar zxvf k8s.190.tar.gz
|
tar zxvf k8s.190.tar.gz
|
||||||
mv bin/* /etc/ansible/bin
|
mv bin/* /etc/ansible/bin
|
||||||
# 配置ansible的hosts文件,并且根据上文实际规划修改此hosts文件
|
|
||||||
cd /etc/ansible
|
cd /etc/ansible
|
||||||
cp example/hosts.m-masters.example hosts
|
cp example/hosts.m-masters.example hosts
|
||||||
|
# 根据上文实际规划修改此hosts文件
|
||||||
|
vi hosts
|
||||||
```
|
```
|
||||||
+ 验证ansible安装
|
+ 验证ansible安装
|
||||||
|
|
||||||
|
在deploy 节点使用如下命令
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
ansible all -m ping
|
ansible all -m ping
|
||||||
```
|
```
|
||||||
|
@ -171,9 +189,9 @@ ansible all -m ping
|
||||||
#ansible-playbook 02.etcd.yml
|
#ansible-playbook 02.etcd.yml
|
||||||
#ansible-playbook 03.kubectl.yml
|
#ansible-playbook 03.kubectl.yml
|
||||||
#ansible-playbook 04.docker.yml
|
#ansible-playbook 04.docker.yml
|
||||||
#ansible-playbook 05.calico.yml
|
#ansible-playbook 05.kube-master.yml
|
||||||
#ansible-playbook 06.kube-master.yml
|
#ansible-playbook 06.kube-node.yml
|
||||||
#ansible-playbook 07.kube-node.yml
|
#ansible-playbook 07.calico.yml 或者 ansible-playbook 07.flannel.yml 只能选择一种网络插件
|
||||||
#ansible-playbook 90.setup.yml # 一步安装
|
#ansible-playbook 90.setup.yml # 一步安装
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -85,7 +85,7 @@ cfssl gencert -initca ca-csr.json | cfssljson -bare ca
|
||||||
- ca.csr
|
- ca.csr
|
||||||
- ca-config.json
|
- ca-config.json
|
||||||
```
|
```
|
||||||
+ force=no 保证整个安装的幂等性,如果已经生成过CA证书,就使用已经存在的CA,简单说可以多次运行 `ansible-playbook 90.setup.yml`
|
+ force=no 保证整个安装的幂等性,如果已经生成过CA证书,就使用已经存在的CA,可以多次运行 `ansible-playbook 90.setup.yml`
|
||||||
+ 如果确实需要更新CA 证书,删除/roles/prepare/files/ca* 可以使用新CA 证书
|
+ 如果确实需要更新CA 证书,删除/roles/prepare/files/ca* 可以使用新CA 证书
|
||||||
|
|
||||||
### kubedns.yaml 配置生成
|
### kubedns.yaml 配置生成
|
||||||
|
@ -96,6 +96,7 @@ cfssl gencert -initca ca-csr.json | cfssljson -bare ca
|
||||||
``` bash
|
``` bash
|
||||||
roles/prepare/
|
roles/prepare/
|
||||||
├── files
|
├── files
|
||||||
|
│ ├── 95-k8s-sysctl.conf
|
||||||
│ ├── ca-config.json
|
│ ├── ca-config.json
|
||||||
│ ├── ca.csr
|
│ ├── ca.csr
|
||||||
│ ├── ca-csr.json
|
│ ├── ca-csr.json
|
||||||
|
@ -110,6 +111,7 @@ roles/prepare/
|
||||||
1. 修改环境变量,把{{ bin_dir }} 添加到$PATH,需要重新登陆 shell生效
|
1. 修改环境变量,把{{ bin_dir }} 添加到$PATH,需要重新登陆 shell生效
|
||||||
1. 把证书工具 CFSSL下发到指定节点
|
1. 把证书工具 CFSSL下发到指定节点
|
||||||
1. 把CA 证书相关下发到指定节点的 {{ ca_dir }} 目录
|
1. 把CA 证书相关下发到指定节点的 {{ ca_dir }} 目录
|
||||||
|
1. 最后设置基础操作系统软件和系统参数,请阅读脚本中的注释内容
|
||||||
|
|
||||||
### LB 负载均衡部署
|
### LB 负载均衡部署
|
||||||
``` bash
|
``` bash
|
||||||
|
|
|
@ -142,4 +142,4 @@ iptables-save|grep FORWARD
|
||||||
-A FORWARD -j ACCEPT
|
-A FORWARD -j ACCEPT
|
||||||
```
|
```
|
||||||
|
|
||||||
[前一篇](03-配置kubectl命令行工具.md) -- [后一篇](05-安装calico网络组件.md)
|
[前一篇](03-配置kubectl命令行工具.md) -- [后一篇](05-安装kube-master节点.md)
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
## 06-安装kube-master节点.md
|
## 05-安装kube-master节点.md
|
||||||
|
|
||||||
部署master节点包含三个组件`apiserver` `scheduler` `controller-manager`,其中:
|
部署master节点包含三个组件`apiserver` `scheduler` `controller-manager`,其中:
|
||||||
|
|
||||||
|
@ -212,4 +212,4 @@ etcd-2 Healthy {"health": "true"}
|
||||||
etcd-1 Healthy {"health": "true"}
|
etcd-1 Healthy {"health": "true"}
|
||||||
```
|
```
|
||||||
|
|
||||||
[前一篇](05-安装calico网络组件.md) -- [后一篇](07-安装kube-node节点.md)
|
[前一篇](04-安装docker服务.md) -- [后一篇](06-安装kube-node节点.md)
|
||||||
|
|
|
@ -1,20 +1,18 @@
|
||||||
## 07-安装kube-node节点.md
|
## 06-安装kube-node节点.md
|
||||||
|
|
||||||
node 是集群中承载应用的节点,前置条件需要先部署好master节点(因为需要操作`用户角色绑定`、`批准kubelet TLS 证书请求`等),它需要部署如下组件:
|
`kube-node` 是集群中承载应用的节点,前置条件需要先部署好`kube-master`节点(因为需要操作`用户角色绑定`、`批准kubelet TLS 证书请求`等),它需要部署如下组件:
|
||||||
|
|
||||||
+ docker:运行容器
|
+ docker:运行容器
|
||||||
+ calico: 配置容器网络
|
+ calico: 配置容器网络 (或者 flannel)
|
||||||
+ kubelet: node上最主要的组件
|
+ kubelet: kube-node上最主要的组件
|
||||||
+ kube-proxy: 发布应用服务与负载均衡
|
+ kube-proxy: 发布应用服务与负载均衡
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
roles/kube-node
|
roles/kube-node
|
||||||
├── files
|
|
||||||
│ └── rbac.yaml
|
|
||||||
├── tasks
|
├── tasks
|
||||||
│ └── main.yml
|
│ └── main.yml
|
||||||
└── templates
|
└── templates
|
||||||
├── calico-kube-controllers.yaml.j2
|
├── cni-default.conf.j2
|
||||||
├── kubelet.service.j2
|
├── kubelet.service.j2
|
||||||
├── kube-proxy-csr.json.j2
|
├── kube-proxy-csr.json.j2
|
||||||
└── kube-proxy.service.j2
|
└── kube-proxy.service.j2
|
||||||
|
@ -56,6 +54,10 @@ kubelet 启动时向 kube-apiserver 发送 TLS bootstrapping 请求,需要先
|
||||||
+ 注意 kubelet bootstrapping认证时是靠 token的,后续由 `master`为其生成证书和私钥
|
+ 注意 kubelet bootstrapping认证时是靠 token的,后续由 `master`为其生成证书和私钥
|
||||||
+ 以上生成的bootstrap.kubeconfig配置文件需要移动到/etc/kubernetes/目录下,后续在kubelet启动参数中指定该目录下的 bootstrap.kubeconfig
|
+ 以上生成的bootstrap.kubeconfig配置文件需要移动到/etc/kubernetes/目录下,后续在kubelet启动参数中指定该目录下的 bootstrap.kubeconfig
|
||||||
|
|
||||||
|
### 创建cni 基础网络插件配置文件
|
||||||
|
|
||||||
|
因为后续需要用 `DaemonSet Pod`方式运行k8s网络插件,所以kubelet.server服务必须开启cni相关参数,并且提供cni网络配置文件
|
||||||
|
|
||||||
### 创建 kubelet 的服务文件
|
### 创建 kubelet 的服务文件
|
||||||
|
|
||||||
+ 必须先创建工作目录 `/var/lib/kubelet`
|
+ 必须先创建工作目录 `/var/lib/kubelet`
|
||||||
|
@ -73,7 +75,7 @@ WorkingDirectory=/var/lib/kubelet
|
||||||
ExecStart={{ bin_dir }}/kubelet \
|
ExecStart={{ bin_dir }}/kubelet \
|
||||||
--address={{ NODE_IP }} \
|
--address={{ NODE_IP }} \
|
||||||
--hostname-override={{ NODE_IP }} \
|
--hostname-override={{ NODE_IP }} \
|
||||||
--pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
|
--pod-infra-container-image={{ POD_INFRA_CONTAINER_IMAGE }} \
|
||||||
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
|
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
|
||||||
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
|
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
|
||||||
--cert-dir={{ ca_dir }} \
|
--cert-dir={{ ca_dir }} \
|
||||||
|
@ -189,30 +191,6 @@ WantedBy=multi-user.target
|
||||||
+ --hostname-override 参数值必须与 kubelet 的值一致,否则 kube-proxy 启动后会找不到该 Node,从而不会创建任何 iptables 规则
|
+ --hostname-override 参数值必须与 kubelet 的值一致,否则 kube-proxy 启动后会找不到该 Node,从而不会创建任何 iptables 规则
|
||||||
+ 特别注意:kube-proxy 根据 --cluster-cidr 判断集群内部和外部流量,指定 --cluster-cidr 或 --masquerade-all 选项后 kube-proxy 才会对访问 Service IP 的请求做 SNAT;但是这个特性与calico 实现 network policy冲突,所以如果要用 network policy,这两个选项都不要指定。
|
+ 特别注意:kube-proxy 根据 --cluster-cidr 判断集群内部和外部流量,指定 --cluster-cidr 或 --masquerade-all 选项后 kube-proxy 才会对访问 Service IP 的请求做 SNAT;但是这个特性与calico 实现 network policy冲突,所以如果要用 network policy,这两个选项都不要指定。
|
||||||
|
|
||||||
### 部署calico-kube-controllers
|
|
||||||
|
|
||||||
calico networkpolicy正常工作需要3个组件:
|
|
||||||
|
|
||||||
+ `master/node` 节点需要运行的 docker 容器 `calico/node`
|
|
||||||
+ `cni-plugin` 所需的插件二进制和配置文件
|
|
||||||
+ `calico kubernetes controllers` 负责监听Network Policy的变化,并将Policy应用到相应的网络接口
|
|
||||||
|
|
||||||
#### 准备RBAC和calico-kube-controllers.yaml 文件
|
|
||||||
|
|
||||||
- [RBAC](../roles/kube-node/files/rbac.yaml)
|
|
||||||
- 最小化权限使用
|
|
||||||
- [Controllers](../roles/kube-node/templates/calico-kube-controllers.yaml.j2)
|
|
||||||
- 注意只能跑一个 controller实例
|
|
||||||
- 注意该 controller实例需要使用宿主机网络 `hostNetwork: true`
|
|
||||||
|
|
||||||
#### 创建calico-kube-controllers
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
"sleep 15 && {{ bin_dir }}/kubectl create -f /root/local/kube-system/calico/rbac.yaml && \
|
|
||||||
{{ bin_dir }}/kubectl create -f /root/local/kube-system/calico/calico-kube-controllers.yaml"
|
|
||||||
```
|
|
||||||
+ 增加15s等待集群node ready
|
|
||||||
|
|
||||||
### 验证 node 状态
|
### 验证 node 状态
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
|
@ -225,17 +203,10 @@ journalctl -u kube-proxy
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
NAME STATUS ROLES AGE VERSION
|
NAME STATUS ROLES AGE VERSION
|
||||||
192.168.1.42 Ready <none> 2d v1.8.4
|
192.168.1.42 Ready <none> 2d v1.9.0
|
||||||
192.168.1.43 Ready <none> 2d v1.8.4
|
192.168.1.43 Ready <none> 2d v1.9.0
|
||||||
192.168.1.44 Ready <none> 2d v1.8.4
|
192.168.1.44 Ready <none> 2d v1.9.0
|
||||||
```
|
|
||||||
并且稍等一会,`kubectl get pod -n kube-system -o wide` 可以看到有个calico controller 的POD运行,且使用了host 网络
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
kubectl get pod -n kube-system -o wide
|
|
||||||
NAME READY STATUS RESTARTS AGE IP NODE
|
|
||||||
calico-kube-controllers-69bcb79c6-b444q 1/1 Running 0 2d 192.168.1.44 192.168.1.44
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
[前一篇](06-安装kube-master节点.md) -- [后一篇]()
|
[前一篇](05-安装kube-master节点.md) -- [后一篇](07-安装calico网络组件.md)
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
## 05-安装calico网络组件.md
|
## 07-安装calico网络组件.md
|
||||||
|
|
||||||
推荐阅读[feiskyer-kubernetes指南](https://github.com/feiskyer/kubernetes-handbook) 网络相关内容
|
推荐阅读[feiskyer-kubernetes指南](https://github.com/feiskyer/kubernetes-handbook) 网络相关内容
|
||||||
|
|
||||||
|
@ -27,7 +27,7 @@ Kubernetes Pod的网络是这样创建的:
|
||||||
|
|
||||||
本文档基于CNI driver 调用calico 插件来配置kubernetes的网络,常用CNI插件有 `flannel` `calico` `weave`等等,这些插件各有优势,也在互相借鉴学习优点,比如:在所有node节点都在一个二层网络时候,flannel提供hostgw实现,避免vxlan实现的udp封装开销,估计是目前最高效的;calico也针对L3 Fabric,推出了IPinIP的选项,利用了GRE隧道封装;因此这些插件都能适合很多实际应用场景,这里选择calico,主要考虑它支持 `kubernetes network policy`。
|
本文档基于CNI driver 调用calico 插件来配置kubernetes的网络,常用CNI插件有 `flannel` `calico` `weave`等等,这些插件各有优势,也在互相借鉴学习优点,比如:在所有node节点都在一个二层网络时候,flannel提供hostgw实现,避免vxlan实现的udp封装开销,估计是目前最高效的;calico也针对L3 Fabric,推出了IPinIP的选项,利用了GRE隧道封装;因此这些插件都能适合很多实际应用场景,这里选择calico,主要考虑它支持 `kubernetes network policy`。
|
||||||
|
|
||||||
推荐阅读[calico kubernetes Integration Guide](https://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/integration)
|
推荐阅读[calico kubernetes guide](https://docs.projectcalico.org/v2.6/getting-started/kubernetes/)
|
||||||
|
|
||||||
calico-node需要在所有master节点和node节点安装
|
calico-node需要在所有master节点和node节点安装
|
||||||
|
|
||||||
|
@ -36,9 +36,10 @@ roles/calico/
|
||||||
├── tasks
|
├── tasks
|
||||||
│ └── main.yml
|
│ └── main.yml
|
||||||
└── templates
|
└── templates
|
||||||
|
├── calico-csr.json.j2
|
||||||
├── calicoctl.cfg.j2
|
├── calicoctl.cfg.j2
|
||||||
├── calico-node.service.j2
|
├── calico-rbac.yaml.j2
|
||||||
└── cni-calico.conf.j2
|
└── calico.yaml.j2
|
||||||
```
|
```
|
||||||
请在另外窗口打开[roles/calico/tasks/main.yml](../roles/calico/tasks/main.yml) 文件,对照看以下讲解内容。
|
请在另外窗口打开[roles/calico/tasks/main.yml](../roles/calico/tasks/main.yml) 文件,对照看以下讲解内容。
|
||||||
|
|
||||||
|
@ -69,47 +70,10 @@ roles/calico/
|
||||||
- calicoctl 操作集群网络时访问 etcd 使用证书
|
- calicoctl 操作集群网络时访问 etcd 使用证书
|
||||||
- calico/kube-controllers 同步集群网络策略时访问 etcd 使用证书
|
- calico/kube-controllers 同步集群网络策略时访问 etcd 使用证书
|
||||||
|
|
||||||
### 创建 calico-node 的服务文件 [calico-node.service.j2](../roles/calico/templates/calico-node.service.j2)
|
### 创建 calico DaemonSet yaml文件和rbac 文件
|
||||||
|
|
||||||
``` bash
|
请对照 roles/calico/templates/calico.yaml.j2文件注释和以下注意内容
|
||||||
[Unit]
|
|
||||||
Description=calico node
|
|
||||||
After=docker.service
|
|
||||||
Requires=docker.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
User=root
|
|
||||||
PermissionsStartOnly=true
|
|
||||||
ExecStart={{ bin_dir }}/docker run --net=host --privileged --name=calico-node \
|
|
||||||
-e ETCD_ENDPOINTS={{ ETCD_ENDPOINTS }} \
|
|
||||||
-e ETCD_CA_CERT_FILE=/etc/calico/ssl/ca.pem \
|
|
||||||
-e ETCD_CERT_FILE=/etc/calico/ssl/calico.pem \
|
|
||||||
-e ETCD_KEY_FILE=/etc/calico/ssl/calico-key.pem \
|
|
||||||
-e CALICO_LIBNETWORK_ENABLED=true \
|
|
||||||
-e CALICO_NETWORKING_BACKEND=bird \
|
|
||||||
-e CALICO_DISABLE_FILE_LOGGING=true \
|
|
||||||
-e CALICO_IPV4POOL_CIDR={{ CLUSTER_CIDR }} \
|
|
||||||
-e CALICO_IPV4POOL_IPIP=off \
|
|
||||||
-e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \
|
|
||||||
-e FELIX_IPV6SUPPORT=false \
|
|
||||||
-e FELIX_LOGSEVERITYSCREEN=info \
|
|
||||||
-e FELIX_IPINIPMTU=1440 \
|
|
||||||
-e FELIX_HEALTHENABLED=true \
|
|
||||||
-e IP= {{ NODE_IP }} \
|
|
||||||
-v /etc/calico/ssl:/etc/calico/ssl \
|
|
||||||
-v /var/run/calico:/var/run/calico \
|
|
||||||
-v /lib/modules:/lib/modules \
|
|
||||||
-v /run/docker/plugins:/run/docker/plugins \
|
|
||||||
-v /var/run/docker.sock:/var/run/docker.sock \
|
|
||||||
-v /var/log/calico:/var/log/calico \
|
|
||||||
calico/node:v2.6.2
|
|
||||||
ExecStop={{ bin_dir }}/docker rm -f calico-node
|
|
||||||
Restart=always
|
|
||||||
RestartSec=10
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
```
|
|
||||||
+ 详细配置参数请参考[calico官方文档](https://docs.projectcalico.org/v2.6/reference/node/configuration)
|
+ 详细配置参数请参考[calico官方文档](https://docs.projectcalico.org/v2.6/reference/node/configuration)
|
||||||
+ calico-node是以docker容器运行在host上的,因此需要把之前的证书目录 /etc/calico/ssl挂载到容器中
|
+ calico-node是以docker容器运行在host上的,因此需要把之前的证书目录 /etc/calico/ssl挂载到容器中
|
||||||
+ 配置ETCD_ENDPOINTS 、CA、证书等,所有{{ }}变量与ansible hosts文件中设置对应
|
+ 配置ETCD_ENDPOINTS 、CA、证书等,所有{{ }}变量与ansible hosts文件中设置对应
|
||||||
|
@ -117,34 +81,11 @@ WantedBy=multi-user.target
|
||||||
+ **重要**本K8S集群运行在同网段kvm虚机上,虚机间没有网络ACL限制,因此可以设置`CALICO_IPV4POOL_IPIP=off`,如果你的主机位于不同网段,或者运行在公有云上需要打开这个选项 `CALICO_IPV4POOL_IPIP=always`
|
+ **重要**本K8S集群运行在同网段kvm虚机上,虚机间没有网络ACL限制,因此可以设置`CALICO_IPV4POOL_IPIP=off`,如果你的主机位于不同网段,或者运行在公有云上需要打开这个选项 `CALICO_IPV4POOL_IPIP=always`
|
||||||
+ 配置FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT 默认允许Pod到Node的网络流量,更多[felix配置选项](https://docs.projectcalico.org/v2.6/reference/felix/configuration)
|
+ 配置FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT 默认允许Pod到Node的网络流量,更多[felix配置选项](https://docs.projectcalico.org/v2.6/reference/felix/configuration)
|
||||||
|
|
||||||
### 启动calico-node
|
### 安装calico 网络
|
||||||
|
|
||||||
### 准备cni-calico配置文件 [cni-calico.conf.j2](../roles/calico/templates/cni-calico.conf.j2)
|
+ 安装之前必须确保`kube-master`和`kube-node`节点已经成功部署
|
||||||
|
+ 只需要在任意装有kubectl客户端的节点运行 `kubectl create `安装即可,脚本中选取`NODE_ID=node1`节点安装
|
||||||
``` bash
|
+ 等待15s后(视网络拉取calico相关镜像速度),calico 网络插件安装完成,删除之前kube-node安装时默认cni网络配置
|
||||||
{
|
|
||||||
"name": "calico-k8s-network",
|
|
||||||
"cniVersion": "0.1.0",
|
|
||||||
"type": "calico",
|
|
||||||
"etcd_endpoints": "{{ ETCD_ENDPOINTS }}",
|
|
||||||
"etcd_key_file": "/etc/calico/ssl/calico-key.pem",
|
|
||||||
"etcd_cert_file": "/etc/calico/ssl/calico.pem",
|
|
||||||
"etcd_ca_cert_file": "/etc/calico/ssl/ca.pem",
|
|
||||||
"log_level": "info",
|
|
||||||
"mtu": 1500,
|
|
||||||
"ipam": {
|
|
||||||
"type": "calico-ipam"
|
|
||||||
},
|
|
||||||
"policy": {
|
|
||||||
"type": "k8s"
|
|
||||||
},
|
|
||||||
"kubernetes": {
|
|
||||||
"kubeconfig": "/root/.kube/config"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
```
|
|
||||||
+ 主要配置etcd相关、ipam、policy等,配置选项[参考](https://docs.projectcalico.org/v2.6/reference/cni-plugin/configuration)
|
|
||||||
|
|
||||||
### [可选]配置calicoctl工具 [calicoctl.cfg.j2](roles/calico/templates/calicoctl.cfg.j2)
|
### [可选]配置calicoctl工具 [calicoctl.cfg.j2](roles/calico/templates/calicoctl.cfg.j2)
|
||||||
|
|
||||||
|
@ -162,37 +103,42 @@ spec:
|
||||||
|
|
||||||
### 验证calico网络
|
### 验证calico网络
|
||||||
|
|
||||||
执行calico安装 `ansible-playbook 05.calico.yml` 成功后可以验证如下:(需要等待calico/node:v2.6.2 镜像下载完成,有时候即便上一步已经配置了docker国内加速,还是可能比较慢,建议确认以下容器运行起来以后,再执行后续步骤)
|
执行calico安装成功后可以验证如下:(需要等待镜像下载完成,有时候即便上一步已经配置了docker国内加速,还是可能比较慢,请确认以下容器运行起来以后,再执行后续验证步骤)
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
docker ps
|
kubectl get pod --all-namespaces
|
||||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||||
631dde89eada calico/node:v2.6.2 "start_runit" 10 minutes ago Up 10 minutes calico-node
|
kube-system calico-kube-controllers-5c6b98d9df-xj2n4 1/1 Running 0 1m
|
||||||
|
kube-system calico-node-4hr52 2/2 Running 0 1m
|
||||||
|
kube-system calico-node-8ctc2 2/2 Running 0 1m
|
||||||
|
kube-system calico-node-9t8md 2/2 Running 0 1m
|
||||||
```
|
```
|
||||||
|
|
||||||
**查看网卡和路由信息**
|
**查看网卡和路由信息**
|
||||||
|
|
||||||
``` bash
|
先在集群创建几个测试pod: `kubectl run test --image=busybox --replicas=3 sleep 30000`
|
||||||
ip a #...省略其他网卡信息,可以看到包含类似cali1cxxx的网卡
|
|
||||||
3: caliccc295a6d4f@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
|
|
||||||
link/ether 12:79:2f:fe:8d:28 brd ff:ff:ff:ff:ff:ff link-netnsid 0
|
|
||||||
inet6 fe80::1079:2fff:fefe:8d28/64 scope link
|
|
||||||
valid_lft forever preferred_lft forever
|
|
||||||
5: tunl0@NONE: <NOARP> mtu 1480 qdisc noop state DOWN group default qlen 1
|
|
||||||
link/ipip 0.0.0.0 brd 0.0.0.0
|
|
||||||
# tunl0网卡现在不用管,是默认生成的,当开启IPIP 特性时使用的隧道
|
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
# 查看网卡信息
|
||||||
|
ip a
|
||||||
|
```
|
||||||
|
|
||||||
|
+ 可以看到包含类似cali1cxxx的网卡,是calico为测试pod生成的
|
||||||
|
+ tunl0网卡现在不用管,是默认生成的,当开启IPIP 特性时使用的隧道
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
# 查看路由
|
||||||
route -n
|
route -n
|
||||||
Kernel IP routing table
|
Kernel IP routing table
|
||||||
Destination Gateway Genmask Flags Metric Ref Use Iface
|
Destination Gateway Genmask Flags Metric Ref Use Iface
|
||||||
0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 ens3
|
0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 ens3
|
||||||
192.168.1.0 0.0.0.0 255.255.255.0 U 0 0 0 ens3
|
192.168.1.0 0.0.0.0 255.255.255.0 U 0 0 0 ens3
|
||||||
172.17.0.0 0.0.0.0 255.255.0.0 U 0 0 0 docker0
|
172.17.0.0 0.0.0.0 255.255.0.0 U 0 0 0 docker0
|
||||||
172.20.3.64 192.168.1.65 255.255.255.192 UG 0 0 0 ens3
|
172.20.3.64 192.168.1.34 255.255.255.192 UG 0 0 0 ens3
|
||||||
172.20.33.128 0.0.0.0 255.255.255.192 U 0 0 0 *
|
172.20.33.128 0.0.0.0 255.255.255.192 U 0 0 0 *
|
||||||
172.20.33.129 0.0.0.0 255.255.255.255 UH 0 0 0 caliccc295a6d4f
|
172.20.33.129 0.0.0.0 255.255.255.255 UH 0 0 0 caliccc295a6d4f
|
||||||
172.20.104.0 192.168.1.37 255.255.255.192 UG 0 0 0 ens3
|
172.20.104.0 192.168.1.35 255.255.255.192 UG 0 0 0 ens3
|
||||||
172.20.166.128 192.168.1.36 255.255.255.192 UG 0 0 0 ens3
|
172.20.166.128 192.168.1.63 255.255.255.192 UG 0 0 0 ens3
|
||||||
```
|
```
|
||||||
|
|
||||||
**查看所有calico节点状态**
|
**查看所有calico节点状态**
|
||||||
|
@ -208,9 +154,6 @@ IPv4 BGP status
|
||||||
| 192.168.1.34 | node-to-node mesh | up | 12:34:00 | Established |
|
| 192.168.1.34 | node-to-node mesh | up | 12:34:00 | Established |
|
||||||
| 192.168.1.35 | node-to-node mesh | up | 12:34:00 | Established |
|
| 192.168.1.35 | node-to-node mesh | up | 12:34:00 | Established |
|
||||||
| 192.168.1.63 | node-to-node mesh | up | 12:34:01 | Established |
|
| 192.168.1.63 | node-to-node mesh | up | 12:34:01 | Established |
|
||||||
| 192.168.1.36 | node-to-node mesh | up | 12:34:00 | Established |
|
|
||||||
| 192.168.1.65 | node-to-node mesh | up | 12:34:00 | Established |
|
|
||||||
| 192.168.1.37 | node-to-node mesh | up | 12:34:15 | Established |
|
|
||||||
+--------------+-------------------+-------+----------+-------------+
|
+--------------+-------------------+-------+----------+-------------+
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -219,9 +162,6 @@ IPv4 BGP status
|
||||||
``` bash
|
``` bash
|
||||||
netstat -antlp|grep ESTABLISHED|grep 179
|
netstat -antlp|grep ESTABLISHED|grep 179
|
||||||
tcp 0 0 192.168.1.66:179 192.168.1.35:41316 ESTABLISHED 28479/bird
|
tcp 0 0 192.168.1.66:179 192.168.1.35:41316 ESTABLISHED 28479/bird
|
||||||
tcp 0 0 192.168.1.66:179 192.168.1.36:52823 ESTABLISHED 28479/bird
|
|
||||||
tcp 0 0 192.168.1.66:179 192.168.1.65:56311 ESTABLISHED 28479/bird
|
|
||||||
tcp 0 0 192.168.1.66:42000 192.168.1.37:179 ESTABLISHED 28479/bird
|
|
||||||
tcp 0 0 192.168.1.66:179 192.168.1.34:40243 ESTABLISHED 28479/bird
|
tcp 0 0 192.168.1.66:179 192.168.1.34:40243 ESTABLISHED 28479/bird
|
||||||
tcp 0 0 192.168.1.66:179 192.168.1.63:48979 ESTABLISHED 28479/bird
|
tcp 0 0 192.168.1.66:179 192.168.1.63:48979 ESTABLISHED 28479/bird
|
||||||
```
|
```
|
||||||
|
@ -238,4 +178,4 @@ calicoctl get ipPool -o yaml
|
||||||
nat-outgoing: true
|
nat-outgoing: true
|
||||||
```
|
```
|
||||||
|
|
||||||
[前一篇](04-安装docker服务.md) -- [后一篇](06-安装kube-master节点.md)
|
[前一篇](06-安装kube-node节点.md) -- [后一篇]()
|
||||||
|
|
|
@ -1,241 +1,2 @@
|
||||||
## 05-安装calico网络组件.md
|
## 07-安装flannel网络组件.md
|
||||||
|
|
||||||
推荐阅读[feiskyer-kubernetes指南](https://github.com/feiskyer/kubernetes-handbook) 网络相关内容
|
|
||||||
|
|
||||||
首先回顾下K8S网络设计原则,在配置集群网络插件或者实践K8S 应用/服务部署请时刻想到这些原则:
|
|
||||||
|
|
||||||
- 1.每个Pod都拥有一个独立IP地址,Pod内所有容器共享一个网络命名空间
|
|
||||||
- 2.集群内所有Pod都在一个直接连通的扁平网络中,可通过IP直接访问
|
|
||||||
- 所有容器之间无需NAT就可以直接互相访问
|
|
||||||
- 所有Node和所有容器之间无需NAT就可以直接互相访问
|
|
||||||
- 容器自己看到的IP跟其他容器看到的一样
|
|
||||||
- 3.Service cluster IP尽可在集群内部访问,外部请求需要通过NodePort、LoadBalance或者Ingress来访问
|
|
||||||
|
|
||||||
`Container Network Interface (CNI)`是目前CNCF主推的网络模型,它由两部分组成:
|
|
||||||
|
|
||||||
- CNI Plugin负责给容器配置网络,它包括两个基本的接口
|
|
||||||
- 配置网络: AddNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
|
|
||||||
- 清理网络: DelNetwork(net *NetworkConfig, rt *RuntimeConf) error
|
|
||||||
- IPAM Plugin负责给容器分配IP地址
|
|
||||||
|
|
||||||
Kubernetes Pod的网络是这样创建的:
|
|
||||||
- 0.每个Pod除了创建时指定的容器外,都有一个kubelet启动时指定的`基础容器`,比如:`mirrorgooglecontainers/pause-amd64` `registry.access.redhat.com/rhel7/pod-infrastructure`
|
|
||||||
- 1.首先 kubelet创建`基础容器`生成network namespace
|
|
||||||
- 2.然后 kubelet调用网络CNI driver,由它根据配置调用具体的CNI 插件
|
|
||||||
- 3.然后 CNI 插件给`基础容器`配置网络
|
|
||||||
- 4.最后 Pod 中其他的容器共享使用`基础容器`的网络
|
|
||||||
|
|
||||||
本文档基于CNI driver 调用calico 插件来配置kubernetes的网络,常用CNI插件有 `flannel` `calico` `weave`等等,这些插件各有优势,也在互相借鉴学习优点,比如:在所有node节点都在一个二层网络时候,flannel提供hostgw实现,避免vxlan实现的udp封装开销,估计是目前最高效的;calico也针对L3 Fabric,推出了IPinIP的选项,利用了GRE隧道封装;因此这些插件都能适合很多实际应用场景,这里选择calico,主要考虑它支持 `kubernetes network policy`。
|
|
||||||
|
|
||||||
推荐阅读[calico kubernetes Integration Guide](https://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/integration)
|
|
||||||
|
|
||||||
calico-node需要在所有master节点和node节点安装
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
roles/calico/
|
|
||||||
├── tasks
|
|
||||||
│ └── main.yml
|
|
||||||
└── templates
|
|
||||||
├── calicoctl.cfg.j2
|
|
||||||
├── calico-node.service.j2
|
|
||||||
└── cni-calico.conf.j2
|
|
||||||
```
|
|
||||||
请在另外窗口打开[roles/calico/tasks/main.yml](../roles/calico/tasks/main.yml) 文件,对照看以下讲解内容。
|
|
||||||
|
|
||||||
### 创建calico 证书申请
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
{
|
|
||||||
"CN": "calico",
|
|
||||||
"hosts": [],
|
|
||||||
"key": {
|
|
||||||
"algo": "rsa",
|
|
||||||
"size": 2048
|
|
||||||
},
|
|
||||||
"names": [
|
|
||||||
{
|
|
||||||
"C": "CN",
|
|
||||||
"ST": "HangZhou",
|
|
||||||
"L": "XS",
|
|
||||||
"O": "k8s",
|
|
||||||
"OU": "System"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
- calico 使用客户端证书,所以hosts字段可以为空;后续可以看到calico证书用在四个地方:
|
|
||||||
- calico/node 这个docker 容器运行时访问 etcd 使用证书
|
|
||||||
- cni 配置文件中,cni 插件需要访问 etcd 使用证书
|
|
||||||
- calicoctl 操作集群网络时访问 etcd 使用证书
|
|
||||||
- calico/kube-controllers 同步集群网络策略时访问 etcd 使用证书
|
|
||||||
|
|
||||||
### 创建 calico-node 的服务文件 [calico-node.service.j2](../roles/calico/templates/calico-node.service.j2)
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
[Unit]
|
|
||||||
Description=calico node
|
|
||||||
After=docker.service
|
|
||||||
Requires=docker.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
User=root
|
|
||||||
PermissionsStartOnly=true
|
|
||||||
ExecStart={{ bin_dir }}/docker run --net=host --privileged --name=calico-node \
|
|
||||||
-e ETCD_ENDPOINTS={{ ETCD_ENDPOINTS }} \
|
|
||||||
-e ETCD_CA_CERT_FILE=/etc/calico/ssl/ca.pem \
|
|
||||||
-e ETCD_CERT_FILE=/etc/calico/ssl/calico.pem \
|
|
||||||
-e ETCD_KEY_FILE=/etc/calico/ssl/calico-key.pem \
|
|
||||||
-e CALICO_LIBNETWORK_ENABLED=true \
|
|
||||||
-e CALICO_NETWORKING_BACKEND=bird \
|
|
||||||
-e CALICO_DISABLE_FILE_LOGGING=true \
|
|
||||||
-e CALICO_IPV4POOL_CIDR={{ CLUSTER_CIDR }} \
|
|
||||||
-e CALICO_IPV4POOL_IPIP=off \
|
|
||||||
-e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \
|
|
||||||
-e FELIX_IPV6SUPPORT=false \
|
|
||||||
-e FELIX_LOGSEVERITYSCREEN=info \
|
|
||||||
-e FELIX_IPINIPMTU=1440 \
|
|
||||||
-e FELIX_HEALTHENABLED=true \
|
|
||||||
-e IP= {{ NODE_IP }} \
|
|
||||||
-v /etc/calico/ssl:/etc/calico/ssl \
|
|
||||||
-v /var/run/calico:/var/run/calico \
|
|
||||||
-v /lib/modules:/lib/modules \
|
|
||||||
-v /run/docker/plugins:/run/docker/plugins \
|
|
||||||
-v /var/run/docker.sock:/var/run/docker.sock \
|
|
||||||
-v /var/log/calico:/var/log/calico \
|
|
||||||
calico/node:v2.6.2
|
|
||||||
ExecStop={{ bin_dir }}/docker rm -f calico-node
|
|
||||||
Restart=always
|
|
||||||
RestartSec=10
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
```
|
|
||||||
+ 详细配置参数请参考[calico官方文档](https://docs.projectcalico.org/v2.6/reference/node/configuration)
|
|
||||||
+ calico-node是以docker容器运行在host上的,因此需要把之前的证书目录 /etc/calico/ssl挂载到容器中
|
|
||||||
+ 配置ETCD_ENDPOINTS 、CA、证书等,所有{{ }}变量与ansible hosts文件中设置对应
|
|
||||||
+ 配置集群POD网络 CALICO_IPV4POOL_CIDR={{ CLUSTER_CIDR }}
|
|
||||||
+ **重要**本K8S集群运行在同网段kvm虚机上,虚机间没有网络ACL限制,因此可以设置`CALICO_IPV4POOL_IPIP=off`,如果你的主机位于不同网段,或者运行在公有云上需要打开这个选项 `CALICO_IPV4POOL_IPIP=always`
|
|
||||||
+ 配置FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT 默认允许Pod到Node的网络流量,更多[felix配置选项](https://docs.projectcalico.org/v2.6/reference/felix/configuration)
|
|
||||||
|
|
||||||
### 启动calico-node
|
|
||||||
|
|
||||||
### 准备cni-calico配置文件 [cni-calico.conf.j2](../roles/calico/templates/cni-calico.conf.j2)
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
{
|
|
||||||
"name": "calico-k8s-network",
|
|
||||||
"cniVersion": "0.1.0",
|
|
||||||
"type": "calico",
|
|
||||||
"etcd_endpoints": "{{ ETCD_ENDPOINTS }}",
|
|
||||||
"etcd_key_file": "/etc/calico/ssl/calico-key.pem",
|
|
||||||
"etcd_cert_file": "/etc/calico/ssl/calico.pem",
|
|
||||||
"etcd_ca_cert_file": "/etc/calico/ssl/ca.pem",
|
|
||||||
"log_level": "info",
|
|
||||||
"mtu": 1500,
|
|
||||||
"ipam": {
|
|
||||||
"type": "calico-ipam"
|
|
||||||
},
|
|
||||||
"policy": {
|
|
||||||
"type": "k8s"
|
|
||||||
},
|
|
||||||
"kubernetes": {
|
|
||||||
"kubeconfig": "/root/.kube/config"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
```
|
|
||||||
+ 主要配置etcd相关、ipam、policy等,配置选项[参考](https://docs.projectcalico.org/v2.6/reference/cni-plugin/configuration)
|
|
||||||
|
|
||||||
### [可选]配置calicoctl工具 [calicoctl.cfg.j2](roles/calico/templates/calicoctl.cfg.j2)
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
apiVersion: v1
|
|
||||||
kind: calicoApiConfig
|
|
||||||
metadata:
|
|
||||||
spec:
|
|
||||||
datastoreType: "etcdv2"
|
|
||||||
etcdEndpoints: {{ ETCD_ENDPOINTS }}
|
|
||||||
etcdKeyFile: /etc/calico/ssl/calico-key.pem
|
|
||||||
etcdCertFile: /etc/calico/ssl/calico.pem
|
|
||||||
etcdCACertFile: /etc/calico/ssl/ca.pem
|
|
||||||
```
|
|
||||||
|
|
||||||
### 验证calico网络
|
|
||||||
|
|
||||||
执行calico安装 `ansible-playbook 05.calico.yml` 成功后可以验证如下:(需要等待calico/node:v2.6.2 镜像下载完成,有时候即便上一步已经配置了docker国内加速,还是可能比较慢,建议确认以下容器运行起来以后,再执行后续步骤)
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
docker ps
|
|
||||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
|
||||||
631dde89eada calico/node:v2.6.2 "start_runit" 10 minutes ago Up 10 minutes calico-node
|
|
||||||
```
|
|
||||||
|
|
||||||
**查看网卡和路由信息**
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
ip a #...省略其他网卡信息,可以看到包含类似cali1cxxx的网卡
|
|
||||||
3: caliccc295a6d4f@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
|
|
||||||
link/ether 12:79:2f:fe:8d:28 brd ff:ff:ff:ff:ff:ff link-netnsid 0
|
|
||||||
inet6 fe80::1079:2fff:fefe:8d28/64 scope link
|
|
||||||
valid_lft forever preferred_lft forever
|
|
||||||
5: tunl0@NONE: <NOARP> mtu 1480 qdisc noop state DOWN group default qlen 1
|
|
||||||
link/ipip 0.0.0.0 brd 0.0.0.0
|
|
||||||
# tunl0网卡现在不用管,是默认生成的,当开启IPIP 特性时使用的隧道
|
|
||||||
|
|
||||||
route -n
|
|
||||||
Kernel IP routing table
|
|
||||||
Destination Gateway Genmask Flags Metric Ref Use Iface
|
|
||||||
0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 ens3
|
|
||||||
192.168.1.0 0.0.0.0 255.255.255.0 U 0 0 0 ens3
|
|
||||||
172.17.0.0 0.0.0.0 255.255.0.0 U 0 0 0 docker0
|
|
||||||
172.20.3.64 192.168.1.65 255.255.255.192 UG 0 0 0 ens3
|
|
||||||
172.20.33.128 0.0.0.0 255.255.255.192 U 0 0 0 *
|
|
||||||
172.20.33.129 0.0.0.0 255.255.255.255 UH 0 0 0 caliccc295a6d4f
|
|
||||||
172.20.104.0 192.168.1.37 255.255.255.192 UG 0 0 0 ens3
|
|
||||||
172.20.166.128 192.168.1.36 255.255.255.192 UG 0 0 0 ens3
|
|
||||||
```
|
|
||||||
|
|
||||||
**查看所有calico节点状态**
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
calicoctl node status
|
|
||||||
Calico process is running.
|
|
||||||
|
|
||||||
IPv4 BGP status
|
|
||||||
+--------------+-------------------+-------+----------+-------------+
|
|
||||||
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
|
|
||||||
+--------------+-------------------+-------+----------+-------------+
|
|
||||||
| 192.168.1.34 | node-to-node mesh | up | 12:34:00 | Established |
|
|
||||||
| 192.168.1.35 | node-to-node mesh | up | 12:34:00 | Established |
|
|
||||||
| 192.168.1.63 | node-to-node mesh | up | 12:34:01 | Established |
|
|
||||||
| 192.168.1.36 | node-to-node mesh | up | 12:34:00 | Established |
|
|
||||||
| 192.168.1.65 | node-to-node mesh | up | 12:34:00 | Established |
|
|
||||||
| 192.168.1.37 | node-to-node mesh | up | 12:34:15 | Established |
|
|
||||||
+--------------+-------------------+-------+----------+-------------+
|
|
||||||
```
|
|
||||||
|
|
||||||
**BGP 协议是通过TCP 连接来建立邻居的,因此可以用netstat 命令验证 BGP Peer**
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
netstat -antlp|grep ESTABLISHED|grep 179
|
|
||||||
tcp 0 0 192.168.1.66:179 192.168.1.35:41316 ESTABLISHED 28479/bird
|
|
||||||
tcp 0 0 192.168.1.66:179 192.168.1.36:52823 ESTABLISHED 28479/bird
|
|
||||||
tcp 0 0 192.168.1.66:179 192.168.1.65:56311 ESTABLISHED 28479/bird
|
|
||||||
tcp 0 0 192.168.1.66:42000 192.168.1.37:179 ESTABLISHED 28479/bird
|
|
||||||
tcp 0 0 192.168.1.66:179 192.168.1.34:40243 ESTABLISHED 28479/bird
|
|
||||||
tcp 0 0 192.168.1.66:179 192.168.1.63:48979 ESTABLISHED 28479/bird
|
|
||||||
```
|
|
||||||
|
|
||||||
**查看集群ipPool情况**
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
calicoctl get ipPool -o yaml
|
|
||||||
- apiVersion: v1
|
|
||||||
kind: ipPool
|
|
||||||
metadata:
|
|
||||||
cidr: 172.20.0.0/16
|
|
||||||
spec:
|
|
||||||
nat-outgoing: true
|
|
||||||
```
|
|
||||||
|
|
||||||
[前一篇](04-安装docker服务.md) -- [后一篇](06-安装kube-master节点.md)
|
|
||||||
|
|
|
@@ -91,7 +91,7 @@ if [ -f "docker-${DOCKER_VER}.tgz" ]; then
     tar zxf docker-${DOCKER_VER}.tgz
     mv docker/docker* ../bin
     if [ -f "docker/completion/bash/docker" ]; then
-        mv -f docker/completion/bash/docker ../roles/kube-node/files/docker
+        mv -f docker/completion/bash/docker ../roles/docker/files/docker
     fi
 else
     echo 请先下载docker-${DOCKER_VER}.tgz
@@ -33,6 +33,11 @@
 - name: 删除默认cni配置
   file: path=/etc/cni/net.d/10-default.conf state=absent
 
+# 删除原有cni插件网卡mynet0
+- name: 删除默认cni插件网卡mynet0
+  shell: "ip link del mynet0"
+  ignore_errors: true
+
 # [可选]cni calico plugins 已经在calico.yaml完成自动安装
 - name: 下载calicoctl 客户端
   copy: src={{ base_dir }}/bin/{{ item }} dest={{ bin_dir }}/{{ item }} mode=0755
@@ -1,41 +0,0 @@
-# Calico Version v2.6.2
-# https://docs.projectcalico.org/v2.6/releases#v2.6.2
-
----
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: calico-kube-controllers
-rules:
-  - apiGroups:
-    - ""
-    - extensions
-    resources:
-      - pods
-      - namespaces
-      - networkpolicies
-    verbs:
-      - watch
-      - list
----
-
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: calico-kube-controllers
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico-kube-controllers
-subjects:
-- kind: ServiceAccount
-  name: calico-kube-controllers
-  namespace: kube-system
-
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: calico-kube-controllers
-  namespace: kube-system
Loading…
Reference in New Issue