mirror of https://github.com/easzlab/kubeasz.git

fix traefik ingress installation path

parent 144b7aeeb5
commit 21e6a48622
@@ -19,7 +19,7 @@ $ kubectl -n kube-system create secret tls traefik-cert --key=tls.key --cert=tls
 ## 3. Create the traefik-controller, adding the traefik.toml config file, the https port exposure, etc.; see that yaml file for details
 
 ``` bash
-$ kubectl apply -f /etc/ansible/manifests/ingress/tls/traefik-controller.yaml
+$ kubectl apply -f /etc/ansible/manifests/ingress/traefik/tls/traefik-controller.yaml
 ```
 
 ## 4. Create an https ingress example
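For testing, the `traefik-cert` secret referenced above (created in `kube-system` for the controller here, and later in `default` for the hello example) can be backed by a throwaway self-signed certificate; a minimal sketch, assuming `hello.test.com` as the test host used later in these docs:

``` bash
# Generate a disposable self-signed certificate (testing only)
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout tls.key -out tls.crt -subj "/CN=hello.test.com"

# Store it as the kube-system secret expected by the traefik-controller setup
kubectl -n kube-system create secret tls traefik-cert --key=tls.key --cert=tls.crt
```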
@@ -45,7 +45,7 @@ spec:
   tls:
   - secretName: traefik-cert
 # Create the https ingress
-$ kubectl apply -f /etc/ansible/manifests/ingress/tls/hello-tls.ing.yaml
+$ kubectl apply -f /etc/ansible/manifests/ingress/traefik/tls/hello-tls.ing.yaml
 # Note: per the hello example, the matching secret traefik-cert has to be created in the default namespace
 $ kubectl create secret tls traefik-cert --key=tls.key --cert=tls.crt
 ```
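The body of `hello-tls.ing.yaml` itself is not shown in this diff; as a rough sketch, an equivalent https ingress for the traefik 1.x / `extensions/v1beta1` era would look something like the following, where the host matches the hello example and the backend service name is a hypothetical placeholder:

``` bash
# Sketch only: serviceName and the ingress name are placeholders, not the repo's values
cat <<'EOF' | kubectl apply -f -
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: hello-tls-ingress
  namespace: default
spec:
  rules:
  - host: hello.test.com
    http:
      paths:
      - backend:
          serviceName: hello-svc   # placeholder backend service
          servicePort: 80
  tls:
  - secretName: traefik-cert       # must exist in the default namespace (see above)
EOF
```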
@@ -14,12 +14,12 @@ An ingress is the entry point into the cluster from outside kubernetes, routing users' URL requests
 
 ### Deploying Traefik
 
-Traefik provides a simple, easy-to-use `Ingress controller`; the following walks through a small ingress deployment and test example based on it. See the yaml config [traefik-ingress.yaml](../../manifests/ingress/traefik-ingress.yaml) and the [official traefik k8s examples](https://github.com/containous/traefik/tree/master/examples/k8s)
+Traefik provides a simple, easy-to-use `Ingress controller`; the following walks through a small ingress deployment and test example based on it. See the yaml config [traefik-ingress.yaml](../../manifests/ingress/traefik/traefik-ingress.yaml) and the [official traefik k8s examples](https://github.com/containous/traefik/tree/master/examples/k8s)
 
 #### Installing the traefik ingress-controller
 
 ``` bash
-kubectl create -f /etc/ansible/manifests/ingress/traefik-ingress.yaml
+kubectl create -f /etc/ansible/manifests/ingress/traefik/traefik-ingress.yaml
 ```
 + Note that `RBAC` authorization needs to be configured
 + Note that in the `traefik pod`, port `80` is the traefik ingress-controller service port and port `8080` is the traefik management web UI; for convenience later on, port `80` is exposed with `NodePort` `23456` (which must be an available port within the `NODE_PORT_RANGE` set in the hosts config)
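To confirm the port layout described in the two notes above, the controller's service can be inspected directly (service name and namespace as they appear in the verification output later in this doc):

``` bash
# PORT(S) should show 80 mapped to NodePort 23456 and 8080 mapped to a random NodePort
kubectl -n kube-system get svc traefik-ingress-service

# If the manifest also ships RBAC objects, the binding shows up here
kubectl get clusterrolebinding | grep -i traefik
```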
@@ -71,7 +71,7 @@ spec:
 ```
 + Try access from inside the cluster: `curl -H Host:hello.test.com 10.68.69.170` (the traefik-ingress-service address) should return the welcome page `Welcome to nginx!`; from outside the cluster (assuming one NodeIP of the cluster is 192.168.1.1), `curl -H Host:hello.test.com 192.168.1.1:23456` should also return `Welcome to nginx!`, which means the ingress test succeeded
 
-+ Next, create an ingress for the traefik web management UI as well: `kubectl create -f /etc/ansible/manifests/ingress/traefik-ui.ing.yaml`
++ Next, create an ingress for the traefik web management UI as well: `kubectl create -f /etc/ansible/manifests/ingress/traefik/traefik-ui.ing.yaml`
 
 ``` bash
 # contents of traefik-ui.ing.yaml
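The actual contents of `traefik-ui.ing.yaml` are cut off by the hunk boundary; a generic sketch of an ingress that fronts the `8080` web UI of `traefik-ingress-service` could look like this (the host name and ingress name are assumptions, not the repo's values):

``` bash
# Sketch only: host and metadata.name are made up for illustration
cat <<'EOF' | kubectl apply -f -
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
spec:
  rules:
  - host: traefik-ui.test.com   # assumed host
    http:
      paths:
      - backend:
          serviceName: traefik-ingress-service
          servicePort: 8080
EOF
```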
@@ -105,9 +105,9 @@ spec:
 
 ``` bash
 # Switch traefik-ingress to a LoadBalancer service
-$ sed -i 's/NodePort$/LoadBalancer/g' /etc/ansible/manifests/ingress/traefik-ingress.yaml
+$ sed -i 's/NodePort$/LoadBalancer/g' /etc/ansible/manifests/ingress/traefik/traefik-ingress.yaml
 # Create traefik-ingress
-$ kubectl apply -f /etc/ansible/manifests/ingress/traefik-ingress.yaml
+$ kubectl apply -f /etc/ansible/manifests/ingress/traefik/traefik-ingress.yaml
 # Verify
 $ kubectl get svc --all-namespaces |grep traefik
 kube-system   traefik-ingress-service   LoadBalancer   10.68.163.243   192.168.1.241   80:23456/TCP,8080:37088/TCP   1m
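Once the service type is LoadBalancer, the earlier test can go through the EXTERNAL-IP on plain port 80 instead of the NodePort; using the sample address 192.168.1.241 from the output above:

``` bash
# Port 80 on the LoadBalancer address reaches the ingress rules directly
curl -H 'Host: hello.test.com' http://192.168.1.241/

# Port 8080 on the same address should answer with the traefik management UI
curl http://192.168.1.241:8080/
```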
@@ -101,9 +101,9 @@ nginx3 LoadBalancer 10.68.82.227 192.168.1.240 80:38702/TCP 1m
 
 ``` bash
 # Switch traefik-ingress to a LoadBalancer service
-$ sed -i 's/NodePort$/LoadBalancer/g' /etc/ansible/manifests/ingress/traefik-ingress.yaml
+$ sed -i 's/NodePort$/LoadBalancer/g' /etc/ansible/manifests/ingress/traefik/traefik-ingress.yaml
 # Create traefik-ingress
-$ kubectl apply -f /etc/ansible/manifests/ingress/traefik-ingress.yaml
+$ kubectl apply -f /etc/ansible/manifests/ingress/traefik/traefik-ingress.yaml
 # Verify
 $ kubectl get svc --all-namespaces |grep traefik
 kube-system   traefik-ingress-service   LoadBalancer   10.68.163.243   192.168.1.241   80:23456/TCP,8080:37088/TCP   1m
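The `sed` call above only rewrites lines ending in `NodePort`, which in a Service manifest is the `type: NodePort` field; assuming the manifest declares the type on its own line, the edit can be sanity-checked like this:

``` bash
# List every type: declaration left in the manifest after the edit
grep -n 'type:' /etc/ansible/manifests/ingress/traefik/traefik-ingress.yaml
# The Service for traefik should now read "type: LoadBalancer"
```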
@@ -101,7 +101,7 @@
   when: 'traefik_offline in image_info.stdout'
 
 - name: 创建 traefik部署
-  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/ingress/traefik-ingress.yaml"
+  shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/ingress/traefik/traefik-ingress.yaml"
   delegate_to: "{{ groups.deploy[0] }}"
   run_once: true
   when: '"traefik-ingress-controller" not in pod_info.stdout and ingress_install == "yes"'
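After this task has run, the deployment it applies can be checked from the deploy node; a minimal sketch, assuming `kubectl` is on the PATH rather than invoked through `{{ bin_dir }}`:

``` bash
# The task's own "when" guard keys off this pod name prefix
kubectl -n kube-system get pods | grep traefik-ingress-controller

# The service created by the same manifest
kubectl -n kube-system get svc | grep traefik
```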