用inventory_hostname替换变量NODE_IP

pull/243/merge
jmgao 2018-06-09 22:19:20 +08:00
parent 2340b9f214
commit 931b2cf1b9
23 changed files with 67 additions and 67 deletions

View File

@ -5,10 +5,10 @@
# 禁止业务 pod调度到 master节点
tasks:
- name: 禁止业务 pod调度到 master节点
shell: "{{ bin_dir }}/kubectl cordon {{ NODE_IP }} "
shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
when: DEPLOY_MODE != "allinone"
ignore_errors: true
- name: 设置master节点role
shell: "{{ bin_dir }}/kubectl label node {{ NODE_IP }} kubernetes.io/role=master --overwrite"
shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
ignore_errors: true

View File

@ -18,10 +18,10 @@
# 禁止业务 pod调度到 master节点
tasks:
- name: 禁止业务 pod调度到 master节点
shell: "{{ bin_dir }}/kubectl cordon {{ NODE_IP }} "
shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
when: DEPLOY_MODE != "allinone"
ignore_errors: true
- name: 设置master节点role
shell: "{{ bin_dir }}/kubectl label node {{ NODE_IP }} kubernetes.io/role=master --overwrite"
shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
ignore_errors: true

View File

@ -38,12 +38,12 @@
# 禁止业务 pod调度到 master节点
tasks:
- name: 禁止业务 pod调度到 master节点
shell: "{{ bin_dir }}/kubectl cordon {{ NODE_IP }} "
shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
when: DEPLOY_MODE != "allinone"
ignore_errors: true
- name: 设置master节点role
shell: "{{ bin_dir }}/kubectl label node {{ NODE_IP }} kubernetes.io/role=master --overwrite"
shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
ignore_errors: true
# node 节点部署

View File

@ -23,7 +23,7 @@ kubernetes 系统使用 etcd 存储所有数据,是最重要的组件之一,
"CN": "etcd",
"hosts": [
"127.0.0.1",
"{{ NODE_IP }}"
"{{ inventory_hostname }}"
],
"key": {
"algo": "rsa",
@ -75,10 +75,10 @@ ExecStart={{ bin_dir }}/etcd \
--peer-key-file=/etc/etcd/ssl/etcd-key.pem \
--trusted-ca-file={{ ca_dir }}/ca.pem \
--peer-trusted-ca-file={{ ca_dir }}/ca.pem \
--initial-advertise-peer-urls=https://{{ NODE_IP }}:2380 \
--listen-peer-urls=https://{{ NODE_IP }}:2380 \
--listen-client-urls=https://{{ NODE_IP }}:2379,http://127.0.0.1:2379 \
--advertise-client-urls=https://{{ NODE_IP }}:2379 \
--initial-advertise-peer-urls=https://{{ inventory_hostname }}:2380 \
--listen-peer-urls=https://{{ inventory_hostname }}:2380 \
--listen-client-urls=https://{{ inventory_hostname }}:2379,http://127.0.0.1:2379 \
--advertise-client-urls=https://{{ inventory_hostname }}:2379 \
--initial-cluster-token=etcd-cluster-0 \
--initial-cluster={{ ETCD_NODES }} \
--initial-cluster-state=new \

View File

@ -38,7 +38,7 @@ roles/kube-master/
"hosts": [
"127.0.0.1",
"{{ MASTER_IP }}",
"{{ NODE_IP }}",
"{{ inventory_hostname }}",
"{{ CLUSTER_KUBERNETES_SVC_IP }}",
"kubernetes",
"kubernetes.default",
@ -89,7 +89,7 @@ After=network.target
[Service]
ExecStart={{ bin_dir }}/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--bind-address={{ NODE_IP }} \
--bind-address={{ inventory_hostname }} \
--insecure-bind-address=127.0.0.1 \
--authorization-mode=Node,RBAC \
--runtime-config=rbac.authorization.k8s.io/v1 \
@ -207,7 +207,7 @@ WantedBy=multi-user.target
# 禁止业务 pod调度到 master节点
tasks:
- name: 禁止业务 pod调度到 master节点
shell: "{{ bin_dir }}/kubectl cordon {{ NODE_IP }} "
shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
when: DEPLOY_MODE != "allinone"
ignore_errors: true
```

View File

@ -49,8 +49,8 @@ Requires=docker.service
WorkingDirectory=/var/lib/kubelet
#--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest
ExecStart={{ bin_dir }}/kubelet \
--address={{ NODE_IP }} \
--hostname-override={{ NODE_IP }} \
--address={{ inventory_hostname }} \
--hostname-override={{ inventory_hostname }} \
--pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.1 \
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
@ -102,8 +102,8 @@ After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart={{ bin_dir }}/kube-proxy \
--bind-address={{ NODE_IP }} \
--hostname-override={{ NODE_IP }} \
--bind-address={{ inventory_hostname }} \
--hostname-override={{ inventory_hostname }} \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
--logtostderr=true \
--v=2

View File

@ -19,7 +19,7 @@ mv docker-compose-Linux-x86_64 /etc/ansible/bin/docker-compose
``` bash
# 如果启用harbor请配置后面harbor相关参数
[harbor]
192.168.1.8 NODE_IP="192.168.1.8"
192.168.1.8
#私有仓库 harbor服务器 (域名或者IP)
HARBOR_IP="192.168.1.8"
@ -51,7 +51,7 @@ HARBOR_DOMAIN="harbor.test.com"
### 验证harbor
1. 在harbor节点使用`docker ps -a` 查看harbor容器组件运行情况
1. 浏览器访问harbor节点的IP地址 `https://{{ NODE_IP }}`,使用账号 admin 和 密码 Harbor12345 (harbor.cfg 配置文件中的默认)登陆系统
1. 浏览器访问harbor节点的IP地址 `https://$NodeIP`,使用账号 admin 和 密码 Harbor12345 (harbor.cfg 配置文件中的默认)登陆系统
### 在k8s集群使用harbor

View File

@ -5,7 +5,7 @@ kubelet默认启动参数`--anonymous-auth=true`风险非常大,黑客可以
## 关于漏洞的危害
据我所知,k8s v1.5+ 所有版本的kubelet组件的默认启动参数是允许匿名访问kubelet的,这是默认的大坑,你可以使用如下命令检查你的集群:
`curl -sk https://$NODE_IP:10250/runningpods/`
`curl -sk https://$NodeIP:10250/runningpods/`
- 如果返回了运行的pod信息说明是允许匿名访问的
- 如果返回`Unauthorized`,说明是安全的

View File

@ -27,7 +27,7 @@ master2="192.168.1.2:6443"
master3="192.168.1.5:6443" # 新增 master节点
...
[new-master]
192.168.1.5 NODE_IP="192.168.1.5" # 新增 master节点
192.168.1.5 # 新增 master节点
```
- 修改roles/lb/templates/haproxy.cfg.j2 文件增加新增的master节点举例如下

View File

@ -16,7 +16,7 @@
...
# 预留组后续添加node节点使用
[new-node]
192.168.1.6 NODE_ID=node6 NODE_IP="192.168.1.6"
192.168.1.6 NODE_ID=node6
...
```
- 执行安装脚本

View File

@ -2,23 +2,23 @@
[deploy]
192.168.1.1
# etcd集群请提供如下NODE_NAME、NODE_IP变量,注意etcd集群必须是1,3,5,7...奇数个节点
# etcd集群请提供如下NODE_NAME,注意etcd集群必须是1,3,5,7...奇数个节点
[etcd]
192.168.1.1 NODE_NAME=etcd1 NODE_IP="192.168.1.1"
192.168.1.1 NODE_NAME=etcd1
[kube-master]
192.168.1.1 NODE_IP="192.168.1.1"
192.168.1.1
[kube-node]
192.168.1.1 NODE_IP="192.168.1.1"
192.168.1.1
# 如果启用harbor请配置后面harbor相关参数
[harbor]
#192.168.1.8 NODE_IP="192.168.1.8"
#192.168.1.8
# 预留组后续添加node节点使用
[new-node]
#192.168.1.xx NODE_IP="192.168.1.xx"
#192.168.1.xx
[all:vars]
# ---------集群主要参数---------------

View File

@ -2,15 +2,15 @@
[deploy]
192.168.1.1
# etcd集群请提供如下NODE_NAME、NODE_IP变量,注意etcd集群必须是1,3,5,7...奇数个节点
# etcd集群请提供如下NODE_NAME,注意etcd集群必须是1,3,5,7...奇数个节点
[etcd]
192.168.1.1 NODE_NAME=etcd1 NODE_IP="192.168.1.1"
192.168.1.2 NODE_NAME=etcd2 NODE_IP="192.168.1.2"
192.168.1.3 NODE_NAME=etcd3 NODE_IP="192.168.1.3"
192.168.1.1 NODE_NAME=etcd1
192.168.1.2 NODE_NAME=etcd2
192.168.1.3 NODE_NAME=etcd3
[kube-master]
192.168.1.1 NODE_IP="192.168.1.1"
192.168.1.2 NODE_IP="192.168.1.2"
192.168.1.1
192.168.1.2
# 负载均衡至少两个节点,安装 haproxy+keepalived
[lb]
@ -23,21 +23,21 @@ master2="192.168.1.2:6443" # 需同步设置roles/lb/templates/haproxy.cfg.j2
ROUTER_ID=57 # 取值在0-255之间区分多个instance的VRRP组播同网段不能重复
[kube-node]
192.168.1.2 NODE_IP="192.168.1.2"
192.168.1.3 NODE_IP="192.168.1.3"
192.168.1.4 NODE_IP="192.168.1.4"
192.168.1.2
192.168.1.3
192.168.1.4
# 如果启用harbor请配置后面harbor相关参数
[harbor]
#192.168.1.8 NODE_IP="192.168.1.8"
#192.168.1.8
# 预留组后续添加master节点使用
[new-master]
#192.168.1.5 NODE_IP="192.168.1.5"
#192.168.1.5
# 预留组后续添加node节点使用
[new-node]
#192.168.1.xx NODE_IP="192.168.1.xx"
#192.168.1.xx
[all:vars]
# ---------集群主要参数---------------

View File

@ -2,27 +2,27 @@
[deploy]
192.168.1.1
# etcd集群请提供如下NODE_NAME、NODE_IP变量,请注意etcd集群必须是1,3,5,7...奇数个节点
# etcd集群请提供如下NODE_NAME,请注意etcd集群必须是1,3,5,7...奇数个节点
[etcd]
192.168.1.1 NODE_NAME=etcd1 NODE_IP="192.168.1.1"
192.168.1.2 NODE_NAME=etcd2 NODE_IP="192.168.1.2"
192.168.1.3 NODE_NAME=etcd3 NODE_IP="192.168.1.3"
192.168.1.1 NODE_NAME=etcd1
192.168.1.2 NODE_NAME=etcd2
192.168.1.3 NODE_NAME=etcd3
[kube-master]
192.168.1.1 NODE_IP="192.168.1.1"
192.168.1.1
[kube-node]
192.168.1.1 NODE_IP="192.168.1.1"
192.168.1.2 NODE_IP="192.168.1.2"
192.168.1.3 NODE_IP="192.168.1.3"
192.168.1.1
192.168.1.2
192.168.1.3
# 如果启用harbor请配置后面harbor相关参数
[harbor]
#192.168.1.8 NODE_IP="192.168.1.8"
#192.168.1.8
# 预留组后续添加node节点使用
[new-node]
#192.168.1.xx NODE_IP="192.168.1.xx"
#192.168.1.xx
[all:vars]
# ---------集群主要参数---------------

View File

@ -2,7 +2,7 @@
"CN": "etcd",
"hosts": [
"127.0.0.1",
"{{ NODE_IP }}"
"{{ inventory_hostname }}"
],
"key": {
"algo": "rsa",

View File

@ -16,10 +16,10 @@ ExecStart={{ bin_dir }}/etcd \
--peer-key-file=/etc/etcd/ssl/etcd-key.pem \
--trusted-ca-file={{ ca_dir }}/ca.pem \
--peer-trusted-ca-file={{ ca_dir }}/ca.pem \
--initial-advertise-peer-urls=https://{{ NODE_IP }}:2380 \
--listen-peer-urls=https://{{ NODE_IP }}:2380 \
--listen-client-urls=https://{{ NODE_IP }}:2379,http://127.0.0.1:2379 \
--advertise-client-urls=https://{{ NODE_IP }}:2379 \
--initial-advertise-peer-urls=https://{{ inventory_hostname }}:2380 \
--listen-peer-urls=https://{{ inventory_hostname }}:2380 \
--listen-client-urls=https://{{ inventory_hostname }}:2379,http://127.0.0.1:2379 \
--advertise-client-urls=https://{{ inventory_hostname }}:2379 \
--initial-cluster-token=etcd-cluster-0 \
--initial-cluster={{ ETCD_NODES }} \
--initial-cluster-state=new \

View File

@ -2,7 +2,7 @@
"CN": "harbor",
"hosts": [
"127.0.0.1",
"{{ NODE_IP }}",
"{{ inventory_hostname }}",
"{{ HARBOR_DOMAIN }}"
],
"key": {

View File

@ -4,7 +4,7 @@
_version = 1.5.0
#The IP address or hostname to access admin UI and registry service.
#DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname = {{ NODE_IP }}
hostname = {{ inventory_hostname }}
#The protocol for accessing the UI and token/notification service, by default it is http.
#It can be set to https if ssl is enabled on nginx.

View File

@ -6,7 +6,7 @@ After=network.target
[Service]
ExecStart={{ bin_dir }}/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--bind-address={{ NODE_IP }} \
--bind-address={{ inventory_hostname }} \
--insecure-bind-address=127.0.0.1 \
--authorization-mode=Node,RBAC \
--kubelet-https=true \

View File

@ -3,7 +3,7 @@
"hosts": [
"127.0.0.1",
"{{ MASTER_IP }}",
"{{ NODE_IP }}",
"{{ inventory_hostname }}",
"{{ CLUSTER_KUBERNETES_SVC_IP }}",
"kubernetes",
"kubernetes.default",

View File

@ -84,7 +84,7 @@
run_once: true
- name: 轮询等待node达到Ready状态
shell: "{{ bin_dir }}/kubectl get node {{ NODE_IP }}|awk 'NR>1{print $2}'"
shell: "{{ bin_dir }}/kubectl get node {{ inventory_hostname }}|awk 'NR>1{print $2}'"
register: node_status
until: node_status.stdout == "Ready" or node_status.stdout == "Ready,SchedulingDisabled"
retries: 8
@ -92,5 +92,5 @@
tags: upgrade_k8s, restart_node
- name: 设置node节点role
shell: "{{ bin_dir }}/kubectl label node {{ NODE_IP }} kubernetes.io/role=node --overwrite"
shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=node --overwrite"
ignore_errors: true

View File

@ -8,8 +8,8 @@ After=network.target
# kube-proxy 会对访问 Service IP 的请求做 SNAT,这个特性与calico 实现 network policy冲突,因此禁用
WorkingDirectory=/var/lib/kube-proxy
ExecStart={{ bin_dir }}/kube-proxy \
--bind-address={{ NODE_IP }} \
--hostname-override={{ NODE_IP }} \
--bind-address={{ inventory_hostname }} \
--hostname-override={{ inventory_hostname }} \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
--logtostderr=true \
--v=2

View File

@ -8,8 +8,8 @@ Requires=docker.service
WorkingDirectory=/var/lib/kubelet
#--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest
ExecStart={{ bin_dir }}/kubelet \
--address={{ NODE_IP }} \
--hostname-override={{ NODE_IP }} \
--address={{ inventory_hostname }} \
--hostname-override={{ inventory_hostname }} \
--pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.1 \
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \

View File

@ -26,7 +26,7 @@
- hosts: deploy
tasks:
- name: 删除老IP地址的node
shell: "{{ bin_dir }}/kubectl get node |grep -v '{{ NODE_IP }}'|awk '{print $1}' |xargs {{ bin_dir }}/kubectl delete node"
shell: "{{ bin_dir }}/kubectl get node |grep -v '{{ inventory_hostname }}'|awk '{print $1}' |xargs {{ bin_dir }}/kubectl delete node"
ignore_errors: true
- name: 删除原network插件部署