translate playbooks comments into english -1

pull/334/head
gjmzj 2018-09-29 09:06:19 +08:00
parent 8caa842e2f
commit eab1d628e9
24 changed files with 207 additions and 196 deletions

2
.gitignore vendored
View File

@@ -17,6 +17,6 @@ manifests/storage/*
roles/cluster-backup/files/*
!roles/cluster-backup/files/readme.md
# roles/xxx/vars, exclude roles/os-harden/vars/
# role based variable settings, exclude roles/os-harden/vars/
/roles/*/vars/*
!/roles/os-harden/vars/

View File

@@ -1,14 +1,14 @@
# time synchronization among cluster nodes
# [optional] to synchronize time of nodes with 'chrony'
- hosts: all
roles:
- { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
# generate CA and related certs on the 'deploy' node
# to create CA, kubeconfig, kube-proxy.kubeconfig etc. on 'deploy' node
- hosts: deploy
roles:
- deploy
# common configuration tasks for cluster nodes
# prepare tasks for all nodes
- hosts:
- kube-master
- kube-node
@@ -18,7 +18,7 @@
roles:
- prepare
# [optional] load balancing config for multi-master deployments
# [optional] to install loadbalance service, only needed by multi-master cluster
- hosts: lb
roles:
- lb
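The chrony role above is gated on a variable that lives only on the first 'deploy' host; a minimal sketch of this cross-host lookup pattern (illustrative, not part of the commit):

- hosts: all
  tasks:
    # NTP_ENABLED is defined in the inventory for the first 'deploy' host;
    # 'hostvars' lets a play targeting any host read it
    - name: Report whether chrony will be installed
      debug:
        msg: "NTP_ENABLED={{ hostvars[groups.deploy[0]]['NTP_ENABLED'] }}"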

View File

@@ -1,3 +1,4 @@
# to install etcd cluster
- hosts: etcd
roles:
- etcd

View File

@@ -1,3 +1,4 @@
# to install docker service
- hosts:
- kube-master
- kube-node

View File

@@ -1,16 +1,16 @@
# to set up 'kube-master' nodes
- hosts: kube-master
roles:
- kube-master
- kube-node
# forbid scheduling workload pods onto master nodes
tasks:
- name: Forbid scheduling workload pods onto master nodes
- name: Making master nodes SchedulingDisabled
shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
delegate_to: "{{ groups.deploy[0] }}"
when: DEPLOY_MODE != "allinone"
ignore_errors: true
- name: Set the master node role label
- name: Setting master role name
shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
ignore_errors: true
delegate_to: "{{ groups.deploy[0] }}"
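Note the pattern in these tasks: the play iterates over the master hosts, but 'delegate_to' runs each kubectl command on the first 'deploy' node, where kubectl and the cluster kubeconfig live. A minimal sketch (illustrative only):

- name: Query each master's node object from the deploy node
  shell: "{{ bin_dir }}/kubectl get node {{ inventory_hostname }}"
  delegate_to: "{{ groups.deploy[0] }}"
  # inventory_hostname still refers to the master being iterated over;
  # only the execution host changes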

View File

@@ -1,3 +1,4 @@
# to set up 'kube-node' nodes
- hosts: kube-node
roles:
- kube-node

View File

@@ -1,4 +1,4 @@
# deploy the cluster network plugin; only one may be installed
# to install network plugin, only one can be chosen
- hosts:
- kube-master
- kube-node

View File

@@ -1,3 +1,4 @@
# to install cluster-addons
- hosts:
- kube-node
roles:

View File

@@ -1,3 +1,6 @@
# [optional] to set up a HARBOR, and to integrate the HARBOR with k8s cluster
# read the guide: 'guide/harbor.md'
- hosts: harbor
roles:
- { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes' and NEW_INSTALL == 'yes'" }
@@ -5,7 +8,7 @@
- { role: docker, when: "NEW_INSTALL == 'yes'" }
- { role: harbor, when: "NEW_INSTALL == 'yes'" }
tasks:
- name: Fetch the harbor server's certificate
- name: Fetching the HARBOR SERVER's CA cert
fetch:
src: "{{ ca_dir }}/ca.pem"
dest: "{{ base_dir }}/down/"
@@ -19,14 +22,14 @@
vars:
harbor_domain: "{{ hostvars[groups.harbor[0]]['HARBOR_DOMAIN'] }}"
tasks:
- name: Create the harbor cert directory
- name: Creating cert dir of the HARBOR SERVER for the docker daemon
file: name=/etc/docker/certs.d/{{ harbor_domain }} state=directory
- name: Push the harbor server's certificate
- name: Installing the HARBOR SERVER's cert on k8s nodes
copy: src={{ base_dir }}/down/ca.pem dest=/etc/docker/certs.d/{{ harbor_domain }}/ca.crt
# if there is a DNS server in your environment, this hosts-file entry can be skipped
- name: Add a hosts entry for harbor
# [optional] if you have a DNS server, add an 'A record' instead
- name: Adding an '/etc/hosts' entry for the HARBOR DOMAIN
lineinfile:
dest: /etc/hosts
state: present
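For reference, a complete 'lineinfile' task of this shape looks roughly like the sketch below; the 'line' value here is an assumption for illustration, not taken from the commit:

- name: Adding an '/etc/hosts' entry for the HARBOR DOMAIN
  lineinfile:
    dest: /etc/hosts
    state: present
    # hypothetical entry mapping the harbor domain to the first harbor host
    line: "{{ groups.harbor[0] }} {{ harbor_domain }}"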

View File

@@ -4,7 +4,7 @@
- prepare
- docker
- kube-node
# depending on the hosts config, only one of the following network plugins will be installed
#
- { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
- { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
- { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }

View File

@@ -1,4 +1,4 @@
# reconfigure and restart haproxy
# reconfigure and restart the haproxy service
- hosts: lb
roles:
- lb
@@ -10,20 +10,20 @@
- docker
- kube-master
- kube-node
# depending on the hosts config, only one of the following network plugins will be installed
#
- { role: calico, when: "CLUSTER_NETWORK == 'calico'" }
- { role: cilium, when: "CLUSTER_NETWORK == 'cilium'" }
- { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
- { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
# forbid scheduling workload pods onto master nodes
#
tasks:
- name: Forbid scheduling workload pods onto master nodes
- name: Making master nodes SchedulingDisabled
shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
delegate_to: "{{ groups.deploy[0] }}"
when: DEPLOY_MODE != "allinone"
ignore_errors: true
- name: Set the master node role label
- name: Setting master role name
shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
ignore_errors: true
delegate_to: "{{ groups.deploy[0] }}"

View File

@@ -1,7 +1,7 @@
# upgrading the cluster carries some risk; proceed with caution
# usage: ansible-playbook -t upgrade_k8s 22.upgrade.yml
# WARNING: Upgrading the k8s cluster can be risky. Make sure you know what you are doing.
# Read the guide: 'op/upgrade.md' .
# update the kubectl binary
# update kubectl binary
- hosts:
- kube-master
- kube-node
@@ -9,12 +9,12 @@
roles:
- prepare
# update the etcd cluster
# update etcd
- hosts: etcd
roles:
- etcd
# docker upgrade: to avoid interrupting workloads, the docker service is not restarted automatically
# update docker binary
- hosts:
- kube-master
- new-master
@@ -23,7 +23,7 @@
roles:
- docker
# update master nodes
# update masters
- hosts:
- kube-master
- new-master
@@ -31,7 +31,7 @@
- kube-master
- kube-node
# update kube-node nodes
# update nodes
- hosts:
- kube-node
- new-node

View File

@@ -1,4 +1,5 @@
# cluster backup playbook; see docs/op/cluster_restore.md for details
# cluster-backup playbook
# read the guide: 'op/cluster_restore.md'
- hosts:
- etcd
@@ -8,14 +9,14 @@
- hosts:
- deploy
tasks:
- name: Prepare backup directories
- name: Creating backup dirs
file: name={{ item }} state=directory
with_items:
- "{{ base_dir }}/roles/cluster-backup/files/ca"
- "{{ base_dir }}/roles/cluster-backup/files/hosts"
- "{{ base_dir }}/roles/cluster-backup/files/snapshot"
- name: Back up CA related files
- name: Backing up CA related files
copy:
src: "{{ ca_dir }}/{{ item }}"
dest: "{{ base_dir }}/roles/cluster-backup/files/ca/{{ item }}"
@@ -26,25 +27,25 @@
- ca-csr.json
- ca-config.json
- name: Back up ansible hosts 1
- name: Backing up ansible hosts-1
copy:
src: "{{ base_dir }}/hosts"
dest: "{{ base_dir }}/roles/cluster-backup/files/hosts/hosts"
register: p
- name: Back up ansible hosts 2
- name: Backing up ansible hosts-2
shell: "cd {{ base_dir }}/roles/cluster-backup/files/hosts && \
cp -fp hosts hosts-$(date +'%Y%m%d%H%M')"
when: p | changed
when: 'p is changed'
- name: Back up etcd snapshot 1
- name: Backing up etcd snapshot-1
copy:
src: "{{ base_dir }}/roles/cluster-backup/files/snapshot.db"
dest: "{{ base_dir }}/roles/cluster-backup/files/snapshot/snapshot.db"
register: q
- name: Back up etcd snapshot 2
- name: Backing up etcd snapshot-2
shell: "cd {{ base_dir }}/roles/cluster-backup/files/ && \
mv -f snapshot.db snapshot/snapshot-$(date +'%Y%m%d%H%M').db"
when: q | changed
when: 'q is changed'
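Besides renaming tasks, this hunk migrates 'when: p | changed' to 'when: p is changed': the Jinja2 filter form is deprecated in newer Ansible releases in favor of the test form. A minimal sketch of the idiom, with illustrative names:

- name: Copy a file and record whether anything changed
  copy:
    src: "{{ base_dir }}/hosts"
    dest: "{{ base_dir }}/hosts.bak"
  register: result

- name: Run follow-up work only when the copy changed something
  debug:
    msg: "keep a timestamped backup"
  when: result is changed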

View File

@@ -1,12 +1,13 @@
# playbook to restore the cluster from a backup; see docs/op/cluster_restore.md
# cluster-restore playbook
# read the guide: 'op/cluster_restore.md'
# restore the CA and related certs on the 'deploy' node
# to restore CA related files on the 'deploy' node
- hosts: deploy
tasks:
- name: Restore the CA directory
- name: Restoring the CA cert directory
file: name=/etc/kubernetes/ssl/ state=directory
- name: Restore CA related files
- name: Restoring CA related files
copy:
src: "{{ base_dir }}/roles/cluster-backup/files/ca/{{ item }}"
dest: "{{ ca_dir }}/{{ item }}"
@@ -21,22 +22,22 @@
roles:
- deploy
# common configuration tasks for cluster nodes
# pre-tasks on all nodes
- hosts: all
roles:
- prepare
# [optional] load balancing config for multi-master deployments
# [optional] only needed by multi-master cluster
- hosts: lb
roles:
- lb
# create the etcd cluster
# to install etcd cluster
- hosts: etcd
roles:
- etcd
# install the docker service
# to install docker
- hosts:
- kube-master
- kube-node
@@ -45,31 +46,32 @@
roles:
- docker
# to set up 'kube-master' nodes
- hosts:
- kube-master
- new-master
roles:
- kube-master
- kube-node
# forbid scheduling workload pods onto master nodes
#
tasks:
- name: Forbid scheduling workload pods onto master nodes
- name: Making master nodes SchedulingDisabled
shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
when: DEPLOY_MODE != "allinone"
ignore_errors: true
- name: Set the master node role label
- name: Setting master role name
shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
ignore_errors: true
# deploy kube-node nodes
# to set up 'kube-node' nodes
- hosts:
- kube-node
- new-node
roles:
- kube-node
# restore the etcd cluster's data
# to restore data of etcd cluster
- hosts: etcd
roles:
- cluster-restore
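The 'cluster-restore' role replays the etcd snapshot collected by the backup playbook; the core of such a restore is etcd's snapshot mechanism. A hedged sketch of that step (not the role's actual task list):

- name: Restore etcd data from the backed-up snapshot
  shell: "ETCDCTL_API=3 {{ bin_dir }}/etcdctl snapshot restore snapshot.db --data-dir=/var/lib/etcd"
  # a real restore must also stop etcd first, clear the old data dir,
  # and pass the member/initial-cluster flags on a multi-node etcd cluster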

View File

@@ -1,14 +1,14 @@
# time synchronization among cluster nodes
# [optional] to synchronize time of nodes with 'chrony'
- hosts: all
roles:
- { role: chrony, when: "hostvars[groups.deploy[0]]['NTP_ENABLED'] == 'yes'" }
# generate CA and related certs on the 'deploy' node for the whole cluster
# to create CA, kubeconfig, kube-proxy.kubeconfig etc. on 'deploy' node
- hosts: deploy
roles:
- deploy
# common configuration tasks for cluster nodes
# prepare tasks for all nodes
- hosts:
- kube-master
- kube-node
@@ -18,46 +18,47 @@
roles:
- prepare
# [optional] load balancing config for multi-master deployments
# [optional] to install loadbalance service, only needed by multi-master cluster
- hosts: lb
roles:
- lb
# create the etcd cluster
# to install etcd cluster
- hosts: etcd
roles:
- etcd
# install the docker service
# to install docker service
- hosts:
- kube-master
- kube-node
roles:
- docker
# to set up 'kube-master' nodes
- hosts: kube-master
roles:
- kube-master
- kube-node
# forbid scheduling workload pods onto master nodes
#
tasks:
- name: Forbid scheduling workload pods onto master nodes
- name: Making master nodes SchedulingDisabled
shell: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }} "
delegate_to: "{{ groups.deploy[0] }}"
when: DEPLOY_MODE != "allinone"
ignore_errors: true
- name: Set the master node role label
- name: Setting master role name
shell: "{{ bin_dir }}/kubectl label node {{ inventory_hostname }} kubernetes.io/role=master --overwrite"
ignore_errors: true
delegate_to: "{{ groups.deploy[0] }}"
# deploy kube-node nodes
# to set up 'kube-node' nodes
- hosts: kube-node
roles:
- { role: kube-node, when: "DEPLOY_MODE != 'allinone'" }
# deploy the cluster network plugin; only one may be installed
# to install network plugin, only one can be chosen
- hosts:
- kube-master
- kube-node
@@ -67,7 +68,7 @@
- { role: flannel, when: "CLUSTER_NETWORK == 'flannel'" }
- { role: kube-router, when: "CLUSTER_NETWORK == 'kube-router'" }
# install cluster add-ons
# to install cluster-addons
- hosts:
- kube-node
roles:

View File

@@ -1,7 +1,7 @@
# WARNING: this playbook wipes the entire K8S cluster, including all PODs and ETCD data
# think twice before running it: ansible-playbook 99.clean.yml
# WARNING: This playbook will erase the entire k8s-cluster, including PODs, ETCD data etc.
# Make sure you know what you are doing.
# clean up 'kube-node' related services
# to clean 'kube-node' nodes
- hosts:
- kube-master
- new-master
@@ -15,11 +15,11 @@
- kube-proxy
ignore_errors: true
- name: umount directories mounted by kubelet
- name: umount kubelet filesystems
shell: "mount | grep '/var/lib/kubelet'| awk '{print $3}'|xargs umount"
ignore_errors: true
- name: Clean up directories and files
- name: remove files and dirs of 'kube-node' nodes
file: name={{ item }} state=absent
with_items:
- "/var/lib/kubelet/"
@@ -28,7 +28,7 @@
- "/etc/systemd/system/kube-proxy.service"
- "/opt/kube/kube-system/"
# clean up 'kube-master' related services
# to clean 'kube-master' nodes
- hosts:
- kube-master
- new-master
@@ -41,7 +41,7 @@
- kube-scheduler
ignore_errors: true
- name: Clean up directories and files
- name: remove files and dirs of 'kube-master' nodes
file: name={{ item }} state=absent
with_items:
- "/var/run/kubernetes"
@@ -49,14 +49,14 @@
- "/etc/systemd/system/kube-controller-manager.service"
- "/etc/systemd/system/kube-scheduler.service"
# clean up the cluster's docker service and networking
# to clean docker service and networking
- hosts:
- kube-master
- new-master
- kube-node
- new-node
tasks:
- name: Clean up kube-router leftovers
- name: clean 'kube-router' stuff
shell: "{{ bin_dir }}/docker run --privileged --net=host cloudnativelabs/kube-router --cleanup-config"
ignore_errors: true
when: "CLUSTER_NETWORK == 'kube-router'"
@@ -68,15 +68,14 @@
enabled: no
ignore_errors: true
# because calico-kube-controller uses the host network (equivalent to docker --net=host),
# /var/run/docker/netns/default must be unmounted
- name: Unmount docker related fs 1
# as k8s-network-plugins use host-network, '/var/run/docker/netns/default' must be umounted
- name: unmount docker filesystem-1
mount: path=/var/run/docker/netns/default state=unmounted
- name: Unmount docker related fs 2
- name: unmount docker filesystem-2
mount: path=/var/lib/docker/overlay state=unmounted
- name: Clean up directories and files
- name: remove files and dirs
file: name={{ item }} state=absent
with_items:
- "/etc/cni/"
@@ -100,13 +99,13 @@
- "/etc/bash_completion.d/docker"
ignore_errors: true
- name: Clean up iptables
- name: cleanup iptables
shell: "iptables -F && iptables -X \
&& iptables -F -t nat && iptables -X -t nat \
&& iptables -F -t raw && iptables -X -t raw \
&& iptables -F -t mangle && iptables -X -t mangle"
- name: Clean up networks
- name: cleanup networks
shell: "ip link del docker0; \
ip link del tunl0; \
ip link del flannel.1; \
@@ -121,12 +120,12 @@
systemctl restart network"
ignore_errors: true
- name: Clean up leftover calico routes
- name: cleanup 'calico' routes
shell: "for rt in `ip route|grep bird|sed 's/blackhole//'|awk '{print $1}'`;do ip route del $rt;done;"
when: "CLUSTER_NETWORK == 'calico'"
ignore_errors: true
# clean up the etcd cluster
# to clean 'etcd' nodes
- hosts: etcd
tasks:
- name: stop and disable etcd service
@@ -136,7 +135,7 @@
enabled: no
ignore_errors: true
- name: Clean up directories and files
- name: remove files and dirs
file: name={{ item }} state=absent
with_items:
- "/var/lib/etcd"
@@ -144,7 +143,7 @@
- "/backup/k8s"
- "/etc/systemd/system/etcd.service"
# clean up the load balancer nodes
# to clean 'lb' nodes
- hosts: lb
tasks:
- name: stop keepalived service
@@ -155,12 +154,13 @@
shell: systemctl disable haproxy && systemctl stop haproxy
ignore_errors: true
- name: Clean up LB config directories
- name: remove files and dirs
file: name={{ item }} state=absent
with_items:
- "/etc/haproxy"
- "/etc/keepalived"
# to clean ntp, certs and keys, env path
- hosts:
- kube-master
- new-master
@@ -178,7 +178,7 @@
ignore_errors: true
tags: rm_ntp
- name: Clean up cert directories and files
- name: clean certs and keys
file: name={{ item }} state=absent
with_items:
- "/etc/kubernetes/"
@@ -186,7 +186,7 @@
- "/root/.kube/"
- "/etc/docker/"
- name: Clean up the auto-generated PATH entry
- name: clean 'ENV PATH'
lineinfile:
dest: ~/.bashrc
state: absent

View File

@@ -1,3 +1,3 @@
# project binaries directory
# Binaries for installing k8s
All binaries used by the cluster are packaged and ready for download at https://pan.baidu.com/s/1c4RFaA
Binaries can be downloaded at https://pan.baidu.com/s/1c4RFaA; alternatively, they can be downloaded separately from the official github repos.

View File

@@ -15,7 +15,7 @@ After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kube/bin/kube-proxy \
ExecStart={{ bin_dir }}/kube-proxy \
--bind-address={{ NODE_IP }} \
--hostname-override={{ NODE_IP }} \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
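This unit file is a Jinja2 template, so replacing the hardcoded /opt/kube/bin with '{{ bin_dir }}' makes the path follow the inventory variable. A role typically renders and activates such a template along these lines (a sketch, not the role's exact tasks):

- name: Install the kube-proxy systemd unit from the template
  template:
    src: kube-proxy.service.j2
    dest: /etc/systemd/system/kube-proxy.service
- name: Reload systemd and (re)start kube-proxy
  systemd:
    name: kube-proxy
    daemon_reload: yes
    state: restarted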

View File

@@ -1,16 +1,17 @@
#!/bin/bash
# major component versions
export K8S_VER=v1.10.2
export ETCD_VER=v3.3.8
export DOCKER_VER=17.03.2-ce
export CNI_VER=v0.7.0
export DOCKER_COMPOSE=1.18.0
export HARBOR=v1.2.2
# This script describes where to download the official released binaries needed
# It's suggested to download the entire *.tar.gz at https://pan.baidu.com/s/1c4RFaA
echo "\nIt is recommended to download the pre-packaged k8s-***.all.tar.gz with all required binaries, then extract it into the bin directory"
echo "\nIt is recommended NOT to use this script; if you want to upgrade components or experiment, read it through and adapt it as needed"
echo "\nNote 1: please download the binary packages manually into the down directory, from the links below"
echo "\nNote 2: if the tar packages have not been downloaded yet, press Ctrl-c to stop this script"
# example releases
K8S_VER=v1.10.4
ETCD_VER=v3.3.8
DOCKER_VER=17.03.2-ce
CNI_VER=v0.6.0
DOCKER_COMPOSE=1.18.0
HARBOR=v1.5.2
echo "\nNote1: Before running this script, please finish downloading binaries manually from the following urls."
echo "\nNote2: If binaries are not ready, use 'Ctrl + C' to stop this script."
echo "\n----download k8s binary at:"
echo https://dl.k8s.io/${K8S_VER}/kubernetes-server-linux-amd64.tar.gz
@@ -38,36 +39,34 @@ echo https://github.com/containernetworking/plugins/releases
sleep 30
### prepare the cert tools
echo "\npreparing the cert tools..."
### prepare 'cfssl' cert tool suit
echo "\nMoving 'cfssl' to 'bin' dir..."
if [ -f "cfssl_linux-amd64" ]; then
mv -f cfssl_linux-amd64 ../bin/cfssl
else
echo Please download https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 first
echo Please download 'cfssl' at 'https://pkg.cfssl.org/R1.2/cfssl_linux-amd64'
fi
if [ -f "cfssljson_linux-amd64" ]; then
mv -f cfssljson_linux-amd64 ../bin/cfssljson
else
echo Please download https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 first
echo Please download 'cfssljson' at 'https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64'
fi
if [ -f "cfssl-certinfo_linux-amd64" ]; then
mv -f cfssl-certinfo_linux-amd64 ../bin/cfssl-certinfo
else
echo Please download https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 first
echo Please download 'cfssl-certinfo' at 'https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64'
fi
### prepare etcd binaries
echo "\npreparing etcd binaries..."
### prepare 'etcd' binaries
if [ -f "etcd-${ETCD_VER}-linux-amd64.tar.gz" ]; then
echo "\nextracting etcd binaries..."
tar zxf etcd-${ETCD_VER}-linux-amd64.tar.gz
mv -f etcd-${ETCD_VER}-linux-amd64/etcd* ../bin
else
echo Please download etcd-${ETCD_VER}-linux-amd64.tar.gz first
echo Please download 'etcd-${ETCD_VER}-linux-amd64.tar.gz' first
fi
### prepare kubernetes binaries
echo "\npreparing kubernetes binaries..."
### prepare kubernetes binaries
if [ -f "kubernetes-server-linux-amd64.tar.gz" ]; then
echo "\nextracting kubernetes binaries..."
tar zxf kubernetes-server-linux-amd64.tar.gz
@@ -78,11 +77,10 @@ if [ -f "kubernetes-server-linux-amd64.tar.gz" ]; then
mv -f kubernetes/server/bin/kube-proxy ../bin
mv -f kubernetes/server/bin/kube-scheduler ../bin
else
echo Please download kubernetes-server-linux-amd64.tar.gz first
echo Please download 'kubernetes-server-linux-amd64.tar.gz' first
fi
### prepare docker binaries
echo "\npreparing docker binaries..."
### prepare docker binaries
if [ -f "docker-${DOCKER_VER}.tgz" ]; then
echo "\nextracting docker binaries..."
tar zxf docker-${DOCKER_VER}.tgz
@@ -91,11 +89,10 @@ if [ -f "docker-${DOCKER_VER}.tgz" ]; then
mv -f docker/completion/bash/docker ../roles/docker/files/docker
fi
else
echo Please download docker-${DOCKER_VER}.tgz first
echo Please download 'docker-${DOCKER_VER}.tgz' first
fi
### prepare cni plugins; only needed when installing flannel (when installing calico, a dedicated container downloads the cni plugins)
echo "\npreparing cni plugins, only needed when installing flannel..."
### prepare cni plugins, needed by flannel;
if [ -f "cni-${CNI_VER}.tgz" ]; then
echo "\nextracting cni plugins binaries..."
tar zxf cni-${CNI_VER}.tgz
@@ -105,5 +102,5 @@ if [ -f "cni-${CNI_VER}.tgz" ]; then
mv -f loopback ../bin
mv -f portmap ../bin
else
echo Please download cni-${CNI_VER}.tgz first
echo Please download 'cni-${CNI_VER}.tgz' first
fi

View File

@@ -1,42 +1,42 @@
###---[basic_images ]
# dns-addon plugin
# dns-addon
coredns/coredns:1.1.3
mirrorgooglecontainers/k8s-dns-kube-dns-amd64:1.14.10
mirrorgooglecontainers/k8s-dns-dnsmasq-nanny-amd64:1.14.10
mirrorgooglecontainers/k8s-dns-sidecar-amd64:1.14.10
# metrics-server plugin
# metrics-server
mirrorgooglecontainers/metrics-server-amd64:v0.2.1
# calico network plugin
# calico
calico/node:v3.1.3
calico/cni:v3.1.3
calico/kube-controllers:v3.1.3
# cilium network plugin
# cilium
cilium/cilium:v1.1.2
# flannel network plugin
# flannel
jmgao1983/flannel:v0.10.0-amd64
# kube-router network plugin
# kube-router
cloudnativelabs/kube-router:v0.2.0-beta.9
# dashboard plugin
# dashboard
mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.8.3
# pause base container image
# pause
mirrorgooglecontainers/pause-amd64:3.1
busybox:1.28.4
# traefik ingress image
# traefik ingress
traefik:v1.6
# heapster plugin
# heapster
mirrorgooglecontainers/heapster-grafana-amd64:v4.4.3
mirrorgooglecontainers/heapster-amd64:v1.5.1
mirrorgooglecontainers/heapster-influxdb-amd64:v1.3.3
###---[extra-images]
# helm server (tiller)
# helm tiller server
jmgao1983/tiller:v2.9.1
# efk plugin
# efk
mirrorgooglecontainers/elasticsearch:v5.6.4
alpine:3.6
mirrorgooglecontainers/fluentd-elasticsearch:v2.0.2
jmgao1983/kibana:5.6.4
# nfs dynamic PV
# nfs dynamic PV provisioner
jmgao1983/nfs-client-provisioner:latest
# prometheus
busybox:latest

View File

@@ -1,9 +1,10 @@
# the 'deploy' node, usually the node that runs the ansible playbooks
# variable NTP_ENABLED (=yes/no) sets whether to install chrony time sync for the cluster
# 'deploy' node, which the ansible-playbooks usually run on
# variable 'NTP_ENABLED(=yes/no)' enables/disables the NTP server 'chrony'
[deploy]
192.168.1.1 NTP_ENABLED=no
# for the etcd cluster, provide NODE_NAME as below; note that the etcd cluster must have an odd number of nodes (1,3,5,7...)
# 'etcd' cluster must have odd member(s) (1,3,5,...)
# variable 'NODE_NAME' is the distinct name of a member in 'etcd' cluster
[etcd]
192.168.1.1 NODE_NAME=etcd1
@@ -13,57 +14,57 @@
[kube-node]
192.168.1.1
# parameter NEW_INSTALL: 'yes' to set up a new harbor server, 'no' to use an existing one
# variable NEW_INSTALL: 'yes' to setup a new harbor server; 'no' to integrate with an existing one
[harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
# reserved group, for adding kube-node nodes later
# group reserved, add new 'kube-node' in it
[new-node]
#192.168.1.xx
[all:vars]
# --------- main cluster parameters ---------------
# cluster deployment mode: allinone, single-master, multi-master
# --------- Main Variables ---------------
# Cluster Deployment Mode: allinone, single-master, multi-master
DEPLOY_MODE=allinone
# cluster major version; currently supported: v1.8, v1.9, v1.10, v1.11
# Versions supported: v1.8, v1.9, v1.10, v1.11, v1.12
K8S_VER="v1.10"
# cluster MASTER IP, auto-generated
# Cluster's Master IP, auto generated
MASTER_IP="{{ groups['kube-master'][0] }}"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
# cluster network plugin; currently supported: calico, flannel, kube-router, cilium
# Network plugins supported: calico, flannel, kube-router, cilium
CLUSTER_NETWORK="flannel"
# Service CIDR; must not overlap with any existing internal network range
# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.68.0.0/16"
# Pod CIDR (Cluster CIDR); must not overlap with any existing internal network range
# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="172.20.0.0/16"
# service port range (NodePort Range)
# NodePort Range
NODE_PORT_RANGE="20000-40000"
# kubernetes service IP (pre-allocated, usually the first IP in SERVICE_CIDR)
# Kubernetes SVC IP (usually assigned with the first available IP of 'SERVICE_CIDR')
CLUSTER_KUBERNETES_SVC_IP="10.68.0.1"
# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
# Cluster DNS Server's IP (assigned with an available IP of 'SERVICE_CIDR')
CLUSTER_DNS_SVC_IP="10.68.0.2"
# cluster DNS domain
# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="cluster.local."
# username and password for the cluster's basic auth
# Basic auth for apiserver
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="test1234"
# --------- additional parameters --------------------
# default binaries directory
# -------- Additional Variables --------------------
# Binaries Directory
bin_dir="/opt/kube/bin"
# certificates directory
# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
# deployment directory, i.e. the ansible working directory
# Deploy Directory (kubeasz workspace), don't change the default value right now
base_dir="/etc/ansible"

View File

@@ -1,9 +1,10 @@
# the 'deploy' node, usually the node that runs the ansible playbooks
# variable NTP_ENABLED (=yes/no) sets whether to install chrony time sync for the cluster
# 'deploy' node, which the ansible-playbooks usually run on
# variable 'NTP_ENABLED(=yes/no)' enables/disables the NTP server 'chrony'
[deploy]
192.168.1.1 NTP_ENABLED=no
# for the etcd cluster, provide NODE_NAME as below; note that the etcd cluster must have an odd number of nodes (1,3,5,7...)
# 'etcd' cluster must have odd member(s) (1,3,5,...)
# variable 'NODE_NAME' is the distinct name of a member in 'etcd' cluster
[etcd]
192.168.1.1 NODE_NAME=etcd1
192.168.1.2 NODE_NAME=etcd2
@@ -13,71 +14,71 @@
192.168.1.1
192.168.1.2
# load balancing (more than 2 nodes are now supported; 2 are usually enough), installs haproxy+keepalived
# 'loadbalance' node, with 'haproxy+keepalived' installed
[lb]
192.168.1.1 LB_IF="eth0" LB_ROLE=backup # note: set LB_IF to the network interface actually in use
192.168.1.1 LB_IF="eth0" LB_ROLE=backup # replace 'eth0' with the node's actual network interface
192.168.1.2 LB_IF="eth0" LB_ROLE=master
[kube-node]
192.168.1.3
192.168.1.4
# parameter NEW_INSTALL: 'yes' to set up a new harbor server, 'no' to use an existing one
# variable NEW_INSTALL: 'yes' to setup a new harbor server; 'no' to integrate with an existing one
[harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
# reserved group, for adding kube-master nodes later
# group reserved, add new 'kube-master' in it
[new-master]
#192.168.1.5
# reserved group, for adding kube-node nodes later
# group reserved, add new 'kube-node' in it
[new-node]
#192.168.1.xx
[all:vars]
# --------- main cluster parameters ---------------
# cluster deployment mode: allinone, single-master, multi-master
# --------- Main Variables ---------------
# Cluster Deployment Mode: allinone, single-master, multi-master
DEPLOY_MODE=multi-master
# cluster major version; currently supported: v1.8, v1.9, v1.10, v1.11
# Versions supported: v1.8, v1.9, v1.10, v1.11, v1.12
K8S_VER="v1.10"
# cluster MASTER IP, i.e. the VIP on the LB nodes; to distinguish it from the default apiserver port, the VIP service listens on port 8443
# on public clouds, use the cloud load balancer's internal address and listening port instead
# Cluster's Master IP, generated by 'keepalived' daemon on a 'lb' node here
# 'haproxy' daemon listens on port 8443, directs requests to real apiservers on port 6443
MASTER_IP="192.168.1.10"
KUBE_APISERVER="https://{{ MASTER_IP }}:8443"
# cluster network plugin; currently supported: calico, flannel, kube-router, cilium
# Network plugins supported: calico, flannel, kube-router, cilium
CLUSTER_NETWORK="flannel"
# Service CIDR; must not overlap with any existing internal network range
# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.68.0.0/16"
# Pod CIDR (Cluster CIDR); must not overlap with any existing internal network range
# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="172.20.0.0/16"
# service port range (NodePort Range)
# NodePort Range
NODE_PORT_RANGE="20000-40000"
# kubernetes service IP (pre-allocated, usually the first IP in SERVICE_CIDR)
# Kubernetes SVC IP (usually assigned with the first available IP of 'SERVICE_CIDR')
CLUSTER_KUBERNETES_SVC_IP="10.68.0.1"
# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
# Cluster DNS Server's IP (assigned with an available IP of 'SERVICE_CIDR')
CLUSTER_DNS_SVC_IP="10.68.0.2"
# cluster DNS domain
# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="cluster.local."
# username and password for the cluster's basic auth
# Basic auth for apiserver
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="test1234"
# --------- additional parameters --------------------
# default binaries directory
# -------- Additional Variables --------------------
# Binaries Directory
bin_dir="/opt/kube/bin"
# certificates directory
# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
# deployment directory, i.e. the ansible working directory; changing it is not recommended
# Deploy Directory (kubeasz workspace), don't change the default value right now
base_dir="/etc/ansible"

View File

@@ -1,9 +1,10 @@
# the 'deploy' node, usually the node that runs the ansible playbooks
# variable NTP_ENABLED (=yes/no) sets whether to install chrony time sync for the cluster
# 'deploy' node, which the ansible-playbooks usually run on
# variable 'NTP_ENABLED(=yes/no)' enables/disables the NTP server 'chrony'
[deploy]
192.168.1.1 NTP_ENABLED=no
# for the etcd cluster, provide NODE_NAME as below; note that the etcd cluster must have an odd number of nodes (1,3,5,7...)
# 'etcd' cluster must have odd member(s) (1,3,5,...)
# variable 'NODE_NAME' is the distinct name of a member in 'etcd' cluster
[etcd]
192.168.1.1 NODE_NAME=etcd1
@@ -14,57 +15,57 @@
192.168.1.2
192.168.1.3
# parameter NEW_INSTALL: 'yes' to set up a new harbor server, 'no' to use an existing one
# variable NEW_INSTALL: 'yes' to setup a new harbor server; 'no' to integrate with an existing one
[harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
# reserved group, for adding kube-node nodes later
# group reserved, add new 'kube-node' in it
[new-node]
#192.168.1.xx
[all:vars]
# --------- main cluster parameters ---------------
# cluster deployment mode: allinone, single-master, multi-master
# --------- Main Variables ---------------
# Cluster Deployment Mode: allinone, single-master, multi-master
DEPLOY_MODE=single-master
# cluster major version; currently supported: v1.8, v1.9, v1.10, v1.11
# Versions supported: v1.8, v1.9, v1.10, v1.11, v1.12
K8S_VER="v1.11"
# cluster MASTER IP, auto-generated
# Cluster's Master IP, auto generated
MASTER_IP="{{ groups['kube-master'][0] }}"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
# cluster network plugin; currently supported: calico, flannel, kube-router, cilium
# Network plugins supported: calico, flannel, kube-router, cilium
CLUSTER_NETWORK="flannel"
# Service CIDR; must not overlap with any existing internal network range
# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.68.0.0/16"
# Pod CIDR (Cluster CIDR); must not overlap with any existing internal network range
# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="172.20.0.0/16"
# service port range (NodePort Range)
# NodePort Range
NODE_PORT_RANGE="20000-40000"
# kubernetes service IP (pre-allocated, usually the first IP in SERVICE_CIDR)
# Kubernetes SVC IP (usually assigned with the first available IP of 'SERVICE_CIDR')
CLUSTER_KUBERNETES_SVC_IP="10.68.0.1"
# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
# Cluster DNS Server's IP (assigned with an available IP of 'SERVICE_CIDR')
CLUSTER_DNS_SVC_IP="10.68.0.2"
# cluster DNS domain
# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="cluster.local."
# username and password for the cluster's basic auth
# Basic auth for apiserver
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="test1234"
# --------- additional parameters --------------------
# default binaries directory
# -------- Additional Variables --------------------
# Binaries Directory
bin_dir="/opt/kube/bin"
# certificates directory
# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
# deployment directory, i.e. the ansible working directory
# Deploy Directory (kubeasz workspace), don't change the default value right now
base_dir="/etc/ansible"

View File

@@ -14,7 +14,7 @@
- name: Create the tiller server certificate signing request
template: src=tiller-csr.json.j2 dest={{ ca_dir }}/{{ tiller_cert_cn }}-csr.json
- name: Create the calico cert and private key
- name: Create the tiller server cert and private key
shell: "cd {{ ca_dir }} && {{ bin_dir }}/cfssl gencert \
-ca={{ ca_dir }}/ca.pem \
-ca-key={{ ca_dir }}/ca-key.pem \