feat: add client kubeconfig management in 'ezctl'

pull/992/head
gjmzj 2021-01-17 23:59:40 +08:00
parent 3a03ae67db
commit e97c65ce5f
12 changed files with 219 additions and 222 deletions

View File

@ -26,12 +26,9 @@ local_network: "0.0.0.0/0"
CA_EXPIRY: "876000h"
CERT_EXPIRY: "438000h"
# kubeconfig 配置参数注意权限根据USER_NAME设置
# 'admin' 表示创建集群管理员(所有)权限的 kubeconfig
# 'read' 表示创建只读权限的 kubeconfig
# kubeconfig 配置参数
CLUSTER_NAME: "cluster1"
USER_NAME: "admin"
CONTEXT_NAME: "context-{{ CLUSTER_NAME }}-{{ USER_NAME }}"
CONTEXT_NAME: "context-{{ CLUSTER_NAME }}"
############################

144
ezctl
View File

@ -30,29 +30,13 @@ Cluster ops:
del-master <cluster> <ip> to delete a master node from the k8s cluster
del-node <cluster> <ip> to delete a work node from the k8s cluster
Extra operation:
kcfg-adm <cluster> <args> to manage client kubeconfig of the k8s cluster
Use "ezctl help <command>" for more information about a given command.
EOF
}
# Print help for 'ezctl setup <cluster> <step>': the numbered install steps 01-07 and 'all'.
function usage-setup(){
echo -e "\033[33mUsage:\033[0m ezctl setup <cluster> <step>"
cat <<EOF
available steps:
01 prepare to prepare CA/certs & kubeconfig & other system settings
02 etcd to setup the etcd cluster
03 runtime to setup the container runtime(docker or containerd)
04 kube-master to setup the master nodes
05 kube-node to setup the worker nodes
06 network to setup the network plugin
07 cluster-addon to setup other useful plugins
all to run 01~07 all at once
examples: ./ezctl setup test-k8s 01
./ezctl setup test-k8s 02
./ezctl setup test-k8s all
EOF
}
function logger() {
TIMESTAMP=$(date +'%Y-%m-%d %H:%M:%S')
case "$1" in
@ -96,12 +80,51 @@ function help-info() {
(del-node)
echo -e "read more > 'https://github.com/easzlab/kubeasz/blob/master/docs/op/op-node.md'"
;;
(kcfg-adm)
usage-kcfg-adm
;;
(*)
echo -e "todo: help info $1"
;;
esac
}
# Print help for 'ezctl kcfg-adm <cluster> <args>': the flags understood by kcfg-adm()
# for adding (-A), deleting (-D) and listing (-L) client kubeconfigs.
function usage-kcfg-adm(){
echo -e "\033[33mUsage:\033[0m ezctl kcfg-adm <cluster> <args>"
cat <<EOF
available <args>:
-A to add a client kubeconfig with a newly created user
-D to delete a client kubeconfig with the existed user
-L to list all of the users
-e to set expiry of the user certs in hours (ex. 24h, 8h, 240h)
-t to set a user-type (admin or view)
-u to set a user-name prefix
examples: ./ezctl kcfg-adm test-k8s -L
./ezctl kcfg-adm default -A -e 240h -t admin -u jack
./ezctl kcfg-adm default -D -u jim-202101162141
EOF
}
# Print help for 'ezctl setup <cluster> <step>': the numbered install steps 01-07 and 'all'.
function usage-setup(){
echo -e "\033[33mUsage:\033[0m ezctl setup <cluster> <step>"
cat <<EOF
available steps:
01 prepare to prepare CA/certs & kubeconfig & other system settings
02 etcd to setup the etcd cluster
03 runtime to setup the container runtime(docker or containerd)
04 kube-master to setup the master nodes
05 kube-node to setup the worker nodes
06 network to setup the network plugin
07 cluster-addon to setup other useful plugins
all to run 01~07 all at once
examples: ./ezctl setup test-k8s 01
./ezctl setup test-k8s 02
./ezctl setup test-k8s all
EOF
}
### Cluster setups functions ##############################
function new() {
@ -374,6 +397,84 @@ function start-aio(){
setup default all
}
### Extra functions #############################################
EXPIRY=4800h # default client-cert lifetime for kcfg-adm, i.e. 200 days; override with '-e'
USER_TYPE=admin # admin/view, admin=clusterrole:cluster-admin view=clusterrole:view; override with '-t'
USER_NAME=user # default user-name prefix (a timestamp is appended on '-A'); override with '-u'
# Parse 'ezctl kcfg-adm <cluster> <args>' flags and dispatch to add-kcfg/del-kcfg/list-kcfg.
# $1: cluster name (option parsing starts at $2, hence OPTIND=2).
# Globals written: EXPIRY (-e), USER_TYPE (-t), USER_NAME (-u).
# Exits the whole script on invalid -e/-t values or when no action flag is given;
# returns 1 when the dispatched action itself fails.
function kcfg-adm() {
  OPTIND=2
  # Hold the pending command as an array so a cluster name with spaces or glob
  # characters is passed through verbatim instead of being word-split (SC2086).
  local -a action=()
  while getopts "ADLe:t:u:" OPTION; do
    case $OPTION in
      A)
        action=(add-kcfg "$1")
        ;;
      D)
        action=(del-kcfg "$1")
        ;;
      L)
        action=(list-kcfg "$1")
        ;;
      e)
        # validate before assigning so EXPIRY is never left holding a bad value
        [[ $OPTARG =~ ^[1-9][0-9]*h$ ]] || { logger error "'-e' must be set like '2h, 5h, 50000h, ...'"; exit 1; }
        EXPIRY="$OPTARG"
        ;;
      t)
        [[ $OPTARG =~ ^(admin|view)$ ]] || { logger error "'-t' can only be set as 'admin' or 'view'"; exit 1; }
        USER_TYPE="$OPTARG"
        ;;
      u)
        USER_NAME="$OPTARG"
        ;;
      ?)
        help-info kcfg-adm
        return 1
        ;;
    esac
  done
  (( ${#action[@]} > 0 )) || { logger error "illegal option"; help-info kcfg-adm; exit 1; }
  logger info "${action[*]}"
  "${action[@]}" || { logger error "${action[*]} fail"; return 1; }
  logger info "${action[*]} success"
}
# Create a new client kubeconfig for cluster $1: append a creation timestamp to the
# USER_NAME prefix, then run the deploy playbook with tag 'add-kcfg'.
# Reads globals EXPIRY and USER_TYPE; mutates global USER_NAME (adds -YYYYmmddHHMM).
function add-kcfg(){
  local stamp
  stamp=$(date +'%Y%m%d%H%M')
  USER_NAME="${USER_NAME}-${stamp}"
  logger info "add-kcfg in cluster:$1 with user:$USER_NAME"
  # assemble the playbook invocation as an array for readability
  local -a pb_args=(
    -i "clusters/$1/hosts"
    -e "@clusters/$1/config.yml"
    -e "CUSTOM_EXPIRY=$EXPIRY"
    -e "USER_TYPE=$USER_TYPE"
    -e "USER_NAME=$USER_NAME"
    -e "ADD_KCFG=true"
    -t add-kcfg
    "roles/deploy/deploy.yml"
  )
  ansible-playbook "${pb_args[@]}"
}
# Delete a client kubeconfig user from cluster $1: remove the clusterrolebinding whose
# first subject is USER_NAME, then delete the generated cert/kubeconfig files under
# clusters/<cluster>/ssl/users/. Reads global USER_NAME (full name incl. timestamp, set via -u).
# Returns non-zero if the lookup fails, no binding matches, or the delete fails.
function del-kcfg(){
  local crb
  logger info "del-kcfg in cluster:$1 with user:$USER_NAME"
  crb=$(bin/kubectl --kubeconfig="clusters/$1/kubectl.kubeconfig" get clusterrolebindings \
        -ojsonpath="{.items[?(@.subjects[0].name == '$USER_NAME')].metadata.name}") || return 1
  # guard: without this, an unknown user would run 'kubectl delete clusterrolebindings ""'
  # (a confusing API error) and could still fall through to the file removal
  [[ -n "$crb" ]] || { logger error "no clusterrolebinding found for user:$USER_NAME"; return 1; }
  bin/kubectl --kubeconfig="clusters/$1/kubectl.kubeconfig" delete clusterrolebindings "$crb" && \
  /bin/rm -f "clusters/$1/ssl/users/$USER_NAME"*
}
# List all kcfg-adm generated users in cluster $1 with their role and cert expiry.
# Queries clusterrolebindings for subjects bound to 'cluster-admin' and 'view',
# keeps only names carrying the 12-digit creation timestamp suffix added by add-kcfg,
# and prints one formatted row per user.
function list-kcfg(){
  logger info "list-kcfg in cluster:$1"
  printf "\n%-30s %-15s %-20s\n" USER TYPE "EXPIRY(+8h if in Asia/Shanghai)"
  echo "---------------------------------------------------------------------------------"
  # the two role queries were copy-pasted loops; factored into one helper
  list-kcfg-rows "$1" cluster-admin cluster-admin
  list-kcfg-rows "$1" view view
  echo ""
}

# Helper for list-kcfg: print one row per generated user bound to clusterrole $2,
# labelled $3, for cluster $1. Expiry is parsed from cfssl-certinfo's not_after field.
function list-kcfg-rows(){
  local users u t
  users=$(bin/kubectl --kubeconfig="clusters/$1/kubectl.kubeconfig" get clusterrolebindings \
          -ojsonpath="{.items[?(@.roleRef.name == \"$2\")].subjects[*].name}")
  for u in $users; do
    # skip subjects without the -YYYYmmddHHMM suffix: they were not created by add-kcfg
    [[ $u =~ ^.*-[0-9]{12}$ ]] || continue
    t=$(bin/cfssl-certinfo -cert "clusters/$1/ssl/users/$u.pem" | grep not_after | awk '{print $2}' | sed 's/[",]//g')
    printf "%-30s %-15s %-20s\n" "$u" "$3" "$t"
  done
}
### Main Lines ##################################################
function main() {
@ -460,6 +561,11 @@ function main() {
[ "$#" -eq 1 ] || { usage >&2; exit 2; }
start-aio
;;
### extra operations ##############################
(kcfg-adm)
[ "$#" -gt 2 ] || { usage-kcfg-adm >&2; exit 2; }
kcfg-adm "${@:2}"
;;
(help)
[ "$#" -gt 1 ] || { usage >&2; exit 2; }
help-info "$2"

View File

@ -1,158 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: read-clusterrole-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: read-clusterrole
subjects:
- kind: Group
name: "group:read"
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: read-clusterrole
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- persistentvolumes
- persistentvolumeclaims
- persistentvolumeclaims/status
- pods
- replicationcontrollers
- replicationcontrollers/scale
- serviceaccounts
- services
- services/status
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- bindings
- events
- limitranges
- namespaces/status
- pods/log
- pods/status
- replicationcontrollers/status
- resourcequotas
- resourcequotas/status
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- controllerrevisions
- daemonsets
- daemonsets/status
- deployments
- deployments/scale
- deployments/status
- replicasets
- replicasets/scale
- replicasets/status
- statefulsets
- statefulsets/scale
- statefulsets/status
verbs:
- get
- list
- watch
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
- horizontalpodautoscalers/status
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- cronjobs
- cronjobs/status
- jobs
- jobs/status
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- daemonsets
- daemonsets/status
- deployments
- deployments/scale
- deployments/status
- ingresses
- ingresses/status
- replicasets
- replicasets/scale
- replicasets/status
- replicationcontrollers/scale
verbs:
- get
- list
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
- poddisruptionbudgets/status
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
- ingresses/status
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
- volumeattachments
verbs:
- get
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
- roles
- rolebindings
verbs:
- get
- list
- watch

View File

@ -0,0 +1,46 @@
# Tasks to create a custom client kubeconfig (imported with tag 'add-kcfg').
# Flow: sign a per-user cert with the 'kcfg' CA profile, assemble a standalone
# kubeconfig under {{ cluster_dir }}/ssl/users/, then apply a clusterrolebinding.
# Expects extra-vars USER_NAME, USER_TYPE and CUSTOM_EXPIRY from 'ezctl kcfg-adm -A'.
- name: 创建自定义用户证书目录
  file: name={{ cluster_dir }}/ssl/users/ state=directory

- name: 准备CA配置文件
  template: src=ca-config.json.j2 dest={{ cluster_dir }}/ssl/ca-config.json

- name: 准备kubectl使用的{{ USER_NAME }}证书签名请求
  template: src=user-csr.json.j2 dest={{ cluster_dir }}/ssl/users/{{ USER_NAME }}-csr.json

# the 'kcfg' signing profile carries the user-selected expiry (CUSTOM_EXPIRY)
- name: 创建{{ USER_NAME }}证书与私钥
  shell: "cd {{ cluster_dir }}/ssl/users && {{ base_dir }}/bin/cfssl gencert \
        -ca={{ cluster_dir }}/ssl/ca.pem \
        -ca-key={{ cluster_dir }}/ssl/ca-key.pem \
        -config={{ cluster_dir }}/ssl/ca-config.json \
        -profile=kcfg {{ USER_NAME }}-csr.json | {{ base_dir }}/bin/cfssljson -bare {{ USER_NAME }}"

- name: 设置集群参数
  shell: "{{ base_dir }}/bin/kubectl config set-cluster {{ CLUSTER_NAME }} \
        --certificate-authority={{ cluster_dir }}/ssl/ca.pem \
        --embed-certs=true \
        --server={{ KUBE_APISERVER }} \
        --kubeconfig={{ cluster_dir }}/ssl/users/{{ USER_NAME }}.kubeconfig"

- name: 设置客户端认证参数
  shell: "{{ base_dir }}/bin/kubectl config set-credentials {{ USER_NAME }} \
        --client-certificate={{ cluster_dir }}/ssl/users/{{ USER_NAME }}.pem \
        --embed-certs=true \
        --client-key={{ cluster_dir }}/ssl/users/{{ USER_NAME }}-key.pem \
        --kubeconfig={{ cluster_dir }}/ssl/users/{{ USER_NAME }}.kubeconfig"

- name: 设置上下文参数
  shell: "{{ base_dir }}/bin/kubectl config set-context {{ CONTEXT_NAME }} \
        --cluster={{ CLUSTER_NAME }} --user={{ USER_NAME }} \
        --kubeconfig={{ cluster_dir }}/ssl/users/{{ USER_NAME }}.kubeconfig"

- name: 选择默认上下文
  shell: "{{ base_dir }}/bin/kubectl config use-context {{ CONTEXT_NAME }} \
        --kubeconfig={{ cluster_dir }}/ssl/users/{{ USER_NAME }}.kubeconfig"

# render and apply the RBAC binding (cluster-admin or view, per USER_TYPE)
- name: 生成clusterrolebind 配置文件
  template: src=crb.yaml.j2 dest={{ cluster_dir }}/ssl/users/crb-{{ USER_NAME }}.yaml

- name: 创建clusterrolebind 配置
  shell: "{{ base_dir }}/bin/kubectl apply -f {{ cluster_dir }}/ssl/users/crb-{{ USER_NAME }}.yaml"

- debug: msg="查看{{ USER_NAME }}自定义kubeconfig{{ cluster_dir }}/ssl/users/{{ USER_NAME }}.kubeconfig"

View File

@ -1,24 +1,12 @@
- name: 删除原有kubeconfig
file: path=/root/.kube/config state=absent
ignore_errors: true
- name: 准备kubectl使用的admin证书签名请求
template: src=admin-csr.json.j2 dest={{ cluster_dir }}/ssl/admin-csr.json
- name: 下载 group:read rbac 文件
copy: src=read-group-rbac.yaml dest=/tmp/read-group-rbac.yaml
when: USER_NAME == "read"
- name: 创建group:read rbac 绑定
shell: "{{ base_dir }}/bin/kubectl apply -f /tmp/read-group-rbac.yaml"
when: USER_NAME == "read"
- name: 准备kubectl使用的{{ USER_NAME }}证书签名请求
template: src={{ USER_NAME }}-csr.json.j2 dest={{ cluster_dir }}/ssl/{{ USER_NAME }}-csr.json
- name: 创建{{ USER_NAME }}证书与私钥
- name: 创建admin证书与私钥
shell: "cd {{ cluster_dir }}/ssl && {{ base_dir }}/bin/cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes {{ USER_NAME }}-csr.json | {{ base_dir }}/bin/cfssljson -bare {{ USER_NAME }}"
-profile=kubernetes admin-csr.json | {{ base_dir }}/bin/cfssljson -bare admin"
- name: 设置集群参数
shell: "{{ base_dir }}/bin/kubectl config set-cluster {{ CLUSTER_NAME }} \
@ -28,15 +16,15 @@
--kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"
- name: 设置客户端认证参数
shell: "{{ base_dir }}/bin/kubectl config set-credentials {{ USER_NAME }} \
--client-certificate={{ cluster_dir }}/ssl/{{ USER_NAME }}.pem \
shell: "{{ base_dir }}/bin/kubectl config set-credentials admin \
--client-certificate={{ cluster_dir }}/ssl/admin.pem \
--embed-certs=true \
--client-key={{ cluster_dir }}/ssl/{{ USER_NAME }}-key.pem \
--client-key={{ cluster_dir }}/ssl/admin-key.pem \
--kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"
- name: 设置上下文参数
shell: "{{ base_dir }}/bin/kubectl config set-context {{ CONTEXT_NAME }} \
--cluster={{ CLUSTER_NAME }} --user={{ USER_NAME }} \
--cluster={{ CLUSTER_NAME }} --user=admin \
--kubeconfig={{ cluster_dir }}/kubectl.kubeconfig"
- name: 选择默认上下文

View File

@ -31,6 +31,11 @@
- import_tasks: create-kubectl-kubeconfig.yml
tags: create_kctl_cfg
#----------- 创建个性化客户端配置文件
- import_tasks: add-custom-kubectl-kubeconfig.yml
tags: add-kcfg
when: "ADD_KCFG|bool"
#------------创建配置文件: kube-proxy.kubeconfig
- import_tasks: create-kube-proxy-kubeconfig.yml

View File

@ -13,6 +13,16 @@
],
"expiry": "{{ CERT_EXPIRY }}"
}
},
"profiles": {
"kcfg": {
"usages": [
"signing",
"key encipherment",
"client auth"
],
"expiry": "{{ CUSTOM_EXPIRY }}"
}
}
}
}

View File

@ -0,0 +1,16 @@
{# ClusterRoleBinding template rendered by add-custom-kubectl-kubeconfig.yml:
   binds the generated user to clusterrole 'cluster-admin' when USER_TYPE=admin,
   otherwise to 'view'. Jinja comments are stripped at render time. #}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: crb-{{ USER_NAME }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
{% if USER_TYPE == 'admin' %}
  name: cluster-admin
{% else %}
  name: view
{% endif %}
subjects:
- kind: User
  name: {{ USER_NAME }}
  apiGroup: rbac.authorization.k8s.io

View File

@ -1,5 +1,5 @@
{
"CN": "read",
"CN": "{{ USER_NAME }}",
"hosts": [],
"key": {
"algo": "rsa",
@ -10,7 +10,7 @@
"C": "CN",
"ST": "HangZhou",
"L": "XS",
"O": "group:read",
"O": "k8s",
"OU": "System"
}
]

View File

@ -1,2 +1,6 @@
# apiserver defaults to the first master node
KUBE_APISERVER: "https://{{ groups['kube-master'][0] }}:6443"
# defaults for 'ezctl kcfg-adm' custom kubeconfig generation;
# ADD_KCFG is set true and CUSTOM_EXPIRY overridden (-e) when invoked with '-A'
ADD_KCFG: false
CUSTOM_EXPIRY: "438000h"

View File

@ -33,21 +33,6 @@
- name: 准备 cni配置文件
template: src=cni-default.conf.j2 dest=/etc/cni/net.d/10-default.conf
# 判断 kubernetes 版本
- name: 注册变量 TMP_VER
shell: "{{ base_dir }}/bin/kube-apiserver --version|cut -d' ' -f2|cut -d'v' -f2"
register: TMP_VER
connection: local
tags: upgrade_k8s, restart_node
- name: 获取 kubernetes 主版本号
set_fact:
KUBE_VER: "{{ TMP_VER.stdout.split('.')[0]|int + TMP_VER.stdout.split('.')[1]|int/100 }}"
tags: upgrade_k8s, restart_node
- name: debug info
debug: var="KUBE_VER"
- name: 创建kubelet的配置文件
template: src=kubelet-config.yaml.j2 dest=/var/lib/kubelet/config.yaml
tags: upgrade_k8s, restart_node

View File

@ -29,9 +29,6 @@ ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/hugetlb/system.slice
{% endif %}
ExecStart={{ bin_dir }}/kubelet \
--config=/var/lib/kubelet/config.yaml \
{% if KUBE_VER|float < 1.13 %}
--allow-privileged=true \
{% endif %}
--cni-bin-dir={{ bin_dir }} \
--cni-conf-dir=/etc/cni/net.d \
{% if CONTAINER_RUNTIME == "containerd" %}
@ -39,6 +36,7 @@ ExecStart={{ bin_dir }}/kubelet \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
{% endif %}
--hostname-override={{ inventory_hostname }} \
--image-pull-progress-deadline=5m \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--network-plugin=cni \
--pod-infra-container-image={{ SANDBOX_IMAGE }} \