From 7526858ce40c13e09389dfb5558865241f65278f Mon Sep 17 00:00:00 2001
From: Jimmy Song
Date: Tue, 10 Oct 2017 14:50:42 +0800
Subject: [PATCH] Manage resource quotas in a namespace
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 SUMMARY.md                                 |   1 +
 guide/resource-quota-management.md         | 101 ++++++++++++++++++
 .../spark-compute-resources.yaml           |  12 +++
 .../spark-limit-range.yaml                 |  13 +++
 .../spark-object-counts.yaml               |  13 +++
 5 files changed, 140 insertions(+)
 create mode 100644 guide/resource-quota-management.md
 create mode 100644 manifests/spark-with-kubernetes-native-scheduler/spark-compute-resources.yaml
 create mode 100644 manifests/spark-with-kubernetes-native-scheduler/spark-limit-range.yaml
 create mode 100644 manifests/spark-with-kubernetes-native-scheduler/spark-object-counts.yaml

diff --git a/SUMMARY.md b/SUMMARY.md
index a21de9852..cf0e347d6 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -32,6 +32,7 @@
   - [3.1.1 Configure liveness and readiness probes for Pods](guide/configure-liveness-readiness-probes.md)
   - [3.1.2 Configure a Service Account for Pods](guide/configure-pod-service-account.md)
   - [3.1.3 Secret configuration](guide/secret-configuration.md)
+  - [3.1.4 Manage resource quotas in a namespace](guide/resource-quota-management.md)
 - [3.2 Command usage](guide/command-usage.md)
   - [3.2.1 Using kubectl](guide/using-kubectl.md)
   - [3.2.2 kubectl command-line guide for Docker users](guide/docker-cli-to-kubectl.md)
diff --git a/guide/resource-quota-management.md b/guide/resource-quota-management.md
new file mode 100644
index 000000000..4bc690158
--- /dev/null
+++ b/guide/resource-quota-management.md
@@ -0,0 +1,101 @@
+# Manage resource quotas in a namespace
+
+When multiple teams or users share the same cluster, resource contention is inevitable, so you need to limit how much of the cluster's resources each team or user may consume.
+
+## Enable resource quota enforcement
+
+Two admission-control plugins currently govern resource allocation: `ResourceQuota` and `LimitRanger` (the latter enforces `LimitRange` objects).
+
+To enable quota enforcement, add `ResourceQuota` to the `KUBE_ADMISSION_CONTROL` parameter in the API server's startup configuration. Adding `LimitRanger` as well lets you constrain the allowed range of individual resource requests; see [Configure default memory requests and limits for a namespace](https://k8smeetup.github.io/docs/tasks/administer-cluster/memory-default-namespace/) and [Configure default CPU requests and limits for a namespace](https://k8smeetup.github.io/docs/tasks/administer-cluster/cpu-default-namespace/).
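+
+As a rough sketch (the exact file location and plugin list are assumptions that depend on how the cluster was deployed and on the Kubernetes version), the setting might look like this for a systemd-managed API server:
+
+```bash
+# Example only: the file path and plugin list below are assumptions; adjust for your deployment.
+# /etc/kubernetes/apiserver (or wherever the kube-apiserver flags are configured)
+KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota"
+```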
+
+Both policies are scoped to a single namespace: `ResourceQuota` limits the total resource requests and limits of all Pods in the namespace, while `LimitRange` sets the default resource request and limit values for Pods in the namespace.
+
+Resource quotas come in three types:
+
+- compute resource quotas
+- storage resource quotas
+- object count quotas
+
+For details, see the Kubernetes documentation on [Resource Quotas](https://k8smeetup.github.io/docs/concepts/policy/resource-quotas/).
+
+## Example
+
+We will create a `ResourceQuota` and a `LimitRange` for the `spark-cluster` namespace.
+
+The following YAML files can be found in the `manifests/spark-with-kubernetes-native-scheduler` directory of [kubernetes-handbook](https://github.com/rootsongjc/kubernetes-handbook).
+
+### Configure compute resource quotas
+
+Configuration file: `spark-compute-resources.yaml`
+
+```yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: compute-resources
+  namespace: spark-cluster
+spec:
+  hard:
+    pods: "20"
+    requests.cpu: "20"
+    requests.memory: 100Gi
+    limits.cpu: "40"
+    limits.memory: 200Gi
+```
+
+To inspect the quota, run:
+
+```bash
+kubectl -n spark-cluster describe resourcequota compute-resources
+```
+
+### Configure object count limits
+
+Configuration file: `spark-object-counts.yaml`
+
+```yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: object-counts
+  namespace: spark-cluster
+spec:
+  hard:
+    configmaps: "10"
+    persistentvolumeclaims: "4"
+    replicationcontrollers: "20"
+    secrets: "10"
+    services: "10"
+    services.loadbalancers: "2"
+```
+
+### Configure a CPU and memory LimitRange
+
+Configuration file: `spark-limit-range.yaml`
+
+```yaml
+apiVersion: v1
+kind: LimitRange
+metadata:
+  name: mem-limit-range
+spec:
+  limits:
+  - default:
+      memory: 50Gi
+      cpu: 5
+    defaultRequest:
+      memory: 1Gi
+      cpu: 1
+    type: Container
+```
+
+- `default` sets the default limit values
+- `defaultRequest` sets the default request values
+
+## References
+
+[Resource Quotas](https://k8smeetup.github.io/docs/concepts/policy/resource-quotas/)
+
+[Configure default memory requests and limits for a namespace](https://k8smeetup.github.io/docs/tasks/administer-cluster/memory-default-namespace/)
+
+[Configure default CPU requests and limits for a namespace](https://k8smeetup.github.io/docs/tasks/administer-cluster/cpu-default-namespace/)
\ No newline at end of file
diff --git a/manifests/spark-with-kubernetes-native-scheduler/spark-compute-resources.yaml b/manifests/spark-with-kubernetes-native-scheduler/spark-compute-resources.yaml
new file mode 100644
index 000000000..e1eafc070
--- /dev/null
+++ b/manifests/spark-with-kubernetes-native-scheduler/spark-compute-resources.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: compute-resources
+  namespace: spark-cluster
+spec:
+  hard:
+    pods: "20"
+    requests.cpu: "20"
+    requests.memory: 100Gi
+    limits.cpu: "40"
+    limits.memory: 200Gi
diff --git a/manifests/spark-with-kubernetes-native-scheduler/spark-limit-range.yaml b/manifests/spark-with-kubernetes-native-scheduler/spark-limit-range.yaml
new file mode 100644
index 000000000..b34b10d94
--- /dev/null
+++ b/manifests/spark-with-kubernetes-native-scheduler/spark-limit-range.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: LimitRange
+metadata:
+  name: mem-limit-range
+spec:
+  limits:
+  - default:
+      memory: 50Gi
+      cpu: 5
+    defaultRequest:
+      memory: 1Gi
+      cpu: 1
+    type: Container
diff --git a/manifests/spark-with-kubernetes-native-scheduler/spark-object-counts.yaml b/manifests/spark-with-kubernetes-native-scheduler/spark-object-counts.yaml
new file mode 100644
index 000000000..8cbb2e71e
--- /dev/null
+++ b/manifests/spark-with-kubernetes-native-scheduler/spark-object-counts.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: object-counts
+  namespace: spark-cluster
+spec:
+  hard:
+    configmaps: "10"
+    persistentvolumeclaims: "4"
+    replicationcontrollers: "20"
+    secrets: "10"
+    services: "10"
+    services.loadbalancers: "2"
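
As a usage note for the manifests added above (a sketch only; it assumes kubectl is pointed at the target cluster, the commands are run from the repository root, and the `spark-cluster` namespace may still need to be created):

```bash
# Create the namespace if it does not already exist (assumption: it is not managed elsewhere).
kubectl create namespace spark-cluster

# Apply the quota manifests (they carry namespace: spark-cluster in their metadata).
kubectl apply -f manifests/spark-with-kubernetes-native-scheduler/spark-compute-resources.yaml
kubectl apply -f manifests/spark-with-kubernetes-native-scheduler/spark-object-counts.yaml

# The LimitRange manifest has no namespace field, so apply it into spark-cluster explicitly.
kubectl -n spark-cluster apply -f manifests/spark-with-kubernetes-native-scheduler/spark-limit-range.yaml

# Inspect the resulting quotas and the default request/limit values.
kubectl -n spark-cluster describe resourcequota
kubectl -n spark-cluster describe limitrange mem-limit-range
```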