<h1 id="运行支持kubernetes原生调度的spark程序">运行支持kubernetes原生调度的Spark程序</h1>
|
||
<p>TL;DR 这个主题比较大,该开源项目也还在不断进行中,我单独做了一个 web 用来记录 spark on kubernetes 的研究和最新进展见: <a href="https://jimmysong.io/spark-on-k8s" target="_blank">https://jimmysong.io/spark-on-k8s</a></p>
|
||
<p>我们之前就在 kubernetes 中运行过 standalone 方式的 spark 集群,见 <a href="spark-standalone-on-kubernetes.html">Spark standalone on kubernetes</a>。</p>
|
||
<p>目前运行支持 kubernetes 原生调度的 spark 程序由 Google 主导,目前运行支持 kubernetes 原生调度的 spark 程序由 Google 主导,fork 自 spark 的官方代码库,见<a href="https://github.com/apache-spark-on-k8s/spark/" target="_blank">https://github.com/apache-spark-on-k8s/spark/</a> ,属于Big Data SIG。</p>
|
||
<p>参与到该项目的公司有:</p>
|
||
<ul>
|
||
<li>Bloomberg</li>
|
||
<li>Google</li>
|
||
<li>Haiwen</li>
|
||
<li>Hyperpilot</li>
|
||
<li>Intel</li>
|
||
<li>Palantir</li>
|
||
<li>Pepperdata</li>
|
||
<li>Red Hat</li>
|
||
</ul>
|
||
<h2 id="为何使用-spark-on-kubernetes">为何使用 spark on kubernetes</h2>
|
||
<p>使用kubernetes原生调度的spark on kubernetes是对现有的spark on yarn/mesos的资源使用方式的革命性的改进,主要表现在以下几点:</p>
|
||
<ol>
|
||
<li>Kubernetes原生调度:不再需要二层调度,直接使用kubernetes的资源调度功能,跟其他应用共用整个kubernetes管理的资源池;</li>
|
||
<li>资源隔离,粒度更细:原先yarn中的queue在spark on kubernetes中已不存在,取而代之的是kubernetes中原生的namespace,可以为每个用户分别指定一个namespace,限制用户的资源quota;</li>
|
||
<li>细粒度的资源分配:可以给每个spark任务指定资源限制,实际指定多少资源就使用多少资源,因为没有了像yarn那样的二层调度(圈地式的),所以可以更高效和细粒度的使用资源;</li>
|
||
<li>监控的变革:因为做到了细粒度的资源分配,所以可以对用户提交的每一个任务做到资源使用的监控,从而判断用户的资源使用情况,所有的metric都记录在数据库中,甚至可以为每个用户的每次任务提交计量;</li>
|
||
<li>日志的变革:用户不再通过yarn的web页面来查看任务状态,而是通过pod的log来查看,可将所有的kuberentes中的应用的日志等同看待收集起来,然后可以根据标签查看对应应用的日志;</li>
|
||
</ol>
|
||
<p>所有这些变革都可以让我们更高效的获取资源、更有效率的获取资源!</p>
|
||
<h2 id="spark-概念说明">Spark 概念说明</h2>
|
||
<p><a href="http://spark.apache.org" target="_blank">Apache Spark</a> 是一个围绕速度、易用性和复杂分析构建的大数据处理框架。最初在2009年由加州大学伯克利分校的AMPLab开发,并于2010年成为Apache的开源项目之一。</p>
|
||
<p>在 Spark 中包括如下组件或概念:</p>
|
||
<ul>
|
||
<li><strong>Application</strong>:Spark Application 的概念和 Hadoop 中的 MapReduce 类似,指的是用户编写的 Spark 应用程序,包含了一个 Driver 功能的代码和分布在集群中多个节点上运行的 Executor 代码;</li>
|
||
<li><strong>Driver</strong>:Spark 中的 Driver 即运行上述 Application 的 main() 函数并且创建 SparkContext,其中创建 SparkContext 的目的是为了准备Spark应用程序的运行环境。在 Spark 中由 SparkContext 负责和 ClusterManager 通信,进行资源的申请、任务的分配和监控等;当 Executor 部分运行完毕后,Driver负责将SparkContext 关闭。通常用 SparkContext 代表 Driver;</li>
|
||
<li><strong>Executor</strong>:Application运行在Worker 节点上的一个进程,该进程负责运行Task,并且负责将数据存在内存或者磁盘上,每个Application都有各自独立的一批Executor。在Spark on Yarn模式下,其进程名称为<code>CoarseGrainedExecutorBackend</code>,类似于 Hadoop MapReduce 中的 YarnChild。一个 <code>CoarseGrainedExecutorBackend</code> 进程有且仅有一个 executor 对象,它负责将 Task 包装成 taskRunner,并从线程池中抽取出一个空闲线程运行 Task。每个 <code>CoarseGrainedExecutorBackend</code> 能并行运行 Task 的数量就取决于分配给它的 CPU 的个数了;</li>
|
||
<li><strong>Cluster Manager</strong>:指的是在集群上获取资源的外部服务,目前有:<ul>
|
||
<li>Standalone:Spark原生的资源管理,由Master负责资源的分配;</li>
|
||
<li>Hadoop Yarn:由YARN中的ResourceManager负责资源的分配;</li>
|
||
</ul>
|
||
</li>
|
||
<li><strong>Worker</strong>:集群中任何可以运行Application代码的节点,类似于YARN中的NodeManager节点。在Standalone模式中指的就是通过Slave文件配置的Worker节点,在Spark on Yarn模式中指的就是NodeManager节点;</li>
|
||
<li><strong>作业(Job)</strong>:包含多个Task组成的并行计算,往往由Spark Action催生,一个JOB包含多个RDD及作用于相应RDD上的各种Operation;</li>
|
||
<li><strong>阶段(Stage)</strong>:每个Job会被拆分很多组 Task,每组任务被称为Stage,也可称TaskSet,一个作业分为多个阶段,每一个stage的分割点是action。比如一个job是:(transformation1 -> transformation1 -> action1 -> transformation3 -> action2),这个job就会被分为两个stage,分割点是action1和action2。</li>
|
||
<li><p><strong>任务(Task)</strong>: 被送到某个Executor上的工作任务;</p>
|
||
</li>
|
||
<li><p><strong>Context</strong>:启动spark application的时候创建,作为Spark 运行时环境。</p>
|
||
</li>
|
||
<li><strong>Dynamic Allocation(动态资源分配)</strong>:一个配置选项,可以将其打开。从Spark1.2之后,对于On Yarn模式,已经支持动态资源分配(Dynamic Resource Allocation),这样,就可以根据Application的负载(Task情况),动态的增加和减少executors,这种策略非常适合在YARN上使用spark-sql做数据开发和分析,以及将spark-sql作为长服务来使用的场景。Executor 的动态分配需要在 cluster mode 下启用 "external shuffle service"。</li>
|
||
<li><strong>动态资源分配策略</strong>:开启动态分配策略后,application会在task因没有足够资源被挂起的时候去动态申请资源,这意味着该application现有的executor无法满足所有task并行运行。spark一轮一轮的申请资源,当有task挂起或等待 <code>spark.dynamicAllocation.schedulerBacklogTimeout</code> (默认1s)时间的时候,会开始动态资源分配;之后会每隔 <code>spark.dynamicAllocation.sustainedSchedulerBacklogTimeout</code> (默认1s)时间申请一次,直到申请到足够的资源。每次申请的资源量是指数增长的,即1,2,4,8等。之所以采用指数增长,出于两方面考虑:其一,开始申请的少是考虑到可能application会马上得到满足;其次要成倍增加,是为了防止application需要很多资源,而该方式可以在很少次数的申请之后得到满足。</li>
|
||
</ul>
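As a minimal, hedged sketch of the settings described in the last two items (the keys are standard Spark configuration options; the executor bounds are placeholders, and on this Kubernetes fork the external shuffle service role is played by the `KubernetesExternalShuffleService` introduced later in this chapter), the relevant `spark-submit` flags look roughly like this:

```bash
# Hypothetical fragment of spark-submit flags; an external shuffle service
# must already be running for dynamic allocation to work.
--conf spark.dynamicAllocation.enabled=true
--conf spark.shuffle.service.enabled=true
--conf spark.dynamicAllocation.minExecutors=1
--conf spark.dynamicAllocation.maxExecutors=10
--conf spark.dynamicAllocation.schedulerBacklogTimeout=1s
```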
## Architecture

For the limitations of Spark standalone and how it differs from the Kubernetes-native Spark architecture, see the issue [Support Spark natively in Kubernetes #34377](https://github.com/kubernetes/kubernetes/issues/34377) filed by Anirudh Ramanathan on October 8, 2016.

In short, Spark standalone on Kubernetes has the following drawbacks:

- There is no isolation between tenants; every user wants to request the maximum resources available on a node for their pods.
- Spark's master/worker model was never designed to use Kubernetes for resource scheduling, so there are two layers of scheduling, which integrates poorly with Kubernetes.

In a Kubernetes-native Spark cluster, Spark can call the Kubernetes API to obtain cluster resources and scheduling. To implement Kubernetes-native Spark, Spark needs to be given a manager, external to the cluster workloads, that interacts with the Kubernetes API.

### Scheduler backend

The basic design of Kubernetes-native Spark is to run both the Spark driver and the executors in Kubernetes pods, together with two additional components: the `ResourceStagingServer` and the `KubernetesExternalShuffleService`.

The Spark driver can run either inside the Kubernetes cluster (cluster mode) or outside it (client mode), while executors can only run inside the cluster. When a Spark job is submitted to the Kubernetes cluster, the scheduler backend sets the following properties on the executor pods:

- Use our pre-built Spark image with Kubernetes support, then invoke the `CoarseGrainedExecutorBackend` main class to start the JVM.
- The scheduler backend injects environment variables into the executor pod's runtime, such as the various JVM options, including the ones the user specified with `spark-submit`.
- The executor's CPU and memory limits are stored in the application's `SparkConf` based on these injected environment variables.
- The namespace that Spark runs in can be specified in the configuration.

Reference: [scheduler backend documentation](https://github.com/apache-spark-on-k8s/spark/blob/branch-2.2-kubernetes/resource-managers/kubernetes/architecture-docs/scheduler-backend.md)
<h2 id="安装指南">安装指南</h2>
|
||
<p>我们可以直接使用官方已编译好的 docker 镜像来部署,下面是官方发布的镜像:</p>
|
||
<table>
|
||
<thead>
|
||
<tr>
|
||
<th>组件</th>
|
||
<th>镜像</th>
|
||
</tr>
|
||
</thead>
|
||
<tbody>
|
||
<tr>
|
||
<td>Spark Driver Image</td>
|
||
<td><code>kubespark/spark-driver:v2.1.0-kubernetes-0.3.1</code></td>
|
||
</tr>
|
||
<tr>
|
||
<td>Spark Executor Image</td>
|
||
<td><code>kubespark/spark-executor:v2.1.0-kubernetes-0.3.1</code></td>
|
||
</tr>
|
||
<tr>
|
||
<td>Spark Initialization Image</td>
|
||
<td><code>kubespark/spark-init:v2.1.0-kubernetes-0.3.1</code></td>
|
||
</tr>
|
||
<tr>
|
||
<td>Spark Staging Server Image</td>
|
||
<td><code>kubespark/spark-resource-staging-server:v2.1.0-kubernetes-0.3.1</code></td>
|
||
</tr>
|
||
<tr>
|
||
<td>PySpark Driver Image</td>
|
||
<td><code>kubespark/driver-py:v2.1.0-kubernetes-0.3.1</code></td>
|
||
</tr>
|
||
<tr>
|
||
<td>PySpark Executor Image</td>
|
||
<td><code>kubespark/executor-py:v2.1.0-kubernetes-0.3.1</code></td>
|
||
</tr>
|
||
</tbody>
|
||
</table>
I have pushed these images into my private image registry.
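A hedged sketch of that mirroring step for one image (the private registry and the `kubespark-spark-driver` target name follow the submit command used below; repeat for the other images as needed):

```bash
# Pull the official image, retag it for the private registry, and push it.
docker pull kubespark/spark-driver:v2.1.0-kubernetes-0.3.1
docker tag kubespark/spark-driver:v2.1.0-kubernetes-0.3.1 \
  sz-pg-oam-docker-hub-001.tendcloud.com/library/kubespark-spark-driver:v2.1.0-kubernetes-0.3.1
docker push sz-pg-oam-docker-hub-001.tendcloud.com/library/kubespark-spark-driver:v2.1.0-kubernetes-0.3.1
```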
|
||
<p>还需要安装支持 kubernetes 的 spark 客户端,在这里下载:<a href="https://github.com/apache-spark-on-k8s/spark/releases" target="_blank">https://github.com/apache-spark-on-k8s/spark/releases</a></p>
|
||
<p>根据使用的镜像版本,我下载的是 <a href="https://github.com/apache-spark-on-k8s/spark/releases/tag/v2.1.0-kubernetes-0.3.1" target="_blank">v2.1.0-kubernetes-0.3.1</a> </p>
|
||
<p><strong>运行 SparkPi 测试</strong></p>
|
||
<p>我们将任务运行在 <code>spark-cluster</code> 的 namespace 中,启动 5 个 executor 实例。</p>
|
||
<pre><code class="lang-bash">./bin/spark-submit \
|
||
--deploy-mode cluster \
|
||
--class org.apache.spark.examples.SparkPi \
|
||
--master k8s://https://172.20.0.113:6443 \
|
||
--kubernetes-namespace spark-cluster \
|
||
--conf spark.executor.instances=5 \
|
||
--conf spark.app.name=spark-pi \
|
||
--conf spark.kubernetes.driver.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/kubespark-spark-driver:v2.1.0-kubernetes-0.3.1 \
|
||
--conf spark.kubernetes.executor.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/kubespark-spark-executor:v2.1.0-kubernetes-0.3.1 \
|
||
--conf spark.kubernetes.initcontainer.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/kubespark-spark-init:v2.1.0-kubernetes-0.3.1 \
|
||
<span class="hljs-built_in">local</span>:///opt/spark/examples/jars/spark-examples_2.11-2.1.0-k8s-0.3.1-SNAPSHOT.jar
|
||
</code></pre>
For a description of the command-line parameters, see: https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html

**Note:** the jar file is actually inside the `spark.kubernetes.executor.docker.image` image.

At this point the submission still fails, and two problems show up in the error messages:

- The executor cannot find the driver pod.
- The user `system:serviceaccount:spark-cluster:default` does not have permission to get pod information in `spark-cluster`.

I filed an issue for this: [Failed to run the sample spark-pi test using spark-submit on the doc #478](https://github.com/apache-spark-on-k8s/spark/issues/478)

You need to create a `serviceaccount` for the Spark cluster and a `rolebinding` that grants it the `edit` ClusterRole:
<pre><code class="lang-bash">kubectl create serviceaccount spark --namespace spark-cluster
|
||
kubectl create rolebinding spark-edit --clusterrole=edit --serviceaccount=spark-cluster:spark --namespace=spark-cluster
|
||
</code></pre>
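A quick check that the two objects exist (the names come from the commands above):

```bash
# Each command should return exactly one object.
kubectl --namespace spark-cluster get serviceaccount spark
kubectl --namespace spark-cluster get rolebinding spark-edit -o wide
```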
This bug is expected to be fixed in a newer release.

## User Guide

### Building

Fork the project and clone it locally:

```bash
git clone https://github.com/rootsongjc/spark.git
```

Before building, make sure Java 8 and Maven 3 are installed in your environment.
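A hedged way to confirm the toolchain before the first build (assuming the repository's `build/mvn` wrapper forwards its arguments to Maven):

```bash
# Should report a 1.8.x JDK and a Maven 3.x version respectively.
java -version
build/mvn --version
```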
```bash
## Install the dependencies before the first build
build/mvn install -Pkubernetes -pl resource-managers/kubernetes/core -am -DskipTests

## Build spark on kubernetes
build/mvn compile -Pkubernetes -pl resource-managers/kubernetes/core -am -DskipTests

## Make a distribution
dev/make-distribution.sh --tgz -Phadoop-2.7 -Pkubernetes
```

The first build and distribution can take quite a while, so please be patient; if some dependencies cannot be downloaded, you may need to arrange a proxy yourself.

The detailed development guide is here: https://github.com/apache-spark-on-k8s/spark/blob/branch-2.2-kubernetes/resource-managers/kubernetes/README.md

### Building the images

Use this script to build the container images automatically: https://github.com/apache-spark-on-k8s/spark/pull/488

Place the script in the `dist` directory and run:
<pre><code class="lang-bash">./build-push-docker-images.sh -r sz-pg-oam-docker-hub-001.tendcloud.com/library -t v2.1.0-kubernetes-0.3.1-1 build
|
||
./build-push-docker-images.sh -r sz-pg-oam-docker-hub-001.tendcloud.com/library -t v2.1.0-kubernetes-0.3.1-1 push
|
||
</code></pre>
**Note:** on macOS the bundled bash may be too old and running the script will fail; check your bash version first:

```bash
bash --version
GNU bash, version 3.2.57(1)-release (x86_64-apple-darwin16)
Copyright (C) 2007 Free Software Foundation, Inc.
```

The output above is the version I had before upgrading. Upgrade bash with:

```bash
brew install bash
```

After the upgrade, the bash version is `4.4.12(1)-release (x86_64-apple-darwin16.3.0)`.

Building and pushing to my private registry produces the following images:

```bash
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-resource-staging-server:v2.1.0-kubernetes-0.3.1-1
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-init:v2.1.0-kubernetes-0.3.1-1
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-shuffle:v2.1.0-kubernetes-0.3.1-1
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor:v2.1.0-kubernetes-0.3.1-1
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor-py:v2.1.0-kubernetes-0.3.1-1
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver-py:v2.1.0-kubernetes-0.3.1-1
```

## Running the test

Run the SparkPi test from the `dist/bin` directory:
<pre><code class="lang-bash">./spark-submit \
|
||
--deploy-mode cluster \
|
||
--class org.apache.spark.examples.SparkPi \
|
||
--master k8s://https://172.20.0.113:6443 \
|
||
--kubernetes-namespace spark-cluster \
|
||
--conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \
|
||
--conf spark.executor.instances=5 \
|
||
--conf spark.app.name=spark-pi \
|
||
--conf spark.kubernetes.driver.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.executor.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.initcontainer.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-init:v2.1.0-kubernetes-0.3.1-1 \
|
||
<span class="hljs-built_in">local</span>:///opt/spark/examples/jars/spark-examples_2.11-2.2.0-k8s-0.4.0-SNAPSHOT.jar
|
||
</code></pre>
A detailed description of the parameters is available at: https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html

**Note:** the file `local:///opt/spark/examples/jars/spark-examples_2.11-2.2.0-k8s-0.4.0-SNAPSHOT.jar` is inside the `spark-driver` and `spark-executor` images; it was built and pushed to the registry when the images were built in the previous step.

The submission log shows:
<pre><code class="lang-bash">2017-09-14 14:59:01 INFO Client:54 - Waiting <span class="hljs-keyword">for</span> application spark-pi to finish...
|
||
2017-09-14 14:59:01 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:
|
||
pod name: spark-pi-1505372339796-driver
|
||
namespace: spark-cluster
|
||
labels: spark-app-selector -> spark<span class="hljs-_">-f</span>4d3a5d3ad964a05a51feb6191d50357, spark-role -> driver
|
||
pod uid: 304cf440-991a-11e7-970c<span class="hljs-_">-f</span>4e9d49f8ed0
|
||
creation time: 2017-09-14T06:59:01Z
|
||
service account name: spark
|
||
volumes: spark-token-zr8wv
|
||
node name: N/A
|
||
start time: N/A
|
||
container images: N/A
|
||
phase: Pending
|
||
status: []
|
||
2017-09-14 14:59:01 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:
|
||
pod name: spark-pi-1505372339796-driver
|
||
namespace: spark-cluster
|
||
labels: spark-app-selector -> spark<span class="hljs-_">-f</span>4d3a5d3ad964a05a51feb6191d50357, spark-role -> driver
|
||
pod uid: 304cf440-991a-11e7-970c<span class="hljs-_">-f</span>4e9d49f8ed0
|
||
creation time: 2017-09-14T06:59:01Z
|
||
service account name: spark
|
||
volumes: spark-token-zr8wv
|
||
node name: 172.20.0.114
|
||
start time: N/A
|
||
container images: N/A
|
||
phase: Pending
|
||
status: []
|
||
2017-09-14 14:59:01 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:
|
||
pod name: spark-pi-1505372339796-driver
|
||
namespace: spark-cluster
|
||
labels: spark-app-selector -> spark<span class="hljs-_">-f</span>4d3a5d3ad964a05a51feb6191d50357, spark-role -> driver
|
||
pod uid: 304cf440-991a-11e7-970c<span class="hljs-_">-f</span>4e9d49f8ed0
|
||
creation time: 2017-09-14T06:59:01Z
|
||
service account name: spark
|
||
volumes: spark-token-zr8wv
|
||
node name: 172.20.0.114
|
||
start time: 2017-09-14T06:59:01Z
|
||
container images: sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1
|
||
phase: Pending
|
||
status: [ContainerStatus(containerID=null, image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1, imageID=, lastState=ContainerState(running=null, terminated=null, waiting=null, additionalProperties={}), name=spark-kubernetes-driver, ready=<span class="hljs-literal">false</span>, restartCount=0, state=ContainerState(running=null, terminated=null, waiting=ContainerStateWaiting(message=null, reason=ContainerCreating, additionalProperties={}), additionalProperties={}), additionalProperties={})]
|
||
2017-09-14 14:59:03 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:
|
||
pod name: spark-pi-1505372339796-driver
|
||
namespace: spark-cluster
|
||
labels: spark-app-selector -> spark<span class="hljs-_">-f</span>4d3a5d3ad964a05a51feb6191d50357, spark-role -> driver
|
||
pod uid: 304cf440-991a-11e7-970c<span class="hljs-_">-f</span>4e9d49f8ed0
|
||
creation time: 2017-09-14T06:59:01Z
|
||
service account name: spark
|
||
volumes: spark-token-zr8wv
|
||
node name: 172.20.0.114
|
||
start time: 2017-09-14T06:59:01Z
|
||
container images: sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1
|
||
phase: Running
|
||
status: [ContainerStatus(containerID=docker://5c5c821c482a1e35552adccb567020532b79244392374f25754f0050e6<span class="hljs-built_in">cd</span>4c62, image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1, imageID=docker-pullable://sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver@sha256:beb92a3e3f178e286d9e5baebdead88b5ba76d651f347ad2864bb6f8eda26f94, lastState=ContainerState(running=null, terminated=null, waiting=null, additionalProperties={}), name=spark-kubernetes-driver, ready=<span class="hljs-literal">true</span>, restartCount=0, state=ContainerState(running=ContainerStateRunning(startedAt=2017-09-14T06:59:02Z, additionalProperties={}), terminated=null, waiting=null, additionalProperties={}), additionalProperties={})]
|
||
2017-09-14 14:59:12 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:
|
||
pod name: spark-pi-1505372339796-driver
|
||
namespace: spark-cluster
|
||
labels: spark-app-selector -> spark<span class="hljs-_">-f</span>4d3a5d3ad964a05a51feb6191d50357, spark-role -> driver
|
||
pod uid: 304cf440-991a-11e7-970c<span class="hljs-_">-f</span>4e9d49f8ed0
|
||
creation time: 2017-09-14T06:59:01Z
|
||
service account name: spark
|
||
volumes: spark-token-zr8wv
|
||
node name: 172.20.0.114
|
||
start time: 2017-09-14T06:59:01Z
|
||
container images: sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1
|
||
phase: Succeeded
|
||
status: [ContainerStatus(containerID=docker://5c5c821c482a1e35552adccb567020532b79244392374f25754f0050e6<span class="hljs-built_in">cd</span>4c62, image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1, imageID=docker-pullable://sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver@sha256:beb92a3e3f178e286d9e5baebdead88b5ba76d651f347ad2864bb6f8eda26f94, lastState=ContainerState(running=null, terminated=null, waiting=null, additionalProperties={}), name=spark-kubernetes-driver, ready=<span class="hljs-literal">false</span>, restartCount=0, state=ContainerState(running=null, terminated=ContainerStateTerminated(containerID=docker://5c5c821c482a1e35552adccb567020532b79244392374f25754f0050e6<span class="hljs-built_in">cd</span>4c62, <span class="hljs-built_in">exit</span>Code=0, finishedAt=2017-09-14T06:59:11Z, message=null, reason=Completed, signal=null, startedAt=null, additionalProperties={}), waiting=null, additionalProperties={}), additionalProperties={})]
|
||
2017-09-14 14:59:12 INFO LoggingPodStatusWatcherImpl:54 - Container final statuses:
|
||
|
||
|
||
Container name: spark-kubernetes-driver
|
||
Container image: sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1
|
||
Container state: Terminated
|
||
Exit code: 0
|
||
2017-09-14 14:59:12 INFO Client:54 - Application spark-pi finished.
|
||
</code></pre>
The log shows the status of the job as it runs.

Use the following command to see the pods Kubernetes starts:

```bash
kubectl --namespace spark-cluster get pods -w
```

You will see the `spark-driver` and `spark-exec` pods.
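To read the driver's own output you can tail its log (a hedged sketch; the pod name below is the one shown in the submission log above, so substitute whatever `kubectl get pods` reports for your run):

```bash
# Follow the driver log until the job finishes.
kubectl --namespace spark-cluster logs -f spark-pi-1505372339796-driver
```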
## Dependency Management

When we ran the test program above, the jar specified on the command line was already baked into the Docker images. Does that mean we have to build a new image for every job we submit? Not at all — that would be far too cumbersome.

#### Creating the resource staging server

So that users can submit jobs without creating a new image each time, we use the **resource staging server**:

```
kubectl create -f conf/kubernetes-resource-staging-server.yaml
```

We deploy it in the `spark-cluster` namespace as well; the YAML file is in the `manifests/spark-with-kubernetes-native-scheduler` directory of [kubernetes-handbook](https://github.com/rootsongjc/kubernetes-handbook).
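Before relying on it, it is worth checking that the staging server came up. A hedged check (the exact pod and service names depend on the manifest above; the URI used later in this chapter, http://172.20.0.114:31000, suggests the service is exposed via a NodePort):

```bash
# The staging server pod should be Running and its service should expose a port.
kubectl --namespace spark-cluster get pods,svc | grep -i staging
```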
#### Optimization

One thing is worth optimizing. When submitting a job with the command below, the `--conf spark.kubernetes.resourceStagingServer.uri` parameter specifies the address of the *resource staging server*. Users should not have to care which host the *resource staging server* actually runs on. There are two ways to achieve that:

- Use a `nodeSelector` to pin the *resource staging server* to one specific node, and keep using that host's IP address in the URI.
- Change the type of the `spark-resource-staging-service` service to **ClusterIP**, expose it outside the cluster with an **Ingress**, add the hostname to the internal DNS, and let users refer to the *resource staging server* by its DNS name (see the sketch after this list).
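A hedged sketch of the first step of the second option using `kubectl patch` (the service name is the `spark-resource-staging-service` mentioned above; the Ingress object and the DNS record are left out because the hostname and ingress controller are site-specific):

```bash
# Switch the staging server service to ClusterIP; an Ingress and an internal
# DNS entry pointing at it still have to be created separately.
kubectl --namespace spark-cluster patch svc spark-resource-staging-service \
  -p '{"spec": {"type": "ClusterIP"}}'
```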
Then you can run the following command to submit a local jar and run it on Kubernetes.
<pre><code class="lang-bash">./spark-submit \
|
||
--deploy-mode cluster \
|
||
--class org.apache.spark.examples.SparkPi \
|
||
--master k8s://https://172.20.0.113:6443 \
|
||
--kubernetes-namespace spark-cluster \
|
||
--conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \
|
||
--conf spark.executor.instances=5 \
|
||
--conf spark.app.name=spark-pi \
|
||
--conf spark.kubernetes.driver.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.executor.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.initcontainer.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-init:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.resourceStagingServer.uri=http://172.20.0.114:31000 \
|
||
../examples/jars/spark-examples_2.11-2.2.0-k8s-0.4.0-SNAPSHOT.jar
|
||
</code></pre>
This command uploads the local `../examples/jars/spark-examples_2.11-2.2.0-k8s-0.4.0-SNAPSHOT.jar` file to the *resource staging server*; the executors fetch the jar from that server and run it, so users no longer need to build an image for every submission.

Details: https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html#dependency-management

#### Setting the HDFS user

If the Hadoop cluster is not secured with Kerberos, you can set the user that Spark uses when talking to HDFS by passing the following four environment variables to `spark-submit`:
<pre><code class="lang-bash"> --conf spark.kubernetes.driverEnv.SPARK_USER=hadoop
|
||
--conf spark.kubernetes.driverEnv.HADOOP_USER_NAME=hadoop
|
||
--conf spark.executorEnv.HADOOP_USER_NAME=hadoop
|
||
--conf spark.executorEnv.SPARK_USER=hadoop
|
||
</code></pre>
An example command that submits a local jar as the `hadoop` user:
<pre><code class="lang-bash">./spark-submit \
|
||
--deploy-mode cluster \
|
||
--class com.talkingdata.alluxio.hadooptest \
|
||
--master k8s://https://172.20.0.113:6443 \
|
||
--kubernetes-namespace spark-cluster \
|
||
--conf spark.kubernetes.driverEnv.SPARK_USER=hadoop \
|
||
--conf spark.kubernetes.driverEnv.HADOOP_USER_NAME=hadoop \
|
||
--conf spark.executorEnv.HADOOP_USER_NAME=hadoop \
|
||
--conf spark.executorEnv.SPARK_USER=hadoop \
|
||
--conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \
|
||
--conf spark.executor.instances=5 \
|
||
--conf spark.app.name=spark-pi \
|
||
--conf spark.kubernetes.driver.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.executor.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.initcontainer.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-init:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.resourceStagingServer.uri=http://172.20.0.114:31000 \
|
||
~/Downloads/tendcloud_2.10-1.0.jar
|
||
</code></pre>
|
||
<p>详见:<a href="https://github.com/apache-spark-on-k8s/spark/issues/408" target="_blank">https://github.com/apache-spark-on-k8s/spark/issues/408</a></p>
<h4 id="限制-driver-和-executor-的资源使用">Limiting Driver and Executor resource usage</h4>

<p>Use the following parameters with <code>spark-submit</code> to set memory and CPU resource limits:</p>

<pre><code class="lang-bash">--conf spark.driver.memory=3G
--conf spark.executor.memory=3G
--conf spark.driver.cores=2
--conf spark.executor.cores=10
</code></pre>

<p>How do the values of these parameters end up in the Pod's resource settings?</p>

<p>For example, suppose we pass these two parameters to <code>spark-submit</code>: <code>--conf spark.driver.cores=2</code> and <code>--conf spark.driver.memory=100G</code>. Looking at the YAML output of the driver pod, you will see resource settings like this:</p>

<pre><code class="lang-yaml">  resources:
    limits:
      memory: 110Gi
    requests:
      cpu: "2"
      memory: 100Gi
</code></pre>

<p>The parameters above set the <code>request</code> values; where do the <code>limit</code> values come from?</p>

<p>You can use <code>spark.kubernetes.driver.limit.cores</code> and <code>spark.kubernetes.executor.limit.cores</code> to set hard CPU limits.</p>

<p>The memory limit is calculated as the memory request plus the value of <code>spark.kubernetes.executor.memoryOverhead</code>. This setting controls the amount of off-heap memory allocated to each executor (units such as k, m and g may be used); it covers JVM overhead and other native services, and is typically sized at 6% to 10% of the executor memory. For example, with <code>spark.executor.memory=10G</code> and <code>spark.kubernetes.executor.memoryOverhead=2g</code>, the executor pod gets a 10G memory request and a 12G memory limit.</p>
<p>We can submit a job like this, setting CPU and memory request and limit values for both the driver and the executors (the driver's memory limit is 110% of its request):</p>

<pre><code class="lang-bash">./spark-submit \
  --deploy-mode cluster \
  --class org.apache.spark.examples.SparkPi \
  --master k8s://https://172.20.0.113:6443 \
  --kubernetes-namespace spark-cluster \
  --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \
  --conf spark.driver.memory=100G \
  --conf spark.executor.memory=10G \
  --conf spark.driver.cores=30 \
  --conf spark.executor.cores=2 \
  --conf spark.driver.maxResultSize=10240m \
  --conf spark.kubernetes.driver.limit.cores=32 \
  --conf spark.kubernetes.executor.limit.cores=3 \
  --conf spark.kubernetes.executor.memoryOverhead=2g \
  --conf spark.executor.instances=5 \
  --conf spark.app.name=spark-pi \
  --conf spark.kubernetes.driver.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1 \
  --conf spark.kubernetes.executor.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor:v2.1.0-kubernetes-0.3.1-1 \
  --conf spark.kubernetes.initcontainer.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-init:v2.1.0-kubernetes-0.3.1-1 \
  local:///opt/spark/examples/jars/spark-examples_2.11-2.2.0-k8s-0.4.0-SNAPSHOT.jar 10000000
</code></pre>

<p>This launches a Spark job that computes pi with ten million tasks. While it runs, the driver actually consumes roughly 3 CPU cores and 40G of memory, and each executor consumes a bit less than 1 core and under 4G of memory, so we can keep tuning the resource request values based on actual consumption.</p>
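<p>One way to observe that actual consumption is through the cluster's metrics pipeline. A minimal sketch, assuming the Heapster add-on installed earlier in this handbook is available so that <code>kubectl top</code> returns data:</p>

<pre><code class="lang-bash"># Sketch: show per-pod CPU and memory usage for the running job (requires Heapster metrics).
kubectl -n spark-cluster top pod | grep spark-pi
</code></pre>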
<p><code>SPARK_DRIVER_MEMORY</code> and <code>SPARK_EXECUTOR_MEMORY</code> are passed as environment variables to the Driver container and the Executor container respectively when they start, for example in the following CMD used to launch the Driver:</p>

<pre><code class="lang-bash">CMD SPARK_CLASSPATH="${SPARK_HOME}/jars/*" && \
env | grep SPARK_JAVA_OPT_ | sed 's/[^=]*=\(.*\)/\1/g' > /tmp/java_opts.txt && \
readarray -t SPARK_DRIVER_JAVA_OPTS < /tmp/java_opts.txt && \
if ! [ -z ${SPARK_MOUNTED_CLASSPATH+x} ]; then SPARK_CLASSPATH="$SPARK_MOUNTED_CLASSPATH:$SPARK_CLASSPATH"; fi && \
if ! [ -z ${SPARK_SUBMIT_EXTRA_CLASSPATH+x} ]; then SPARK_CLASSPATH="$SPARK_SUBMIT_EXTRA_CLASSPATH:$SPARK_CLASSPATH"; fi && \
if ! [ -z ${SPARK_EXTRA_CLASSPATH+x} ]; then SPARK_CLASSPATH="$SPARK_EXTRA_CLASSPATH:$SPARK_CLASSPATH"; fi && \
if ! [ -z ${SPARK_MOUNTED_FILES_DIR+x} ]; then cp -R "$SPARK_MOUNTED_FILES_DIR/." .; fi && \
if ! [ -z ${SPARK_MOUNTED_FILES_FROM_SECRET_DIR} ]; then cp -R "$SPARK_MOUNTED_FILES_FROM_SECRET_DIR/." .; fi && \
${JAVA_HOME}/bin/java "${SPARK_DRIVER_JAVA_OPTS[@]}" -cp $SPARK_CLASSPATH -Xms$SPARK_DRIVER_MEMORY -Xmx$SPARK_DRIVER_MEMORY $SPARK_DRIVER_CLASS $SPARK_DRIVER_ARGS
</code></pre>

<p>Here you can see the reference to the <code>SPARK_DRIVER_MEMORY</code> environment variable; the Executor is configured in a similar way.</p>

<p>We can also pass values for arbitrary environment variables with parameters of the form <code>spark.executorEnv.[EnvironmentVariableName]</code>; simply replace <code>EnvironmentVariableName</code> with the name of the environment variable.</p>
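<p>For example, the following <code>spark-submit</code> flags would inject a custom variable into the executors, and the analogous <code>spark.kubernetes.driverEnv.*</code> option (already used in the HDFS example above) does the same for the driver. <code>MY_ENV_VAR</code> and its value are purely illustrative:</p>

<pre><code class="lang-bash"># Sketch: MY_ENV_VAR is a hypothetical variable name used only for illustration.
--conf spark.executorEnv.MY_ENV_VAR=foo \
--conf spark.kubernetes.driverEnv.MY_ENV_VAR=foo \
</code></pre>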
<h2 id="参考">References</h2>

<p><a href="http://lxw1234.com/archives/2015/12/593.htm" target="_blank">Spark Dynamic Resource Allocation</a></p>

<p><a href="https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html" target="_blank">Running Spark on Kubernetes</a></p>

<p><a href="https://issues.apache.org/jira/browse/SPARK-18278" target="_blank">Apache Spark Jira Issue - 18278 - SPIP: Support native submission of spark jobs to a kubernetes cluster</a></p>

<p><a href="https://github.com/kubernetes/kubernetes/issues/34377" target="_blank">Kubernetes Github Issue - 34377 Support Spark natively in Kubernetes</a></p>

<p><a href="https://github.com/kubernetes/kubernetes/tree/master/examples/spark" target="_blank">Kubernetes example spark</a></p>

<p><a href="https://github.com/rootsongjc/spark-on-kubernetes" target="_blank">https://github.com/rootsongjc/spark-on-kubernetes</a></p>

<p><a href="https://github.com/apache-spark-on-k8s/spark/blob/branch-2.2-kubernetes/resource-managers/kubernetes/architecture-docs/scheduler-backend.md" target="_blank">Scheduler backend</a></p>
<footer class="page-footer"><span class="copyright">Copyright © jimmysong.io 2017 all right reserved,powered by Gitbook</span><span class="footer-modification">Updated:
2017-09-27 21:03:00
</span></footer>
</section>
</div>
<div class="search-results">
|
||
<div class="has-results">
|
||
|
||
<h1 class="search-results-title"><span class='search-results-count'></span> results matching "<span class='search-query'></span>"</h1>
|
||
<ul class="search-results-list"></ul>
|
||
|
||
</div>
|
||
<div class="no-results">
|
||
|
||
<h1 class="search-results-title">No results matching "<span class='search-query'></span>"</h1>
|
||
|
||
</div>
|
||
</div>
|
||
</div>
|
||
|
||
</div>
|
||
</div>
|
||
|
||
</div>
<a href="spark-standalone-on-kubernetes.html" class="navigation navigation-prev " aria-label="Previous page: 5.2.1 Spark standalone on Kubernetes">
<i class="fa fa-angle-left"></i>
</a>
<a href="serverless.html" class="navigation navigation-next " aria-label="Next page: 5.3 Serverless架构">
<i class="fa fa-angle-right"></i>
</a>
</div>
<script>
var gitbook = gitbook || [];
gitbook.push(function() {
gitbook.page.hasChanged({"page":{"title":"5.2.2 运行支持kubernetes原生调度的Spark程序","level":"1.6.3.2","depth":3,"next":{"title":"5.3 Serverless架构","level":"1.6.4","depth":2,"path":"usecases/serverless.md","ref":"usecases/serverless.md","articles":[]},"previous":{"title":"5.2.1 Spark standalone on Kubernetes","level":"1.6.3.1","depth":3,"path":"usecases/spark-standalone-on-kubernetes.md","ref":"usecases/spark-standalone-on-kubernetes.md","articles":[]},"dir":"ltr"},"config":{"plugins":["github","codesnippet","splitter","page-toc-button","image-captions","editlink","back-to-top-button","-lunr","-search","search-plus","github-buttons@2.1.0","favicon@^0.0.2","tbfed-pagefooter@^0.0.1","3-ba"],"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"pluginsConfig":{"tbfed-pagefooter":{"copyright":"Copyright © jimmysong.io 2017","modify_label":"Updated:","modify_format":"YYYY-MM-DD HH:mm:ss"},"github":{"url":"https://github.com/rootsongjc/kubernetes-handbook"},"editlink":{"label":"编辑本页","multilingual":false,"base":"https://github.com/rootsongjc/kubernetes-handbook/blob/master/"},"splitter":{},"codesnippet":{},"fontsettings":{"theme":"white","family":"sans","size":2},"highlight":{},"favicon":{"shortcut":"favicon.ico","bookmark":"favicon.ico"},"page-toc-button":{},"back-to-top-button":{},"github-buttons":{"repo":"rootsongjc/kubernetes-handbook","types":["star"],"size":"small"},"3-ba":{"configuration":"auto","token":"11f7d254cfa4e0ca44b175c66d379ecc"},"sharing":{"facebook":true,"twitter":true,"google":false,"weibo":false,"instapaper":false,"vk":false,"all":["facebook","google","twitter","weibo","instapaper"]},"theme-default":{"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"showLevel":false},"search-plus":{},"image-captions":{"caption":"图片 - _CAPTION_","variable_name":"_pictures"}},"theme":"default","author":"Jimmy Song","pdf":{"pageNumbers":true,"fontSize":12,"fontFamily":"Arial","paperSize":"a4","chapterMark":"pagebreak","pageBreaksBefore":"/","margin":{"right":62,"left":62,"top":56,"bottom":56}},"structure":{"langs":"LANGS.md","readme":"README.md","glossary":"GLOSSARY.md","summary":"SUMMARY.md"},"variables":{"_pictures":[{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.1","level":"1.2","list_caption":"Figure: 云计算演进历程","alt":"云计算演进历程","nro":1,"url":"../images/cloud-computing-evolution-road.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"云计算演进历程","attributes":{},"skip":false,"key":"1.2.1"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.2","level":"1.2","list_caption":"Figure: Cloud native思维导图","alt":"Cloud native思维导图","nro":2,"url":"../images/cloud-native-architecutre-mindnode.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"Cloud native思维导图","attributes":{},"skip":false,"key":"1.2.2"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.3","level":"1.2","list_caption":"Figure: 十二因素应用","alt":"十二因素应用","nro":3,"url":"../images/12-factor-app.png","index":3,"caption_template":"图片 - _CAPTION_","label":"十二因素应用","attributes":{},"skip":false,"key":"1.2.3"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.4","level":"1.2","list_caption":"Figure: 
使用Jenkins进行持续集成与发布流程图","alt":"使用Jenkins进行持续集成与发布流程图","nro":4,"url":"../images/kubernetes-jenkins-ci-cd.png","index":4,"caption_template":"图片 - _CAPTION_","label":"使用Jenkins进行持续集成与发布流程图","attributes":{},"skip":false,"key":"1.2.4"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.5","level":"1.2","list_caption":"Figure: filebeat日志收集架构图","alt":"filebeat日志收集架构图","nro":5,"url":"../images/filebeat-log-collector-arch.png","index":5,"caption_template":"图片 - _CAPTION_","label":"filebeat日志收集架构图","attributes":{},"skip":false,"key":"1.2.5"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.6","level":"1.2","list_caption":"Figure: API文档","alt":"API文档","nro":6,"url":"../images/k8s-app-monitor-test-api-doc.jpg","index":6,"caption_template":"图片 - _CAPTION_","label":"API文档","attributes":{},"skip":false,"key":"1.2.6"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.7","level":"1.2","list_caption":"Figure: 迁移步骤示意图","alt":"迁移步骤示意图","nro":7,"url":"../images/migrating-hadoop-yarn-to-kubernetes.png","index":7,"caption_template":"图片 - _CAPTION_","label":"迁移步骤示意图","attributes":{},"skip":false,"key":"1.2.7"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.8","level":"1.2","list_caption":"Figure: service mesh架构图","alt":"service mesh架构图","nro":8,"url":"../images/serivce-mesh-control-plane.png","index":8,"caption_template":"图片 - _CAPTION_","label":"service mesh架构图","attributes":{},"skip":false,"key":"1.2.8"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.9","level":"1.2","list_caption":"Figure: kibana界面","alt":"kibana界面","nro":9,"url":"../images/filebeat-docker-test.jpg","index":9,"caption_template":"图片 - _CAPTION_","label":"kibana界面","attributes":{},"skip":false,"key":"1.2.9"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.10","level":"1.2","list_caption":"Figure: Grafana界面示意图1","alt":"Grafana界面示意图1","nro":10,"url":"../images/kubernetes-devops-example-grafana-1.png","index":10,"caption_template":"图片 - _CAPTION_","label":"Grafana界面示意图1","attributes":{},"skip":false,"key":"1.2.10"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.11","level":"1.2","list_caption":"Figure: Grafana界面示意图2","alt":"Grafana界面示意图2","nro":11,"url":"../images/kubernetes-devops-example-grafana-2.png","index":11,"caption_template":"图片 - _CAPTION_","label":"Grafana界面示意图2","attributes":{},"skip":false,"key":"1.2.11"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.12","level":"1.2","list_caption":"Figure: Grafana界面示意图3","alt":"Grafana界面示意图3","nro":12,"url":"../images/kubernetes-devops-example-grafana-3.png","index":12,"caption_template":"图片 - _CAPTION_","label":"Grafana界面示意图3","attributes":{},"skip":false,"key":"1.2.12"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.13","level":"1.2","list_caption":"Figure: dashboard","alt":"dashboard","nro":13,"url":"../images/spark-job-on-kubernetes-example-1.jpg","index":13,"caption_template":"图片 - _CAPTION_","label":"dashboard","attributes":{},"skip":false,"key":"1.2.13"},{"backlink":"cloud-native/kubernetes-and-cloud-native-app-overview.html#fig1.2.14","level":"1.2","list_caption":"Figure: Grafana","alt":"Grafana","nro":14,"url":"../images/spark-job-on-kubernetes-example-2.jpg","index":14,"caption_template":"图片 - 
_CAPTION_","label":"Grafana","attributes":{},"skip":false,"key":"1.2.14"},{"backlink":"concepts/index.html#fig1.3.1","level":"1.3","list_caption":"Figure: Borg架构","alt":"Borg架构","nro":15,"url":"../images/borg.png","index":1,"caption_template":"图片 - _CAPTION_","label":"Borg架构","attributes":{},"skip":false,"key":"1.3.1"},{"backlink":"concepts/index.html#fig1.3.2","level":"1.3","list_caption":"Figure: Kubernetes架构","alt":"Kubernetes架构","nro":16,"url":"../images/architecture.png","index":2,"caption_template":"图片 - _CAPTION_","label":"Kubernetes架构","attributes":{},"skip":false,"key":"1.3.2"},{"backlink":"concepts/index.html#fig1.3.3","level":"1.3","list_caption":"Figure: kubernetes整体架构示意图","alt":"kubernetes整体架构示意图","nro":17,"url":"../images/kubernetes-whole-arch.png","index":3,"caption_template":"图片 - _CAPTION_","label":"kubernetes整体架构示意图","attributes":{},"skip":false,"key":"1.3.3"},{"backlink":"concepts/index.html#fig1.3.4","level":"1.3","list_caption":"Figure: Kubernetes master架构示意图","alt":"Kubernetes master架构示意图","nro":18,"url":"../images/kubernetes-master-arch.png","index":4,"caption_template":"图片 - _CAPTION_","label":"Kubernetes master架构示意图","attributes":{},"skip":false,"key":"1.3.4"},{"backlink":"concepts/index.html#fig1.3.5","level":"1.3","list_caption":"Figure: kubernetes node架构示意图","alt":"kubernetes node架构示意图","nro":19,"url":"../images/kubernetes-node-arch.png","index":5,"caption_template":"图片 - _CAPTION_","label":"kubernetes node架构示意图","attributes":{},"skip":false,"key":"1.3.5"},{"backlink":"concepts/index.html#fig1.3.6","level":"1.3","list_caption":"Figure: Kubernetes分层架构示意图","alt":"Kubernetes分层架构示意图","nro":20,"url":"../images/kubernetes-layers-arch.jpg","index":6,"caption_template":"图片 - _CAPTION_","label":"Kubernetes分层架构示意图","attributes":{},"skip":false,"key":"1.3.6"},{"backlink":"concepts/concepts.html#fig1.3.1.1","level":"1.3.1","list_caption":"Figure: 分层架构示意图","alt":"分层架构示意图","nro":21,"url":"../images/kubernetes-layers-arch.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"分层架构示意图","attributes":{},"skip":false,"key":"1.3.1.1"},{"backlink":"concepts/pod-overview.html#fig1.3.2.1.1","level":"1.3.2.1","list_caption":"Figure: pod diagram","alt":"pod diagram","nro":22,"url":"../images/pod-overview.png","index":1,"caption_template":"图片 - _CAPTION_","label":"pod diagram","attributes":{},"skip":false,"key":"1.3.2.1.1"},{"backlink":"concepts/pod.html#fig1.3.2.1.1.1","level":"1.3.2.1.1","list_caption":"Figure: Pod示意图","alt":"Pod示意图","nro":23,"url":"../images/pod-overview.png","index":1,"caption_template":"图片 - _CAPTION_","label":"Pod示意图","attributes":{},"skip":false,"key":"1.3.2.1.1.1"},{"backlink":"concepts/pod.html#fig1.3.2.1.1.2","level":"1.3.2.1.1","list_caption":"Figure: Pod Cheatsheet","alt":"Pod Cheatsheet","nro":24,"url":"../images/kubernetes-pod-cheatsheet.png","index":2,"caption_template":"图片 - _CAPTION_","label":"Pod Cheatsheet","attributes":{},"skip":false,"key":"1.3.2.1.1.2"},{"backlink":"concepts/service.html#fig1.3.2.4.1","level":"1.3.2.4","list_caption":"Figure: userspace代理模式下Service概览图","alt":"userspace代理模式下Service概览图","nro":25,"url":"../images/services-userspace-overview.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"userspace代理模式下Service概览图","attributes":{},"skip":false,"key":"1.3.2.4.1"},{"backlink":"concepts/service.html#fig1.3.2.4.2","level":"1.3.2.4","list_caption":"Figure: iptables代理模式下Service概览图","alt":"iptables代理模式下Service概览图","nro":26,"url":"../images/services-iptables-overview.jpg","index":2,"caption_template":"图片 - 
_CAPTION_","label":"iptables代理模式下Service概览图","attributes":{},"skip":false,"key":"1.3.2.4.2"},{"backlink":"concepts/deployment.html#fig1.3.2.6.1","level":"1.3.2.6","list_caption":"Figure: kubernetes deployment cheatsheet","alt":"kubernetes deployment cheatsheet","nro":27,"url":"../images/deployment-cheatsheet.png","index":1,"caption_template":"图片 - _CAPTION_","label":"kubernetes deployment cheatsheet","attributes":{},"skip":false,"key":"1.3.2.6.1"},{"backlink":"concepts/horizontal-pod-autoscaling.html#fig1.3.2.16.1","level":"1.3.2.16","list_caption":"Figure: horizontal-pod-autoscaler","alt":"horizontal-pod-autoscaler","nro":28,"url":"../images/horizontal-pod-autoscaler.png","index":1,"caption_template":"图片 - _CAPTION_","label":"horizontal-pod-autoscaler","attributes":{},"skip":false,"key":"1.3.2.16.1"},{"backlink":"concepts/label.html#fig1.3.2.17.1","level":"1.3.2.17","list_caption":"Figure: label示意图","alt":"label示意图","nro":29,"url":"../images/labels.png","index":1,"caption_template":"图片 - _CAPTION_","label":"label示意图","attributes":{},"skip":false,"key":"1.3.2.17.1"},{"backlink":"guide/using-kubectl.html#fig1.4.2.2.1","level":"1.4.2.2","list_caption":"Figure: kubectl cheatsheet","alt":"kubectl cheatsheet","nro":30,"url":"../images/kubernetes-kubectl-cheatsheet.png","index":1,"caption_template":"图片 - _CAPTION_","label":"kubectl cheatsheet","attributes":{},"skip":false,"key":"1.4.2.2.1"},{"backlink":"guide/using-kubectl.html#fig1.4.2.2.2","level":"1.4.2.2","list_caption":"Figure: kube-shell页面","alt":"kube-shell页面","nro":31,"url":"../images/kube-shell.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"kube-shell页面","attributes":{},"skip":false,"key":"1.4.2.2.2"},{"backlink":"guide/ip-masq-agent.html#fig1.4.3.6.1","level":"1.4.3.6","list_caption":"Figure: IP伪装代理示意图","alt":"IP伪装代理示意图","nro":32,"url":"../images/ip-masq.png","index":1,"caption_template":"图片 - _CAPTION_","label":"IP伪装代理示意图","attributes":{},"skip":false,"key":"1.4.3.6.1"},{"backlink":"guide/deploy-applications-in-kubernetes.html#fig1.4.5.1.1","level":"1.4.5.1","list_caption":"Figure: API","alt":"API","nro":33,"url":"../images/k8s-app-monitor-test-api-doc.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"API","attributes":{},"skip":false,"key":"1.4.5.1.1"},{"backlink":"guide/deploy-applications-in-kubernetes.html#fig1.4.5.1.2","level":"1.4.5.1","list_caption":"Figure: wercker","alt":"wercker","nro":34,"url":"../images/k8s-app-monitor-agent-wercker.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"wercker","attributes":{},"skip":false,"key":"1.4.5.1.2"},{"backlink":"guide/deploy-applications-in-kubernetes.html#fig1.4.5.1.3","level":"1.4.5.1","list_caption":"Figure: 图表","alt":"图表","nro":35,"url":"../images/k8s-app-monitor-agent.jpg","index":3,"caption_template":"图片 - _CAPTION_","label":"图表","attributes":{},"skip":false,"key":"1.4.5.1.3"},{"backlink":"guide/migrating-hadoop-yarn-to-kubernetes.html#fig1.4.5.2.1","level":"1.4.5.2","list_caption":"Figure: spark on yarn with kubernetes","alt":"spark on yarn with kubernetes","nro":36,"url":"../images/spark-on-yarn-with-kubernetes.png","index":1,"caption_template":"图片 - _CAPTION_","label":"spark on yarn with kubernetes","attributes":{},"skip":false,"key":"1.4.5.2.1"},{"backlink":"guide/migrating-hadoop-yarn-to-kubernetes.html#fig1.4.5.2.2","level":"1.4.5.2","list_caption":"Figure: Terms","alt":"Terms","nro":37,"url":"../images/terms-in-kubernetes-app-deployment.png","index":2,"caption_template":"图片 - 
_CAPTION_","label":"Terms","attributes":{},"skip":false,"key":"1.4.5.2.2"},{"backlink":"guide/migrating-hadoop-yarn-to-kubernetes.html#fig1.4.5.2.3","level":"1.4.5.2","list_caption":"Figure: 分解步骤解析","alt":"分解步骤解析","nro":38,"url":"../images/migrating-hadoop-yarn-to-kubernetes.png","index":3,"caption_template":"图片 - _CAPTION_","label":"分解步骤解析","attributes":{},"skip":false,"key":"1.4.5.2.3"},{"backlink":"practice/node-installation.html#fig1.5.1.6.1","level":"1.5.1.6","list_caption":"Figure: welcome-nginx","alt":"welcome-nginx","nro":39,"url":"http://olz1di9xf.bkt.clouddn.com/kubernetes-installation-test-nginx.png","index":1,"caption_template":"图片 - _CAPTION_","label":"welcome-nginx","attributes":{},"skip":false,"key":"1.5.1.6.1"},{"backlink":"practice/dashboard-addon-installation.html#fig1.5.1.8.1","level":"1.5.1.8","list_caption":"Figure: kubernetes-dashboard","alt":"kubernetes-dashboard","nro":40,"url":"http://olz1di9xf.bkt.clouddn.com/kubernetes-dashboard-raw.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"kubernetes-dashboard","attributes":{},"skip":false,"key":"1.5.1.8.1"},{"backlink":"practice/dashboard-addon-installation.html#fig1.5.1.8.2","level":"1.5.1.8","list_caption":"Figure: V1.6.3版本的dashboard界面","alt":"V1.6.3版本的dashboard界面","nro":41,"url":"../images/dashboard-v163.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"V1.6.3版本的dashboard界面","attributes":{},"skip":false,"key":"1.5.1.8.2"},{"backlink":"practice/heapster-addon-installation.html#fig1.5.1.9.1","level":"1.5.1.9","list_caption":"Figure: dashboard-heapster","alt":"dashboard-heapster","nro":42,"url":"../images/kubernetes-dashboard-with-heapster.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"dashboard-heapster","attributes":{},"skip":false,"key":"1.5.1.9.1"},{"backlink":"practice/heapster-addon-installation.html#fig1.5.1.9.2","level":"1.5.1.9","list_caption":"Figure: grafana","alt":"grafana","nro":43,"url":"../images/kubernetes-heapster-grafana.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"grafana","attributes":{},"skip":false,"key":"1.5.1.9.2"},{"backlink":"practice/heapster-addon-installation.html#fig1.5.1.9.3","level":"1.5.1.9","list_caption":"Figure: kubernetes-influxdb-heapster","alt":"kubernetes-influxdb-heapster","nro":44,"url":"../images/kubernetes-influxdb-heapster.jpg","index":3,"caption_template":"图片 - _CAPTION_","label":"kubernetes-influxdb-heapster","attributes":{},"skip":false,"key":"1.5.1.9.3"},{"backlink":"practice/heapster-addon-installation.html#fig1.5.1.9.4","level":"1.5.1.9","list_caption":"Figure: 修改grafana模板","alt":"修改grafana模板","nro":45,"url":"../images/grafana-dashboard-setting.jpg","index":4,"caption_template":"图片 - _CAPTION_","label":"修改grafana模板","attributes":{},"skip":false,"key":"1.5.1.9.4"},{"backlink":"practice/efk-addon-installation.html#fig1.5.1.10.1","level":"1.5.1.10","list_caption":"Figure: es-setting","alt":"es-setting","nro":46,"url":"../images/es-setting.png","index":1,"caption_template":"图片 - _CAPTION_","label":"es-setting","attributes":{},"skip":false,"key":"1.5.1.10.1"},{"backlink":"practice/efk-addon-installation.html#fig1.5.1.10.2","level":"1.5.1.10","list_caption":"Figure: es-home","alt":"es-home","nro":47,"url":"../images/kubernetes-efk-kibana.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"es-home","attributes":{},"skip":false,"key":"1.5.1.10.2"},{"backlink":"practice/traefik-ingress-installation.html#fig1.5.2.1.1","level":"1.5.2.1","list_caption":"Figure: 
kubernetes-dashboard","alt":"kubernetes-dashboard","nro":48,"url":"../images/traefik-dashboard.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"kubernetes-dashboard","attributes":{},"skip":false,"key":"1.5.2.1.1"},{"backlink":"practice/traefik-ingress-installation.html#fig1.5.2.1.2","level":"1.5.2.1","list_caption":"Figure: traefik-nginx","alt":"traefik-nginx","nro":49,"url":"../images/traefik-nginx.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"traefik-nginx","attributes":{},"skip":false,"key":"1.5.2.1.2"},{"backlink":"practice/traefik-ingress-installation.html#fig1.5.2.1.3","level":"1.5.2.1","list_caption":"Figure: traefik-guestbook","alt":"traefik-guestbook","nro":50,"url":"../images/traefik-guestbook.jpg","index":3,"caption_template":"图片 - _CAPTION_","label":"traefik-guestbook","attributes":{},"skip":false,"key":"1.5.2.1.3"},{"backlink":"practice/distributed-load-test.html#fig1.5.2.2.1","level":"1.5.2.2","list_caption":"Figure: 使用dashboard来扩容","alt":"使用dashboard来扩容","nro":51,"url":"../images/dashbaord-scale.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"使用dashboard来扩容","attributes":{},"skip":false,"key":"1.5.2.2.1"},{"backlink":"practice/distributed-load-test.html#fig1.5.2.2.2","level":"1.5.2.2","list_caption":"Figure: Traefik的UI","alt":"Traefik的UI","nro":52,"url":"../images/traefik-dashboard-locust.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"Traefik的UI","attributes":{},"skip":false,"key":"1.5.2.2.2"},{"backlink":"practice/distributed-load-test.html#fig1.5.2.2.3","level":"1.5.2.2","list_caption":"Figure: Locust启动界面","alt":"Locust启动界面","nro":53,"url":"../images/locust-start-swarming.jpg","index":3,"caption_template":"图片 - _CAPTION_","label":"Locust启动界面","attributes":{},"skip":false,"key":"1.5.2.2.3"},{"backlink":"practice/distributed-load-test.html#fig1.5.2.2.4","level":"1.5.2.2","list_caption":"Figure: Dashboard查看页面","alt":"Dashboard查看页面","nro":54,"url":"../images/sample-webapp-rc.jpg","index":4,"caption_template":"图片 - _CAPTION_","label":"Dashboard查看页面","attributes":{},"skip":false,"key":"1.5.2.2.4"},{"backlink":"practice/distributed-load-test.html#fig1.5.2.2.5","level":"1.5.2.2","list_caption":"Figure: Locust测试结果页面","alt":"Locust测试结果页面","nro":55,"url":"../images/locust-dashboard.jpg","index":5,"caption_template":"图片 - _CAPTION_","label":"Locust测试结果页面","attributes":{},"skip":false,"key":"1.5.2.2.5"},{"backlink":"practice/network-and-cluster-perfermance-test.html#fig1.5.2.3.1","level":"1.5.2.3","list_caption":"Figure: kubernetes-dashboard","alt":"kubernetes-dashboard","nro":56,"url":"http://olz1di9xf.bkt.clouddn.com/kubenetes-e2e-test.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"kubernetes-dashboard","attributes":{},"skip":false,"key":"1.5.2.3.1"},{"backlink":"practice/network-and-cluster-perfermance-test.html#fig1.5.2.3.2","level":"1.5.2.3","list_caption":"Figure: locust-test","alt":"locust-test","nro":57,"url":"http://olz1di9xf.bkt.clouddn.com/kubernetes-locust-test.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"locust-test","attributes":{},"skip":false,"key":"1.5.2.3.2"},{"backlink":"practice/edge-node-configuration.html#fig1.5.2.4.1","level":"1.5.2.4","list_caption":"Figure: 边缘节点架构","alt":"边缘节点架构","nro":58,"url":"../images/kubernetes-edge-node-architecture.png","index":1,"caption_template":"图片 - _CAPTION_","label":"边缘节点架构","attributes":{},"skip":false,"key":"1.5.2.4.1"},{"backlink":"practice/app-log-collection.html#fig1.5.3.2.1","level":"1.5.3.2","list_caption":"Figure: 
filebeat日志收集架构图","alt":"filebeat日志收集架构图","nro":59,"url":"../images/filebeat-log-collector.png","index":1,"caption_template":"图片 - _CAPTION_","label":"filebeat日志收集架构图","attributes":{},"skip":false,"key":"1.5.3.2.1"},{"backlink":"practice/app-log-collection.html#fig1.5.3.2.2","level":"1.5.3.2","list_caption":"Figure: Kibana页面","alt":"Kibana页面","nro":60,"url":"../images/filebeat-docker-test.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"Kibana页面","attributes":{},"skip":false,"key":"1.5.3.2.2"},{"backlink":"practice/app-log-collection.html#fig1.5.3.2.3","level":"1.5.3.2","list_caption":"Figure: filebeat收集的日志详细信息","alt":"filebeat收集的日志详细信息","nro":61,"url":"../images/kubernetes-filebeat-detail.png","index":3,"caption_template":"图片 - _CAPTION_","label":"filebeat收集的日志详细信息","attributes":{},"skip":false,"key":"1.5.3.2.3"},{"backlink":"practice/monitor.html#fig1.5.3.4.1","level":"1.5.3.4","list_caption":"Figure: Kubernetes集群中的监控","alt":"Kubernetes集群中的监控","nro":62,"url":"../images/monitoring-in-kubernetes.png","index":1,"caption_template":"图片 - _CAPTION_","label":"Kubernetes集群中的监控","attributes":{},"skip":false,"key":"1.5.3.4.1"},{"backlink":"practice/monitor.html#fig1.5.3.4.2","level":"1.5.3.4","list_caption":"Figure: kubernetes的容器命名规则示意图","alt":"kubernetes的容器命名规则示意图","nro":63,"url":"../images/kubernetes-container-naming-rule.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"kubernetes的容器命名规则示意图","attributes":{},"skip":false,"key":"1.5.3.4.2"},{"backlink":"practice/monitor.html#fig1.5.3.4.3","level":"1.5.3.4","list_caption":"Figure: Heapster架构图(改进版)","alt":"Heapster架构图(改进版)","nro":64,"url":"../images/kubernetes-heapster-monitoring.png","index":3,"caption_template":"图片 - _CAPTION_","label":"Heapster架构图(改进版)","attributes":{},"skip":false,"key":"1.5.3.4.3"},{"backlink":"practice/monitor.html#fig1.5.3.4.4","level":"1.5.3.4","list_caption":"Figure: 应用监控架构图","alt":"应用监控架构图","nro":65,"url":"../images/kubernetes-app-monitoring.png","index":4,"caption_template":"图片 - _CAPTION_","label":"应用监控架构图","attributes":{},"skip":false,"key":"1.5.3.4.4"},{"backlink":"practice/monitor.html#fig1.5.3.4.5","level":"1.5.3.4","list_caption":"Figure: 应用拓扑图","alt":"应用拓扑图","nro":66,"url":"../images/weave-scope-service-topology.jpg","index":5,"caption_template":"图片 - _CAPTION_","label":"应用拓扑图","attributes":{},"skip":false,"key":"1.5.3.4.5"},{"backlink":"practice/data-persistence-problem.html#fig1.5.3.5.1","level":"1.5.3.5","list_caption":"Figure: 日志持久化收集解决方案示意图","alt":"日志持久化收集解决方案示意图","nro":67,"url":"../images/log-persistence-logstash.png","index":1,"caption_template":"图片 - _CAPTION_","label":"日志持久化收集解决方案示意图","attributes":{},"skip":false,"key":"1.5.3.5.1"},{"backlink":"practice/using-prometheus-to-monitor-kuberentes-cluster.html#fig1.5.3.7.1","level":"1.5.3.7","list_caption":"Figure: Grafana页面","alt":"Grafana页面","nro":68,"url":"../images/kubernetes-prometheus-monitoring.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"Grafana页面","attributes":{},"skip":false,"key":"1.5.3.7.1"},{"backlink":"practice/using-heapster-to-get-object-metrics.html#fig1.5.3.8.1","level":"1.5.3.8","list_caption":"Figure: Heapster架构图","alt":"Heapster架构图","nro":69,"url":"../images/heapster-architecture.png","index":1,"caption_template":"图片 - _CAPTION_","label":"Heapster架构图","attributes":{},"skip":false,"key":"1.5.3.8.1"},{"backlink":"practice/storage-for-containers-using-glusterfs-with-openshift.html#fig1.5.4.1.2.1","level":"1.5.4.1.2","list_caption":"Figure: Screen Shot 2017-03-23 at 21.50.34","alt":"Screen Shot 2017-03-23 at 
21.50.34","nro":70,"url":"https://keithtenzer.files.wordpress.com/2017/03/screen-shot-2017-03-23-at-21-50-34.png?w=440","index":1,"caption_template":"图片 - _CAPTION_","label":"Screen Shot 2017-03-23 at 21.50.34","attributes":{},"skip":false,"key":"1.5.4.1.2.1"},{"backlink":"practice/storage-for-containers-using-glusterfs-with-openshift.html#fig1.5.4.1.2.2","level":"1.5.4.1.2","list_caption":"Figure: Screen Shot 2017-03-24 at 11.09.34.png","alt":"Screen Shot 2017-03-24 at 11.09.34.png","nro":71,"url":"https://keithtenzer.files.wordpress.com/2017/03/screen-shot-2017-03-24-at-11-09-341.png?w=440","index":2,"caption_template":"图片 - _CAPTION_","label":"Screen Shot 2017-03-24 at 11.09.34.png","attributes":{},"skip":false,"key":"1.5.4.1.2.2"},{"backlink":"practice/helm.html#fig1.5.5.1.1","level":"1.5.5.1","list_caption":"Figure: Helm chart源","alt":"Helm chart源","nro":72,"url":"../images/helm-charts-repository.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"Helm chart源","attributes":{},"skip":false,"key":"1.5.5.1.1"},{"backlink":"practice/helm.html#fig1.5.5.1.2","level":"1.5.5.1","list_caption":"Figure: TODO应用的Web页面","alt":"TODO应用的Web页面","nro":73,"url":"../images/helm-mean-todo-aholic.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"TODO应用的Web页面","attributes":{},"skip":false,"key":"1.5.5.1.2"},{"backlink":"practice/create-private-charts-repo.html#fig1.5.5.2.1","level":"1.5.5.2","list_caption":"Figure: Helm monocular界面","alt":"Helm monocular界面","nro":74,"url":"../images/helm-monocular-jimmysong.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"Helm monocular界面","attributes":{},"skip":false,"key":"1.5.5.2.1"},{"backlink":"practice/jenkins-ci-cd.html#fig1.5.6.1.1","level":"1.5.6.1","list_caption":"Figure: 基于Jenkins的持续集成与发布","alt":"基于Jenkins的持续集成与发布","nro":75,"url":"../images/kubernetes-jenkins-ci-cd.png","index":1,"caption_template":"图片 - _CAPTION_","label":"基于Jenkins的持续集成与发布","attributes":{},"skip":false,"key":"1.5.6.1.1"},{"backlink":"practice/drone-ci-cd.html#fig1.5.6.2.1","level":"1.5.6.2","list_caption":"Figure: OAuth注册","alt":"OAuth注册","nro":76,"url":"../images/github-oauth-register.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"OAuth注册","attributes":{},"skip":false,"key":"1.5.6.2.1"},{"backlink":"practice/drone-ci-cd.html#fig1.5.6.2.2","level":"1.5.6.2","list_caption":"Figure: OAuth key","alt":"OAuth key","nro":77,"url":"../images/github-oauth-drone-key.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"OAuth key","attributes":{},"skip":false,"key":"1.5.6.2.2"},{"backlink":"practice/drone-ci-cd.html#fig1.5.6.2.3","level":"1.5.6.2","list_caption":"Figure: Drone登陆界面","alt":"Drone登陆界面","nro":78,"url":"../images/drone-login-github.jpg","index":3,"caption_template":"图片 - _CAPTION_","label":"Drone登陆界面","attributes":{},"skip":false,"key":"1.5.6.2.3"},{"backlink":"practice/drone-ci-cd.html#fig1.5.6.2.4","level":"1.5.6.2","list_caption":"Figure: Github启用repo设置","alt":"Github启用repo设置","nro":79,"url":"../images/drone-github-active.jpg","index":4,"caption_template":"图片 - _CAPTION_","label":"Github启用repo设置","attributes":{},"skip":false,"key":"1.5.6.2.4"},{"backlink":"practice/drone-ci-cd.html#fig1.5.6.2.5","level":"1.5.6.2","list_caption":"Figure: Github单个repo设置","alt":"Github单个repo设置","nro":80,"url":"../images/drone-github-repo-setting.jpg","index":5,"caption_template":"图片 - 
_CAPTION_","label":"Github单个repo设置","attributes":{},"skip":false,"key":"1.5.6.2.5"},{"backlink":"practice/dashboard-upgrade.html#fig1.5.7.2.1","level":"1.5.7.2","list_caption":"Figure: 登陆界面","alt":"登陆界面","nro":81,"url":"../images/kubernetes-dashboard-1.7.1-login.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"登陆界面","attributes":{},"skip":false,"key":"1.5.7.2.1"},{"backlink":"practice/dashboard-upgrade.html#fig1.5.7.2.2","level":"1.5.7.2","list_caption":"Figure: 首页","alt":"首页","nro":82,"url":"../images/kubernetes-dashboard-1.7.1-default-page.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"首页","attributes":{},"skip":false,"key":"1.5.7.2.2"},{"backlink":"practice/dashboard-upgrade.html#fig1.5.7.2.3","level":"1.5.7.2","list_caption":"Figure: 用户空间","alt":"用户空间","nro":83,"url":"../images/kubernetes-dashboard-1.7.1-brand.jpg","index":3,"caption_template":"图片 - _CAPTION_","label":"用户空间","attributes":{},"skip":false,"key":"1.5.7.2.3"},{"backlink":"practice/dashboard-upgrade.html#fig1.5.7.2.4","level":"1.5.7.2","list_caption":"Figure: kubeconfig文件","alt":"kubeconfig文件","nro":84,"url":"../images/brand-kubeconfig-yaml.jpg","index":4,"caption_template":"图片 - _CAPTION_","label":"kubeconfig文件","attributes":{},"skip":false,"key":"1.5.7.2.4"},{"backlink":"usecases/service-discovery-in-microservices.html#fig1.6.1.1.1","level":"1.6.1.1","list_caption":"Figure: 微服务中的服务发现","alt":"微服务中的服务发现","nro":85,"url":"../images/service-discovery-in-microservices.png","index":1,"caption_template":"图片 - _CAPTION_","label":"微服务中的服务发现","attributes":{},"skip":false,"key":"1.6.1.1.1"},{"backlink":"usecases/service-mesh.html#fig1.6.2.1","level":"1.6.2","list_caption":"Figure: Service Mesh 架构图","alt":"Service Mesh 架构图","nro":86,"url":"../images/serivce-mesh-control-plane.png","index":1,"caption_template":"图片 - _CAPTION_","label":"Service Mesh 架构图","attributes":{},"skip":false,"key":"1.6.2.1"},{"backlink":"usecases/istio.html#fig1.6.2.1.1","level":"1.6.2.1","list_caption":"Figure: Istio架构图","alt":"Istio架构图","nro":87,"url":"../images/istio-arch.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"Istio架构图","attributes":{},"skip":false,"key":"1.6.2.1.1"},{"backlink":"usecases/istio-installation.html#fig1.6.2.1.1.1","level":"1.6.2.1.1","list_caption":"Figure: BookInfo Sample应用架构图","alt":"BookInfo Sample应用架构图","nro":88,"url":"../images/bookinfo-sample-arch.png","index":1,"caption_template":"图片 - _CAPTION_","label":"BookInfo Sample应用架构图","attributes":{},"skip":false,"key":"1.6.2.1.1.1"},{"backlink":"usecases/istio-installation.html#fig1.6.2.1.1.2","level":"1.6.2.1.1","list_caption":"Figure: BookInfo Sample页面","alt":"BookInfo Sample页面","nro":89,"url":"../images/bookinfo-sample.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"BookInfo Sample页面","attributes":{},"skip":false,"key":"1.6.2.1.1.2"},{"backlink":"usecases/istio-installation.html#fig1.6.2.1.1.3","level":"1.6.2.1.1","list_caption":"Figure: Istio Grafana界面","alt":"Istio Grafana界面","nro":90,"url":"../images/istio-grafana.jpg","index":3,"caption_template":"图片 - _CAPTION_","label":"Istio Grafana界面","attributes":{},"skip":false,"key":"1.6.2.1.1.3"},{"backlink":"usecases/istio-installation.html#fig1.6.2.1.1.4","level":"1.6.2.1.1","list_caption":"Figure: Prometheus页面","alt":"Prometheus页面","nro":91,"url":"../images/istio-prometheus.jpg","index":4,"caption_template":"图片 - 
_CAPTION_","label":"Prometheus页面","attributes":{},"skip":false,"key":"1.6.2.1.1.4"},{"backlink":"usecases/istio-installation.html#fig1.6.2.1.1.5","level":"1.6.2.1.1","list_caption":"Figure: Zipkin页面","alt":"Zipkin页面","nro":92,"url":"../images/istio-zipkin.jpg","index":5,"caption_template":"图片 - _CAPTION_","label":"Zipkin页面","attributes":{},"skip":false,"key":"1.6.2.1.1.5"},{"backlink":"usecases/istio-installation.html#fig1.6.2.1.1.6","level":"1.6.2.1.1","list_caption":"Figure: ServiceGraph页面","alt":"ServiceGraph页面","nro":93,"url":"../images/istio-servicegraph.jpg","index":6,"caption_template":"图片 - _CAPTION_","label":"ServiceGraph页面","attributes":{},"skip":false,"key":"1.6.2.1.1.6"},{"backlink":"usecases/linkerd.html#fig1.6.2.2.1","level":"1.6.2.2","list_caption":"Figure: source https://linkerd.io","alt":"source https://linkerd.io","nro":94,"url":"../images/diagram-individual-instance.png","index":1,"caption_template":"图片 - _CAPTION_","label":"source https://linkerd.io","attributes":{},"skip":false,"key":"1.6.2.2.1"},{"backlink":"usecases/linkerd-user-guide.html#fig1.6.2.2.1.1","level":"1.6.2.2.1","list_caption":"Figure: Jenkins pipeline","alt":"Jenkins pipeline","nro":95,"url":"../images/linkerd-jenkins-pipeline.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"Jenkins pipeline","attributes":{},"skip":false,"key":"1.6.2.2.1.1"},{"backlink":"usecases/linkerd-user-guide.html#fig1.6.2.2.1.2","level":"1.6.2.2.1","list_caption":"Figure: Jenkins config","alt":"Jenkins config","nro":96,"url":"../images/linkerd-jenkins.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"Jenkins config","attributes":{},"skip":false,"key":"1.6.2.2.1.2"},{"backlink":"usecases/linkerd-user-guide.html#fig1.6.2.2.1.3","level":"1.6.2.2.1","list_caption":"Figure: namerd","alt":"namerd","nro":97,"url":"../images/namerd-internal.jpg","index":3,"caption_template":"图片 - _CAPTION_","label":"namerd","attributes":{},"skip":false,"key":"1.6.2.2.1.3"},{"backlink":"usecases/linkerd-user-guide.html#fig1.6.2.2.1.4","level":"1.6.2.2.1","list_caption":"Figure: linkerd监控","alt":"linkerd监控","nro":98,"url":"../images/linkerd-helloworld-outgoing.jpg","index":4,"caption_template":"图片 - _CAPTION_","label":"linkerd监控","attributes":{},"skip":false,"key":"1.6.2.2.1.4"},{"backlink":"usecases/linkerd-user-guide.html#fig1.6.2.2.1.5","level":"1.6.2.2.1","list_caption":"Figure: linkerd监控","alt":"linkerd监控","nro":99,"url":"../images/linkerd-helloworld-incoming.jpg","index":5,"caption_template":"图片 - _CAPTION_","label":"linkerd监控","attributes":{},"skip":false,"key":"1.6.2.2.1.5"},{"backlink":"usecases/linkerd-user-guide.html#fig1.6.2.2.1.6","level":"1.6.2.2.1","list_caption":"Figure: linkerd性能监控","alt":"linkerd性能监控","nro":100,"url":"../images/linkerd-grafana.png","index":6,"caption_template":"图片 - _CAPTION_","label":"linkerd性能监控","attributes":{},"skip":false,"key":"1.6.2.2.1.6"},{"backlink":"usecases/linkerd-user-guide.html#fig1.6.2.2.1.7","level":"1.6.2.2.1","list_caption":"Figure: Linkerd ingress controller","alt":"Linkerd ingress controller","nro":101,"url":"../images/linkerd-ingress-controller.jpg","index":7,"caption_template":"图片 - _CAPTION_","label":"Linkerd ingress controller","attributes":{},"skip":false,"key":"1.6.2.2.1.7"},{"backlink":"usecases/spark-standalone-on-kubernetes.html#fig1.6.3.1.1","level":"1.6.3.1","list_caption":"Figure: spark master ui","alt":"spark master ui","nro":102,"url":"../images/spark-ui.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"spark master 
ui","attributes":{},"skip":false,"key":"1.6.3.1.1"},{"backlink":"usecases/spark-standalone-on-kubernetes.html#fig1.6.3.1.2","level":"1.6.3.1","list_caption":"Figure: zeppelin ui","alt":"zeppelin ui","nro":103,"url":"../images/zeppelin-ui.jpg","index":2,"caption_template":"图片 - _CAPTION_","label":"zeppelin ui","attributes":{},"skip":false,"key":"1.6.3.1.2"},{"backlink":"develop/client-go-sample.html#fig1.7.3.1","level":"1.7.3","list_caption":"Figure: 使用kubernetes dashboard进行故障排查","alt":"使用kubernetes dashboard进行故障排查","nro":104,"url":"../images/kubernetes-client-go-sample-update.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"使用kubernetes dashboard进行故障排查","attributes":{},"skip":false,"key":"1.7.3.1"},{"backlink":"appendix/issues.html#fig1.8.2.1","level":"1.8.2","list_caption":"Figure: pvc-storage-limit","alt":"pvc-storage-limit","nro":105,"url":"../images/pvc-storage-limit.jpg","index":1,"caption_template":"图片 - _CAPTION_","label":"pvc-storage-limit","attributes":{},"skip":false,"key":"1.8.2.1"}]},"title":"Kubernetes Handbook","language":"zh-hans","links":{"sidebar":{"Home":"https://jimmysong.io"}},"gitbook":"*","description":"Kubernetes中文指南/实践手册"},"file":{"path":"usecases/running-spark-with-kubernetes-native-scheduler.md","mtime":"2017-09-27T13:03:00.000Z","type":"markdown"},"gitbook":{"version":"3.2.2","time":"2017-11-06T03:30:16.119Z"},"basePath":"..","book":{"language":""}});
});
</script>
</div>
<script src="../gitbook/gitbook.js"></script>
<script src="../gitbook/theme.js"></script>
<script src="../gitbook/gitbook-plugin-github/plugin.js"></script>
<script src="../gitbook/gitbook-plugin-splitter/splitter.js"></script>
<script src="../gitbook/gitbook-plugin-page-toc-button/plugin.js"></script>
<script src="../gitbook/gitbook-plugin-editlink/plugin.js"></script>
<script src="../gitbook/gitbook-plugin-back-to-top-button/plugin.js"></script>
<script src="../gitbook/gitbook-plugin-search-plus/jquery.mark.min.js"></script>
<script src="../gitbook/gitbook-plugin-search-plus/search.js"></script>
<script src="../gitbook/gitbook-plugin-github-buttons/plugin.js"></script>
<script src="../gitbook/gitbook-plugin-3-ba/plugin.js"></script>
<script src="../gitbook/gitbook-plugin-sharing/buttons.js"></script>
<script src="../gitbook/gitbook-plugin-fontsettings/fontsettings.js"></script>
</body>
</html>