<h1 id="运行支持kubernetes原生调度的spark程序">运行支持kubernetes原生调度的Spark程序</h1>
|
||
<p>我们之前就在 kubernetes 中运行过 standalone 方式的 spark 集群,见 <a href="spark-standalone-on-kubernetes.html">Spark standalone on kubernetes</a>。</p>
|
||
<p>目前运行支持 kubernetes 原生调度的 spark 程序由 Google 主导,目前运行支持 kubernetes 原生调度的 spark 程序由 Google 主导,fork 自 spark 的官方代码库,见<a href="https://github.com/apache-spark-on-k8s/spark/" target="_blank">https://github.com/apache-spark-on-k8s/spark/</a> ,属于Big Data SIG。</p>
|
||
<p>参与到该项目的公司有:</p>
|
||
<ul>
|
||
<li>Bloomberg</li>
|
||
<li>Google</li>
|
||
<li>Haiwen</li>
|
||
<li>Hyperpilot</li>
|
||
<li>Intel</li>
|
||
<li>Palantir</li>
|
||
<li>Pepperdata</li>
|
||
<li>Red Hat</li>
|
||
</ul>
|
||
<h2 id="spark-概念说明">Spark 概念说明</h2>
|
||
<p><a href="http://spark.apache.org" target="_blank">Apache Spark</a> 是一个围绕速度、易用性和复杂分析构建的大数据处理框架。最初在2009年由加州大学伯克利分校的AMPLab开发,并于2010年成为Apache的开源项目之一。</p>
|
||
<p>在 Spark 中包括如下组件或概念:</p>
|
||
<ul>
<li><strong>Application</strong>: similar to a MapReduce application in Hadoop, a Spark Application is a user-written Spark program; it contains the code for a Driver and the Executor code that runs on multiple nodes across the cluster;</li>
<li><strong>Driver</strong>: the Driver runs the Application's main() function and creates the SparkContext, whose purpose is to prepare the runtime environment of the Spark application. In Spark, the SparkContext is responsible for talking to the ClusterManager to request resources and to assign and monitor tasks; once the Executors have finished, the Driver shuts the SparkContext down. The SparkContext is usually used to stand for the Driver;</li>
<li><strong>Executor</strong>: a process of the Application running on a Worker node; it runs Tasks and keeps data in memory or on disk, and each Application has its own independent set of Executors. In Spark on YARN mode the process is named <code>CoarseGrainedExecutorBackend</code>, similar to YarnChild in Hadoop MapReduce. A <code>CoarseGrainedExecutorBackend</code> process has exactly one executor object, which wraps each Task in a TaskRunner and pulls an idle thread from a thread pool to run it. The number of Tasks a <code>CoarseGrainedExecutorBackend</code> can run in parallel therefore depends on the number of CPUs allocated to it;</li>
<li><strong>Cluster Manager</strong>: the external service that acquires resources on the cluster; currently there are:<ul>
<li>Standalone: Spark's native resource management, where the Master allocates the resources;</li>
<li>Hadoop YARN: the ResourceManager in YARN allocates the resources;</li>
</ul>
</li>
<li><strong>Worker</strong>: any node in the cluster that can run Application code, similar to a NodeManager node in YARN. In Standalone mode it means a Worker node configured through the slaves file; in Spark on YARN mode it means a NodeManager node;</li>
<li><strong>Job</strong>: a parallel computation made up of multiple Tasks, usually spawned by a Spark Action; a Job contains multiple RDDs and the various Operations applied to them;</li>
<li><strong>Stage</strong>: each Job is split into many groups of Tasks; each group is called a Stage, or a TaskSet. A Job is divided into multiple Stages, and the split point of each stage is an action. For example, a job of (transformation1 -> transformation2 -> action1 -> transformation3 -> action2) is split into two stages, with the split points being action1 and action2.</li>
<li><p><strong>Task</strong>: a unit of work sent to an Executor;</p>
</li>
<li><p><strong>Context</strong>: created when the Spark application starts, it serves as the Spark runtime environment.</p>
</li>
<li><strong>Dynamic Allocation</strong>: a configuration option that can be switched on. Since Spark 1.2, the On YARN mode has supported Dynamic Resource Allocation, which increases and decreases the number of executors according to the Application's load (its Tasks). The strategy is well suited to using spark-sql on YARN for data exploration and analysis, and to running spark-sql as a long-lived service. Dynamic allocation of Executors requires the "external shuffle service" to be enabled in cluster mode (see the sketch after this list).</li>
<li><strong>Dynamic allocation policy</strong>: with dynamic allocation enabled, the application requests more resources whenever tasks are pending because resources are insufficient, that is, when the existing executors cannot run all tasks in parallel. Spark requests resources round by round: allocation starts once tasks have been pending or waiting for <code>spark.dynamicAllocation.schedulerBacklogTimeout</code> (default 1s), and a new request is then made every <code>spark.dynamicAllocation.sustainedSchedulerBacklogTimeout</code> (default 1s) until enough resources have been obtained. The amount requested grows exponentially each round: 1, 2, 4, 8, and so on. Exponential growth was chosen for two reasons: starting small covers the case where the application is satisfied right away, while doubling ensures that an application needing many resources obtains them after only a few rounds of requests.</li>
</ul>
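<p>As a quick, non-authoritative sketch of the dynamic-allocation settings described above (the master URL, executor counts, and jar path below are illustrative placeholders, not taken from this chapter), a YARN submission with dynamic allocation enabled typically looks like this:</p>
<pre><code class="lang-bash"># Sketch only: enable dynamic executor allocation on YARN.
# Requires the external shuffle service to be running on each NodeManager.
./bin/spark-submit \
  --master yarn \
  --deploy-mode cluster \
  --conf spark.dynamicAllocation.enabled=true \
  --conf spark.shuffle.service.enabled=true \
  --conf spark.dynamicAllocation.minExecutors=1 \
  --conf spark.dynamicAllocation.maxExecutors=20 \
  --conf spark.dynamicAllocation.schedulerBacklogTimeout=1s \
  --class org.apache.spark.examples.SparkPi \
  /path/to/spark-examples.jar   # placeholder jar path
</code></pre>
<p>The exponential ramp-up described above (1, 2, 4, 8 executors per round) happens between the configured minimum and maximum executor counts.</p>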
<h2 id="架构设计">架构设计</h2>
|
||
<p>关于 spark standalone 的局限性与 kubernetes native spark 架构之间的区别请参考 Anirudh Ramanathan 在 2016年10月8日提交的 issue <a href="https://github.com/kubernetes/kubernetes/issues/34377" target="_blank">Support Spark natively in Kubernetes #34377</a>。</p>
|
||
<p>简而言之,spark standalone on kubernetes 有如下几个缺点:</p>
|
||
<ul>
|
||
<li>无法对于多租户做隔离,每个用户都想给 pod 申请 node 节点可用的最大的资源。</li>
|
||
<li>Spark 的 master/worker 本来不是设计成使用 kubernetes 的资源调度,这样会存在两层的资源调度问题,不利于与 kuberentes 集成。</li>
|
||
</ul>
|
||
<p>而 kubernetes native spark 集群中,spark 可以调用 kubernetes API 获取集群资源和调度。要实现 kubernetes native spark 需要为 spark 提供一个集群外部的 manager 可以用来跟 kubernetes API 交互。</p>
|
||
<h3 id="调度器后台">调度器后台</h3>
|
||
<p>使用 kubernetes 原生调度的 spark 的基本设计思路是将 spark 的 driver 和 executor 都放在 kubernetes 的 pod 中运行,另外还有两个附加的组件:<code>ResourceStagingServer</code> 和 <code>KubernetesExternalShuffleService</code>。</p>
|
||
<p>Spark driver 其实可以运行在 kubernetes 集群内部(cluster mode)可以运行在外部(client mode),executor 只能运行在集群内部,当有 spark 作业提交到 kubernetes 集群上时,调度器后台将会为 executor pod 设置如下属性:</p>
|
||
<ul>
|
||
<li>使用我们预先编译好的包含 kubernetes 支持的 spark 镜像,然后调用 <code>CoarseGrainedExecutorBackend</code> main class 启动 JVM。</li>
|
||
<li>调度器后台为 executor pod 的运行时注入环境变量,例如各种 JVM 参数,包括用户在 <code>spark-submit</code> 时指定的那些参数。</li>
|
||
<li>Executor 的 CPU、内存限制根据这些注入的环境变量保存到应用程序的 <code>SparkConf</code> 中。</li>
|
||
<li>可以在配置中指定 spark 运行在指定的 namespace 中。</li>
|
||
</ul>
|
||
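<p>These injected settings can be observed on a running executor pod. The commands below are only a sketch: the pod name is a placeholder, and the <code>spark-role=executor</code> label is assumed by analogy with the <code>spark-role -&gt; driver</code> label that appears in the driver log later in this chapter.</p>
<pre><code class="lang-bash"># List executor pods (label assumed, see the note above).
kubectl -n spark-cluster get pods -l spark-role=executor

# $EXECUTOR_POD is a placeholder for one pod name from the listing above.
# Dump the environment the scheduler backend injected into that executor.
kubectl -n spark-cluster exec "$EXECUTOR_POD" -- env | grep -i spark
</code></pre>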
<p>Reference: <a href="https://github.com/apache-spark-on-k8s/spark/blob/branch-2.2-kubernetes/resource-managers/kubernetes/architecture-docs/scheduler-backend.md" target="_blank">Scheduler backend documentation</a></p>
<h2 id="安装指南">Installation Guide</h2>
<p>We can deploy directly with the officially pre-built Docker images; the released images are listed below (they can be pre-pulled onto nodes as sketched after the table):</p>
<table>
<thead>
<tr>
<th>Component</th>
<th>Image</th>
</tr>
</thead>
<tbody>
<tr>
<td>Spark Driver Image</td>
<td><code>kubespark/spark-driver:v2.1.0-kubernetes-0.3.1</code></td>
</tr>
<tr>
<td>Spark Executor Image</td>
<td><code>kubespark/spark-executor:v2.1.0-kubernetes-0.3.1</code></td>
</tr>
<tr>
<td>Spark Initialization Image</td>
<td><code>kubespark/spark-init:v2.1.0-kubernetes-0.3.1</code></td>
</tr>
<tr>
<td>Spark Staging Server Image</td>
<td><code>kubespark/spark-resource-staging-server:v2.1.0-kubernetes-0.3.1</code></td>
</tr>
<tr>
<td>PySpark Driver Image</td>
<td><code>kubespark/driver-py:v2.1.0-kubernetes-0.3.1</code></td>
</tr>
<tr>
<td>PySpark Executor Image</td>
<td><code>kubespark/executor-py:v2.1.0-kubernetes-0.3.1</code></td>
</tr>
</tbody>
</table>
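<p>If you want to warm the nodes' image caches before submitting anything, the official images from the table can be pulled ahead of time. A minimal sketch using one of the image names above:</p>
<pre><code class="lang-bash"># Pre-pull the driver image on a node; repeat for the other images as needed.
docker pull kubespark/spark-driver:v2.1.0-kubernetes-0.3.1
</code></pre>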
<p>I have pushed these images to my private image registry.</p>
<p>You also need the Spark client with Kubernetes support, which you can download here: <a href="https://github.com/apache-spark-on-k8s/spark/releases" target="_blank">https://github.com/apache-spark-on-k8s/spark/releases</a></p>
<p>To match the image version, I downloaded <a href="https://github.com/apache-spark-on-k8s/spark/releases/tag/v2.1.0-kubernetes-0.3.1" target="_blank">v2.1.0-kubernetes-0.3.1</a>.</p>
<p><strong>Run the SparkPi test</strong></p>
<p>We run the job in the <code>spark-cluster</code> namespace and start 5 executor instances.</p>
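<p>The <code>spark-cluster</code> namespace is assumed to already exist; if it does not, creating it is a one-liner (sketch):</p>
<pre><code class="lang-bash"># Create the namespace used throughout this chapter, if it is missing.
kubectl create namespace spark-cluster
</code></pre>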
<pre><code class="lang-bash">./bin/spark-submit \
|
||
--deploy-mode cluster \
|
||
--class org.apache.spark.examples.SparkPi \
|
||
--master k8s://https://172.20.0.113:6443 \
|
||
--kubernetes-namespace spark-cluster \
|
||
--conf spark.executor.instances=5 \
|
||
--conf spark.app.name=spark-pi \
|
||
--conf spark.kubernetes.driver.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/kubespark-spark-driver:v2.1.0-kubernetes-0.3.1 \
|
||
--conf spark.kubernetes.executor.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/kubespark-spark-executor:v2.1.0-kubernetes-0.3.1 \
|
||
--conf spark.kubernetes.initcontainer.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/kubespark-spark-init:v2.1.0-kubernetes-0.3.1 \
|
||
<span class="hljs-built_in">local</span>:///opt/spark/examples/jars/spark-examples_2.11-2.1.0-k8s-0.3.1-SNAPSHOT.jar
|
||
</code></pre>
|
||
<p>For an introduction to the command's parameters, see: <a href="https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html" target="_blank">https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html</a></p>
<p><strong>Note:</strong> the jar is actually inside the <code>spark.kubernetes.executor.docker.image</code> image.</p>
<p>At this point the submitted job still fails, and the error messages reveal two problems:</p>
<ul>
<li>The executor cannot find the driver pod.</li>
<li>The user <code>system:serviceaccount:spark-cluster:default</code> has no permission to get pod information in <code>spark-cluster</code>.</li>
</ul>
<p>I filed an issue for this: <a href="https://github.com/apache-spark-on-k8s/spark/issues/478" target="_blank">Failed to run the sample spark-pi test using spark-submit on the doc #478</a></p>
<p>You need to create a <code>serviceaccount</code> for the Spark cluster and a <code>rolebinding</code> that grants it the <code>edit</code> ClusterRole:</p>
<pre><code class="lang-bash">kubectl create serviceaccount spark --namespace spark-cluster
|
||
kubectl create rolebinding spark-edit --clusterrole=edit --serviceaccount=spark-cluster:spark --namespace=spark-cluster
|
||
</code></pre>
|
||
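<p>To confirm that the binding grants what the error message complained about, <code>kubectl auth can-i</code> can impersonate the service account. An optional quick check (sketch):</p>
<pre><code class="lang-bash"># Should print "yes" once the rolebinding above is in place.
kubectl auth can-i get pods \
  --as=system:serviceaccount:spark-cluster:spark \
  --namespace=spark-cluster
</code></pre>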
<p>This bug will be fixed in a newer release.</p>
<h2 id="用户指南">User Guide</h2>
<h3 id="编译">Building</h3>
<p>Fork the project and clone it locally:</p>
<pre><code class="lang-bash">git clone https://github.com/rootsongjc/spark.git
</code></pre>
<p>Before building, make sure Java 8 and Maven 3 are installed in your environment.</p>
<pre><code class="lang-bash"><span class="hljs-comment">## 第一次编译前需要安装依赖</span>
|
||
build/mvn install -Pkubernetes -pl resource-managers/kubernetes/core -am -DskipTests
|
||
|
||
<span class="hljs-comment">## 编译 spark on kubernetes</span>
|
||
build/mvn compile -Pkubernetes -pl resource-managers/kubernetes/core -am -DskipTests
|
||
|
||
<span class="hljs-comment">## 发布</span>
|
||
dev/make-distribution.sh --tgz -Phadoop-2.7 -Pkubernetes
|
||
</code></pre>
|
||
<p>The first build and distribution can take quite a while, so please be patient; if some dependencies cannot be downloaded, you may need to go through a proxy.</p>
<p>For the detailed development guide see: <a href="https://github.com/apache-spark-on-k8s/spark/blob/branch-2.2-kubernetes/resource-managers/kubernetes/README.md" target="_blank">https://github.com/apache-spark-on-k8s/spark/blob/branch-2.2-kubernetes/resource-managers/kubernetes/README.md</a></p>
<h3 id="构建镜像">Building the Images</h3>
<p>Use this script to build the container images automatically: <a href="https://github.com/apache-spark-on-k8s/spark/pull/488" target="_blank">https://github.com/apache-spark-on-k8s/spark/pull/488</a></p>
<p>Put the script in the <code>dist</code> directory and run:</p>
<pre><code class="lang-bash">./build-push-docker-images.sh -r sz-pg-oam-docker-hub-001.tendcloud.com/library -t v2.1.0-kubernetes-0.3.1-1 build
|
||
./build-push-docker-images.sh -r sz-pg-oam-docker-hub-001.tendcloud.com/library -t v2.1.0-kubernetes-0.3.1-1 push
|
||
</code></pre>
|
||
<p><strong>Note:</strong> if you are using macOS, your bash version may be too old and the script will fail; check your bash version first:</p>
<pre><code class="lang-bash">bash --version
GNU bash, version 3.2.57(1)-release (x86_64-apple-darwin16)
Copyright (C) 2007 Free Software Foundation, Inc.
</code></pre>
<p>That is the version information I got before upgrading bash; upgrade bash with the following command:</p>
<pre><code class="lang-bash">brew install bash
</code></pre>
<p>After the upgrade my bash version is <code>4.4.12(1)-release (x86_64-apple-darwin16.3.0)</code>.</p>
<p>Building and pushing the images to my private image registry produces the following images:</p>
<pre><code class="lang-bash">sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1
|
||
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-resource-staging-server:v2.1.0-kubernetes-0.3.1-1
|
||
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-init:v2.1.0-kubernetes-0.3.1-1
|
||
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-shuffle:v2.1.0-kubernetes-0.3.1-1
|
||
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor:v2.1.0-kubernetes-0.3.1-1
|
||
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor-py:v2.1.0-kubernetes-0.3.1-1
|
||
sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver-py:v2.1.0-kubernetes-0.3.1-1
|
||
</code></pre>
|
||
<h2 id="运行测试">运行测试</h2>
|
||
<p>在 <code>dist/bin</code> 目录下执行 spark-pi 测试:</p>
|
||
<pre><code class="lang-bash">./spark-submit \
|
||
--deploy-mode cluster \
|
||
--class org.apache.spark.examples.SparkPi \
|
||
--master k8s://https://172.20.0.113:6443 \
|
||
--kubernetes-namespace spark-cluster \
|
||
--conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \
|
||
--conf spark.executor.instances=5 \
|
||
--conf spark.app.name=spark-pi \
|
||
--conf spark.kubernetes.driver.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.executor.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.initcontainer.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-init:v2.1.0-kubernetes-0.3.1-1 \
|
||
<span class="hljs-built_in">local</span>:///opt/spark/examples/jars/spark-examples_2.11-2.2.0-k8s-0.4.0-SNAPSHOT.jar
|
||
</code></pre>
|
||
<p>For detailed parameter descriptions see: <a href="https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html" target="_blank">https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html</a></p>
<p><strong>Note:</strong> the file <code>local:///opt/spark/examples/jars/spark-examples_2.11-2.2.0-k8s-0.4.0-SNAPSHOT.jar</code> is inside the <code>spark-driver</code> and <code>spark-executor</code> images, which were built and pushed to the registry in the previous step.</p>
<p>The execution log shows:</p>
<pre><code class="lang-bash">2017-09-14 14:59:01 INFO Client:54 - Waiting <span class="hljs-keyword">for</span> application spark-pi to finish...
|
||
2017-09-14 14:59:01 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:
|
||
pod name: spark-pi-1505372339796-driver
|
||
namespace: spark-cluster
|
||
labels: spark-app-selector -> spark<span class="hljs-_">-f</span>4d3a5d3ad964a05a51feb6191d50357, spark-role -> driver
|
||
pod uid: 304cf440-991a-11e7-970c<span class="hljs-_">-f</span>4e9d49f8ed0
|
||
creation time: 2017-09-14T06:59:01Z
|
||
service account name: spark
|
||
volumes: spark-token-zr8wv
|
||
node name: N/A
|
||
start time: N/A
|
||
container images: N/A
|
||
phase: Pending
|
||
status: []
|
||
2017-09-14 14:59:01 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:
|
||
pod name: spark-pi-1505372339796-driver
|
||
namespace: spark-cluster
|
||
labels: spark-app-selector -> spark<span class="hljs-_">-f</span>4d3a5d3ad964a05a51feb6191d50357, spark-role -> driver
|
||
pod uid: 304cf440-991a-11e7-970c<span class="hljs-_">-f</span>4e9d49f8ed0
|
||
creation time: 2017-09-14T06:59:01Z
|
||
service account name: spark
|
||
volumes: spark-token-zr8wv
|
||
node name: 172.20.0.114
|
||
start time: N/A
|
||
container images: N/A
|
||
phase: Pending
|
||
status: []
|
||
2017-09-14 14:59:01 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:
|
||
pod name: spark-pi-1505372339796-driver
|
||
namespace: spark-cluster
|
||
labels: spark-app-selector -> spark<span class="hljs-_">-f</span>4d3a5d3ad964a05a51feb6191d50357, spark-role -> driver
|
||
pod uid: 304cf440-991a-11e7-970c<span class="hljs-_">-f</span>4e9d49f8ed0
|
||
creation time: 2017-09-14T06:59:01Z
|
||
service account name: spark
|
||
volumes: spark-token-zr8wv
|
||
node name: 172.20.0.114
|
||
start time: 2017-09-14T06:59:01Z
|
||
container images: sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1
|
||
phase: Pending
|
||
status: [ContainerStatus(containerID=null, image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1, imageID=, lastState=ContainerState(running=null, terminated=null, waiting=null, additionalProperties={}), name=spark-kubernetes-driver, ready=<span class="hljs-literal">false</span>, restartCount=0, state=ContainerState(running=null, terminated=null, waiting=ContainerStateWaiting(message=null, reason=ContainerCreating, additionalProperties={}), additionalProperties={}), additionalProperties={})]
|
||
2017-09-14 14:59:03 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:
|
||
pod name: spark-pi-1505372339796-driver
|
||
namespace: spark-cluster
|
||
labels: spark-app-selector -> spark<span class="hljs-_">-f</span>4d3a5d3ad964a05a51feb6191d50357, spark-role -> driver
|
||
pod uid: 304cf440-991a-11e7-970c<span class="hljs-_">-f</span>4e9d49f8ed0
|
||
creation time: 2017-09-14T06:59:01Z
|
||
service account name: spark
|
||
volumes: spark-token-zr8wv
|
||
node name: 172.20.0.114
|
||
start time: 2017-09-14T06:59:01Z
|
||
container images: sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1
|
||
phase: Running
|
||
status: [ContainerStatus(containerID=docker://5c5c821c482a1e35552adccb567020532b79244392374f25754f0050e6<span class="hljs-built_in">cd</span>4c62, image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1, imageID=docker-pullable://sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver@sha256:beb92a3e3f178e286d9e5baebdead88b5ba76d651f347ad2864bb6f8eda26f94, lastState=ContainerState(running=null, terminated=null, waiting=null, additionalProperties={}), name=spark-kubernetes-driver, ready=<span class="hljs-literal">true</span>, restartCount=0, state=ContainerState(running=ContainerStateRunning(startedAt=2017-09-14T06:59:02Z, additionalProperties={}), terminated=null, waiting=null, additionalProperties={}), additionalProperties={})]
|
||
2017-09-14 14:59:12 INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:
|
||
pod name: spark-pi-1505372339796-driver
|
||
namespace: spark-cluster
|
||
labels: spark-app-selector -> spark<span class="hljs-_">-f</span>4d3a5d3ad964a05a51feb6191d50357, spark-role -> driver
|
||
pod uid: 304cf440-991a-11e7-970c<span class="hljs-_">-f</span>4e9d49f8ed0
|
||
creation time: 2017-09-14T06:59:01Z
|
||
service account name: spark
|
||
volumes: spark-token-zr8wv
|
||
node name: 172.20.0.114
|
||
start time: 2017-09-14T06:59:01Z
|
||
container images: sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1
|
||
phase: Succeeded
|
||
status: [ContainerStatus(containerID=docker://5c5c821c482a1e35552adccb567020532b79244392374f25754f0050e6<span class="hljs-built_in">cd</span>4c62, image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1, imageID=docker-pullable://sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver@sha256:beb92a3e3f178e286d9e5baebdead88b5ba76d651f347ad2864bb6f8eda26f94, lastState=ContainerState(running=null, terminated=null, waiting=null, additionalProperties={}), name=spark-kubernetes-driver, ready=<span class="hljs-literal">false</span>, restartCount=0, state=ContainerState(running=null, terminated=ContainerStateTerminated(containerID=docker://5c5c821c482a1e35552adccb567020532b79244392374f25754f0050e6<span class="hljs-built_in">cd</span>4c62, <span class="hljs-built_in">exit</span>Code=0, finishedAt=2017-09-14T06:59:11Z, message=null, reason=Completed, signal=null, startedAt=null, additionalProperties={}), waiting=null, additionalProperties={}), additionalProperties={})]
|
||
2017-09-14 14:59:12 INFO LoggingPodStatusWatcherImpl:54 - Container final statuses:
|
||
|
||
|
||
Container name: spark-kubernetes-driver
|
||
Container image: sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1
|
||
Container state: Terminated
|
||
Exit code: 0
|
||
2017-09-14 14:59:12 INFO Client:54 - Application spark-pi finished.
|
||
</code></pre>
|
||
<p>The log shows the status of the running job.</p>
<p>Use the following command to watch the pods that Kubernetes starts:</p>
<pre><code class="lang-bash">kubectl --namespace spark-cluster get pods -w
</code></pre>
<p>You will see the pod information for <code>spark-driver</code> and <code>spark-exec</code>.</p>
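<p>To follow the driver's output (including the computed value of Pi for this example), the driver pod's log can be tailed. A sketch, using the driver pod name that appears in the log above:</p>
<pre><code class="lang-bash"># Tail the driver log; substitute the driver pod name from your own run.
kubectl --namespace spark-cluster logs -f spark-pi-1505372339796-driver
</code></pre>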
<h2 id="依赖管理">依赖管理</h2>
|
||
<p>上文中我们在运行测试程序时,命令行中指定的 jar 文件已包含在 docker 镜像中,是不是说我们每次提交任务都需要重新创建一个镜像呢?非也!如果真是这样也太麻烦了。</p>
|
||
<h4 id="创建-resource-staging-server">创建 resource staging server</h4>
|
||
<p>为了方便用户提交任务,不需要每次提交任务的时候都创建一个镜像,我们使用了 <strong>resource staging server</strong> 。</p>
|
||
<pre><code>kubectl create -f conf/kubernetes-resource-staging-server.yaml
|
||
</code></pre><p>我们同样将其部署在 <code>spark-cluster</code> namespace 下,该 yaml 文件见 <a href="https://github.com/rootsongjc/kubernetes-handbook" target="_blank">kubernetes-handbook</a> 的 <code>manifests/spark-with-kubernetes-native-scheduler</code> 目录。</p>
|
||
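<p>The URI passed later as <code>spark.kubernetes.resourceStagingServer.uri</code> (<code>http://172.20.0.114:31000</code> below) is a node IP plus the service's NodePort. A sketch of how to look it up, assuming the service keeps the <code>spark-resource-staging-service</code> name used in the official yaml:</p>
<pre><code class="lang-bash"># Show the NodePort of the resource staging server service.
kubectl -n spark-cluster get svc spark-resource-staging-service -o wide
</code></pre>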
<h4 id="优化">优化</h4>
|
||
<p>其中有一点需要优化,在使用下面的命令提交任务时,使用 <code>--conf spark.kubernetes.resourceStagingServer.uri</code> 参数指定 <em>resource staging server</em> 地址,用户不应该关注 <em>resource staging server</em> 究竟运行在哪台宿主机上,可以使用下面两种方式实现:</p>
|
||
<ul>
|
||
<li>使用 <code>nodeSelector</code> 将 <em>resource staging server</em> 固定调度到某一台机器上,该地址依然使用宿主机的 IP 地址</li>
|
||
<li>改变 <code>spark-resource-staging-service</code> service 的 type 为 <strong>ClusterIP</strong>, 然后使用 <strong>Ingress</strong> 将其暴露到集群外部,然后加入的内网 DNS 里,用户使用 DNS 名称指定 <em>resource staging server</em> 的地址。</li>
|
||
</ul>
|
||
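<p>For the second option, the first step — switching the service to <strong>ClusterIP</strong> — can be done with a patch; the Ingress and internal DNS entries depend on your environment and are left out here. A sketch:</p>
<pre><code class="lang-bash"># Switch the staging server service from NodePort to ClusterIP.
kubectl -n spark-cluster patch svc spark-resource-staging-service \
  -p '{"spec":{"type":"ClusterIP"}}'
</code></pre>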
<p>Then you can run the following command to submit a local jar to run on Kubernetes.</p>
<pre><code class="lang-bash">./spark-submit \
|
||
--deploy-mode cluster \
|
||
--class org.apache.spark.examples.SparkPi \
|
||
--master k8s://https://172.20.0.113:6443 \
|
||
--kubernetes-namespace spark-cluster \
|
||
--conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \
|
||
--conf spark.executor.instances=5 \
|
||
--conf spark.app.name=spark-pi \
|
||
--conf spark.kubernetes.driver.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.executor.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.initcontainer.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-init:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.resourceStagingServer.uri=http://172.20.0.114:31000 \
|
||
../examples/jars/spark-examples_2.11-2.2.0-k8s-0.4.0-SNAPSHOT.jar
|
||
</code></pre>
|
||
<p>This command submits the local <code>../examples/jars/spark-examples_2.11-2.2.0-k8s-0.4.0-SNAPSHOT.jar</code> file to the <em>resource staging server</em>; the executors fetch the jar from that server and run it, so users no longer need to build an image for every job they submit.</p>
<p>See: <a href="https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html#dependency-management" target="_blank">https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html#dependency-management</a></p>
<h4 id="设置-hdfs-用户">Setting the HDFS user</h4>
<p>If the Hadoop cluster does not have Kerberos authentication enabled, you can set the user that Spark uses when talking to HDFS by passing the following four environment variables at <code>spark-submit</code> time:</p>
<pre><code class="lang-bash"> --conf spark.kubernetes.driverEnv.SPARK_USER=hadoop
|
||
--conf spark.kubernetes.driverEnv.HADOOP_USER_NAME=hadoop
|
||
--conf spark.executorEnv.HADOOP_USER_NAME=hadoop
|
||
--conf spark.executorEnv.SPARK_USER=hadoop
|
||
</code></pre>
|
||
<p>An example command that submits a local jar as the hadoop user:</p>
<pre><code class="lang-bash">./spark-submit \
|
||
--deploy-mode cluster \
|
||
--class com.talkingdata.alluxio.hadooptest \
|
||
--master k8s://https://172.20.0.113:6443 \
|
||
--kubernetes-namespace spark-cluster \
|
||
--conf spark.kubernetes.driverEnv.SPARK_USER=hadoop \
|
||
--conf spark.kubernetes.driverEnv.HADOOP_USER_NAME=hadoop \
|
||
--conf spark.executorEnv.HADOOP_USER_NAME=hadoop \
|
||
--conf spark.executorEnv.SPARK_USER=hadoop \
|
||
--conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \
|
||
--conf spark.executor.instances=5 \
|
||
--conf spark.app.name=spark-pi \
|
||
--conf spark.kubernetes.driver.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-driver:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.executor.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-executor:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.initcontainer.docker.image=sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-init:v2.1.0-kubernetes-0.3.1-1 \
|
||
--conf spark.kubernetes.resourceStagingServer.uri=http://172.20.0.114:31000 \
|
||
~/Downloads/tendcloud_2.10-1.0.jar
|
||
</code></pre>
|
||
<p>See: <a href="https://github.com/apache-spark-on-k8s/spark/issues/408" target="_blank">https://github.com/apache-spark-on-k8s/spark/issues/408</a></p>
<h2 id="参考">References</h2>
<p><a href="http://lxw1234.com/archives/2015/12/593.htm" target="_blank">Spark Dynamic Resource Allocation (in Chinese)</a></p>
<p><a href="https://apache-spark-on-k8s.github.io/userdocs/running-on-kubernetes.html" target="_blank">Running Spark on Kubernetes</a></p>
<p><a href="https://issues.apache.org/jira/browse/SPARK-18278" target="_blank">Apache Spark JIRA issue SPARK-18278 - SPIP: Support native submission of spark jobs to a kubernetes cluster</a></p>
<p><a href="https://github.com/kubernetes/kubernetes/issues/34377" target="_blank">Kubernetes GitHub issue #34377 - Support Spark natively in Kubernetes</a></p>
<p><a href="https://github.com/kubernetes/kubernetes/tree/master/examples/spark" target="_blank">Kubernetes Spark example</a></p>
<p><a href="https://github.com/rootsongjc/spark-on-kubernetes" target="_blank">https://github.com/rootsongjc/spark-on-kubernetes</a></p>
<p><a href="https://github.com/apache-spark-on-k8s/spark/blob/branch-2.2-kubernetes/resource-managers/kubernetes/architecture-docs/scheduler-backend.md" target="_blank">Scheduler backend</a></p>
dashboard-heapster","alt":"dashboard-heapster","nro":27,"url":"../images/kubernetes-dashboard-with-heapster.jpg","index":1,"caption_template":"Figure: _CAPTION_","label":"dashboard-heapster","attributes":{},"skip":false,"key":"1.4.1.9.1"},{"backlink":"practice/heapster-addon-installation.html#fig1.4.1.9.2","level":"1.4.1.9","list_caption":"Figure: grafana","alt":"grafana","nro":28,"url":"../images/kubernetes-heapster-grafana.jpg","index":2,"caption_template":"Figure: _CAPTION_","label":"grafana","attributes":{},"skip":false,"key":"1.4.1.9.2"},{"backlink":"practice/heapster-addon-installation.html#fig1.4.1.9.3","level":"1.4.1.9","list_caption":"Figure: kubernetes-influxdb-heapster","alt":"kubernetes-influxdb-heapster","nro":29,"url":"../images/kubernetes-influxdb-heapster.jpg","index":3,"caption_template":"Figure: _CAPTION_","label":"kubernetes-influxdb-heapster","attributes":{},"skip":false,"key":"1.4.1.9.3"},{"backlink":"practice/efk-addon-installation.html#fig1.4.1.10.1","level":"1.4.1.10","list_caption":"Figure: es-setting","alt":"es-setting","nro":30,"url":"../images/es-setting.png","index":1,"caption_template":"Figure: _CAPTION_","label":"es-setting","attributes":{},"skip":false,"key":"1.4.1.10.1"},{"backlink":"practice/efk-addon-installation.html#fig1.4.1.10.2","level":"1.4.1.10","list_caption":"Figure: es-home","alt":"es-home","nro":31,"url":"../images/kubernetes-efk-kibana.jpg","index":2,"caption_template":"Figure: _CAPTION_","label":"es-home","attributes":{},"skip":false,"key":"1.4.1.10.2"},{"backlink":"practice/traefik-ingress-installation.html#fig1.4.2.1.1","level":"1.4.2.1","list_caption":"Figure: kubernetes-dashboard","alt":"kubernetes-dashboard","nro":32,"url":"../images/traefik-dashboard.jpg","index":1,"caption_template":"Figure: _CAPTION_","label":"kubernetes-dashboard","attributes":{},"skip":false,"key":"1.4.2.1.1"},{"backlink":"practice/traefik-ingress-installation.html#fig1.4.2.1.2","level":"1.4.2.1","list_caption":"Figure: traefik-nginx","alt":"traefik-nginx","nro":33,"url":"../images/traefik-nginx.jpg","index":2,"caption_template":"Figure: _CAPTION_","label":"traefik-nginx","attributes":{},"skip":false,"key":"1.4.2.1.2"},{"backlink":"practice/traefik-ingress-installation.html#fig1.4.2.1.3","level":"1.4.2.1","list_caption":"Figure: traefik-guestbook","alt":"traefik-guestbook","nro":34,"url":"../images/traefik-guestbook.jpg","index":3,"caption_template":"Figure: _CAPTION_","label":"traefik-guestbook","attributes":{},"skip":false,"key":"1.4.2.1.3"},{"backlink":"practice/distributed-load-test.html#fig1.4.2.2.1","level":"1.4.2.2","list_caption":"Figure: traefik-dashboard-locust","alt":"traefik-dashboard-locust","nro":35,"url":"../images/traefik-dashboard-locust.jpg","index":1,"caption_template":"Figure: _CAPTION_","label":"traefik-dashboard-locust","attributes":{},"skip":false,"key":"1.4.2.2.1"},{"backlink":"practice/distributed-load-test.html#fig1.4.2.2.2","level":"1.4.2.2","list_caption":"Figure: locust-start-swarming","alt":"locust-start-swarming","nro":36,"url":"../images/locust-start-swarming.jpg","index":2,"caption_template":"Figure: _CAPTION_","label":"locust-start-swarming","attributes":{},"skip":false,"key":"1.4.2.2.2"},{"backlink":"practice/distributed-load-test.html#fig1.4.2.2.3","level":"1.4.2.2","list_caption":"Figure: sample-webapp-rc","alt":"sample-webapp-rc","nro":37,"url":"../images/sample-webapp-rc.jpg","index":3,"caption_template":"Figure: 
_CAPTION_","label":"sample-webapp-rc","attributes":{},"skip":false,"key":"1.4.2.2.3"},{"backlink":"practice/distributed-load-test.html#fig1.4.2.2.4","level":"1.4.2.2","list_caption":"Figure: locust-dashboard","alt":"locust-dashboard","nro":38,"url":"../images/locust-dashboard.jpg","index":4,"caption_template":"Figure: _CAPTION_","label":"locust-dashboard","attributes":{},"skip":false,"key":"1.4.2.2.4"},{"backlink":"practice/network-and-cluster-perfermance-test.html#fig1.4.2.3.1","level":"1.4.2.3","list_caption":"Figure: kubernetes-dashboard","alt":"kubernetes-dashboard","nro":39,"url":"http://olz1di9xf.bkt.clouddn.com/kubenetes-e2e-test.jpg","index":1,"caption_template":"Figure: _CAPTION_","label":"kubernetes-dashboard","attributes":{},"skip":false,"key":"1.4.2.3.1"},{"backlink":"practice/network-and-cluster-perfermance-test.html#fig1.4.2.3.2","level":"1.4.2.3","list_caption":"Figure: locust-test","alt":"locust-test","nro":40,"url":"http://olz1di9xf.bkt.clouddn.com/kubernetes-locust-test.jpg","index":2,"caption_template":"Figure: _CAPTION_","label":"locust-test","attributes":{},"skip":false,"key":"1.4.2.3.2"},{"backlink":"practice/edge-node-configuration.html#fig1.4.2.4.1","level":"1.4.2.4","list_caption":"Figure: 边缘节点架构","alt":"边缘节点架构","nro":41,"url":"../images/kubernetes-edge-node-architecture.png","index":1,"caption_template":"Figure: _CAPTION_","label":"边缘节点架构","attributes":{},"skip":false,"key":"1.4.2.4.1"},{"backlink":"practice/app-log-collection.html#fig1.4.3.2.1","level":"1.4.3.2","list_caption":"Figure: logstash日志收集架构图","alt":"logstash日志收集架构图","nro":42,"url":"../images/filebeat-log-collector.png","index":1,"caption_template":"Figure: _CAPTION_","label":"logstash日志收集架构图","attributes":{},"skip":false,"key":"1.4.3.2.1"},{"backlink":"practice/app-log-collection.html#fig1.4.3.2.2","level":"1.4.3.2","list_caption":"Figure: Kibana页面","alt":"Kibana页面","nro":43,"url":"../images/filebeat-docker-test.jpg","index":2,"caption_template":"Figure: _CAPTION_","label":"Kibana页面","attributes":{},"skip":false,"key":"1.4.3.2.2"},{"backlink":"practice/app-log-collection.html#fig1.4.3.2.3","level":"1.4.3.2","list_caption":"Figure: filebeat收集的日志详细信息","alt":"filebeat收集的日志详细信息","nro":44,"url":"../images/kubernetes-filebeat-detail.png","index":3,"caption_template":"Figure: _CAPTION_","label":"filebeat收集的日志详细信息","attributes":{},"skip":false,"key":"1.4.3.2.3"},{"backlink":"practice/monitor.html#fig1.4.3.4.1","level":"1.4.3.4","list_caption":"Figure: Kubernetes集群中的监控","alt":"Kubernetes集群中的监控","nro":45,"url":"../images/monitoring-in-kubernetes.png","index":1,"caption_template":"Figure: _CAPTION_","label":"Kubernetes集群中的监控","attributes":{},"skip":false,"key":"1.4.3.4.1"},{"backlink":"practice/monitor.html#fig1.4.3.4.2","level":"1.4.3.4","list_caption":"Figure: kubernetes的容器命名规则示意图","alt":"kubernetes的容器命名规则示意图","nro":46,"url":"../images/kubernetes-container-naming-rule.jpg","index":2,"caption_template":"Figure: _CAPTION_","label":"kubernetes的容器命名规则示意图","attributes":{},"skip":false,"key":"1.4.3.4.2"},{"backlink":"practice/monitor.html#fig1.4.3.4.3","level":"1.4.3.4","list_caption":"Figure: Heapster架构图(改进版)","alt":"Heapster架构图(改进版)","nro":47,"url":"../images/kubernetes-heapster-monitoring.png","index":3,"caption_template":"Figure: _CAPTION_","label":"Heapster架构图(改进版)","attributes":{},"skip":false,"key":"1.4.3.4.3"},{"backlink":"practice/monitor.html#fig1.4.3.4.4","level":"1.4.3.4","list_caption":"Figure: 
应用监控架构图","alt":"应用监控架构图","nro":48,"url":"../images/kubernetes-app-monitoring.png","index":4,"caption_template":"Figure: _CAPTION_","label":"应用监控架构图","attributes":{},"skip":false,"key":"1.4.3.4.4"},{"backlink":"practice/monitor.html#fig1.4.3.4.5","level":"1.4.3.4","list_caption":"Figure: 应用拓扑图","alt":"应用拓扑图","nro":49,"url":"../images/weave-scope-service-topology.jpg","index":5,"caption_template":"Figure: _CAPTION_","label":"应用拓扑图","attributes":{},"skip":false,"key":"1.4.3.4.5"},{"backlink":"practice/jenkins-ci-cd.html#fig1.4.3.5.1","level":"1.4.3.5","list_caption":"Figure: 基于Jenkins的持续集成与发布","alt":"基于Jenkins的持续集成与发布","nro":50,"url":"../images/kubernetes-jenkins-ci-cd.png","index":1,"caption_template":"Figure: _CAPTION_","label":"基于Jenkins的持续集成与发布","attributes":{},"skip":false,"key":"1.4.3.5.1"},{"backlink":"practice/data-persistence-problem.html#fig1.4.3.6.1","level":"1.4.3.6","list_caption":"Figure: 日志持久化收集解决方案示意图","alt":"日志持久化收集解决方案示意图","nro":51,"url":"../images/log-persistence-logstash.png","index":1,"caption_template":"Figure: _CAPTION_","label":"日志持久化收集解决方案示意图","attributes":{},"skip":false,"key":"1.4.3.6.1"},{"backlink":"practice/storage-for-containers-using-glusterfs-with-openshift.html#fig1.4.4.1.2.1","level":"1.4.4.1.2","list_caption":"Figure: Screen Shot 2017-03-23 at 21.50.34","alt":"Screen Shot 2017-03-23 at 21.50.34","nro":52,"url":"https://keithtenzer.files.wordpress.com/2017/03/screen-shot-2017-03-23-at-21-50-34.png?w=440","index":1,"caption_template":"Figure: _CAPTION_","label":"Screen Shot 2017-03-23 at 21.50.34","attributes":{},"skip":false,"key":"1.4.4.1.2.1"},{"backlink":"practice/storage-for-containers-using-glusterfs-with-openshift.html#fig1.4.4.1.2.2","level":"1.4.4.1.2","list_caption":"Figure: Screen Shot 2017-03-24 at 11.09.34.png","alt":"Screen Shot 2017-03-24 at 11.09.34.png","nro":53,"url":"https://keithtenzer.files.wordpress.com/2017/03/screen-shot-2017-03-24-at-11-09-341.png?w=440","index":2,"caption_template":"Figure: _CAPTION_","label":"Screen Shot 2017-03-24 at 11.09.34.png","attributes":{},"skip":false,"key":"1.4.4.1.2.2"},{"backlink":"usecases/service-discovery-in-microservices.html#fig1.5.1.1.1","level":"1.5.1.1","list_caption":"Figure: 微服务中的服务发现","alt":"微服务中的服务发现","nro":54,"url":"../images/service-discovery-in-microservices.png","index":1,"caption_template":"Figure: _CAPTION_","label":"微服务中的服务发现","attributes":{},"skip":false,"key":"1.5.1.1.1"},{"backlink":"usecases/service-mesh.html#fig1.5.2.1","level":"1.5.2","list_caption":"Figure: Service Mesh 架构图","alt":"Service Mesh 架构图","nro":55,"url":"../images/serivce-mesh-control-plane.png","index":1,"caption_template":"Figure: _CAPTION_","label":"Service Mesh 架构图","attributes":{},"skip":false,"key":"1.5.2.1"},{"backlink":"usecases/istio.html#fig1.5.2.1.1","level":"1.5.2.1","list_caption":"Figure: Istio架构图","alt":"Istio架构图","nro":56,"url":"../images/istio-arch.jpg","index":1,"caption_template":"Figure: _CAPTION_","label":"Istio架构图","attributes":{},"skip":false,"key":"1.5.2.1.1"},{"backlink":"usecases/istio-installation.html#fig1.5.2.1.1.1","level":"1.5.2.1.1","list_caption":"Figure: BookInfo Sample应用架构图","alt":"BookInfo Sample应用架构图","nro":57,"url":"../images/bookinfo-sample-arch.png","index":1,"caption_template":"Figure: _CAPTION_","label":"BookInfo Sample应用架构图","attributes":{},"skip":false,"key":"1.5.2.1.1.1"},{"backlink":"usecases/istio-installation.html#fig1.5.2.1.1.2","level":"1.5.2.1.1","list_caption":"Figure: BookInfo Sample页面","alt":"BookInfo 
Sample页面","nro":58,"url":"../images/bookinfo-sample.jpg","index":2,"caption_template":"Figure: _CAPTION_","label":"BookInfo Sample页面","attributes":{},"skip":false,"key":"1.5.2.1.1.2"},{"backlink":"usecases/istio-installation.html#fig1.5.2.1.1.3","level":"1.5.2.1.1","list_caption":"Figure: Istio Grafana界面","alt":"Istio Grafana界面","nro":59,"url":"../images/istio-grafana.jpg","index":3,"caption_template":"Figure: _CAPTION_","label":"Istio Grafana界面","attributes":{},"skip":false,"key":"1.5.2.1.1.3"},{"backlink":"usecases/istio-installation.html#fig1.5.2.1.1.4","level":"1.5.2.1.1","list_caption":"Figure: Prometheus页面","alt":"Prometheus页面","nro":60,"url":"../images/istio-prometheus.jpg","index":4,"caption_template":"Figure: _CAPTION_","label":"Prometheus页面","attributes":{},"skip":false,"key":"1.5.2.1.1.4"},{"backlink":"usecases/istio-installation.html#fig1.5.2.1.1.5","level":"1.5.2.1.1","list_caption":"Figure: Zipkin页面","alt":"Zipkin页面","nro":61,"url":"../images/istio-zipkin.jpg","index":5,"caption_template":"Figure: _CAPTION_","label":"Zipkin页面","attributes":{},"skip":false,"key":"1.5.2.1.1.5"},{"backlink":"usecases/istio-installation.html#fig1.5.2.1.1.6","level":"1.5.2.1.1","list_caption":"Figure: ServiceGraph页面","alt":"ServiceGraph页面","nro":62,"url":"../images/istio-servicegraph.jpg","index":6,"caption_template":"Figure: _CAPTION_","label":"ServiceGraph页面","attributes":{},"skip":false,"key":"1.5.2.1.1.6"},{"backlink":"usecases/linkerd.html#fig1.5.2.2.1","level":"1.5.2.2","list_caption":"Figure: source https://linkerd.io","alt":"source https://linkerd.io","nro":63,"url":"https://linkerd.io/images/diagram-individual-instance.png","index":1,"caption_template":"Figure: _CAPTION_","label":"source https://linkerd.io","attributes":{},"skip":false,"key":"1.5.2.2.1"},{"backlink":"usecases/linkerd-user-guide.html#fig1.5.2.2.1.1","level":"1.5.2.2.1","list_caption":"Figure: Jenkins pipeline","alt":"Jenkins pipeline","nro":64,"url":"../images/linkerd-jenkins-pipeline.jpg","index":1,"caption_template":"Figure: _CAPTION_","label":"Jenkins pipeline","attributes":{},"skip":false,"key":"1.5.2.2.1.1"},{"backlink":"usecases/linkerd-user-guide.html#fig1.5.2.2.1.2","level":"1.5.2.2.1","list_caption":"Figure: Jenkins config","alt":"Jenkins config","nro":65,"url":"../images/linkerd-jenkins.jpg","index":2,"caption_template":"Figure: _CAPTION_","label":"Jenkins config","attributes":{},"skip":false,"key":"1.5.2.2.1.2"},{"backlink":"usecases/linkerd-user-guide.html#fig1.5.2.2.1.3","level":"1.5.2.2.1","list_caption":"Figure: namerd","alt":"namerd","nro":66,"url":"../images/namerd-internal.jpg","index":3,"caption_template":"Figure: _CAPTION_","label":"namerd","attributes":{},"skip":false,"key":"1.5.2.2.1.3"},{"backlink":"usecases/linkerd-user-guide.html#fig1.5.2.2.1.4","level":"1.5.2.2.1","list_caption":"Figure: linkerd监控","alt":"linkerd监控","nro":67,"url":"../images/linkerd-helloworld-outgoing.jpg","index":4,"caption_template":"Figure: _CAPTION_","label":"linkerd监控","attributes":{},"skip":false,"key":"1.5.2.2.1.4"},{"backlink":"usecases/linkerd-user-guide.html#fig1.5.2.2.1.5","level":"1.5.2.2.1","list_caption":"Figure: linkerd监控","alt":"linkerd监控","nro":68,"url":"../images/linkerd-helloworld-incoming.jpg","index":5,"caption_template":"Figure: _CAPTION_","label":"linkerd监控","attributes":{},"skip":false,"key":"1.5.2.2.1.5"},{"backlink":"usecases/linkerd-user-guide.html#fig1.5.2.2.1.6","level":"1.5.2.2.1","list_caption":"Figure: 
linkerd性能监控","alt":"linkerd性能监控","nro":69,"url":"../images/linkerd-grafana.png","index":6,"caption_template":"Figure: _CAPTION_","label":"linkerd性能监控","attributes":{},"skip":false,"key":"1.5.2.2.1.6"},{"backlink":"usecases/linkerd-user-guide.html#fig1.5.2.2.1.7","level":"1.5.2.2.1","list_caption":"Figure: Linkerd ingress controller","alt":"Linkerd ingress controller","nro":70,"url":"../images/linkerd-ingress-controller.jpg","index":7,"caption_template":"Figure: _CAPTION_","label":"Linkerd ingress controller","attributes":{},"skip":false,"key":"1.5.2.2.1.7"},{"backlink":"usecases/spark-standalone-on-kubernetes.html#fig1.5.3.1.1","level":"1.5.3.1","list_caption":"Figure: spark master ui","alt":"spark master ui","nro":71,"url":"../images/spark-ui.jpg","index":1,"caption_template":"Figure: _CAPTION_","label":"spark master ui","attributes":{},"skip":false,"key":"1.5.3.1.1"},{"backlink":"usecases/spark-standalone-on-kubernetes.html#fig1.5.3.1.2","level":"1.5.3.1","list_caption":"Figure: zeppelin ui","alt":"zeppelin ui","nro":72,"url":"../images/zeppelin-ui.jpg","index":2,"caption_template":"Figure: _CAPTION_","label":"zeppelin ui","attributes":{},"skip":false,"key":"1.5.3.1.2"},{"backlink":"develop/client-go-sample.html#fig1.6.3.1","level":"1.6.3","list_caption":"Figure: 使用kubernetes dashboard进行故障排查","alt":"使用kubernetes dashboard进行故障排查","nro":73,"url":"../images/kubernetes-client-go-sample-update.jpg","index":1,"caption_template":"Figure: _CAPTION_","label":"使用kubernetes dashboard进行故障排查","attributes":{},"skip":false,"key":"1.6.3.1"},{"backlink":"appendix/issues.html#fig1.7.2.1","level":"1.7.2","list_caption":"Figure: pvc-storage-limit","alt":"pvc-storage-limit","nro":74,"url":"../images/pvc-storage-limit.jpg","index":1,"caption_template":"Figure: _CAPTION_","label":"pvc-storage-limit","attributes":{},"skip":false,"key":"1.7.2.1"}]},"title":"Kubernetes Handbook","language":"zh-hans","gitbook":"*","description":"Kubernetes中文指南/实践手册","image-captions":{"caption":"图片 - _CAPTION_"}},"file":{"path":"usecases/running-spark-with-kubernetes-native-scheduler.md","mtime":"2017-09-19T11:32:49.000Z","type":"markdown"},"gitbook":{"version":"3.2.2","time":"2017-09-21T04:10:18.351Z"},"basePath":"..","book":{"language":""}});
});
</script>
</div>
<script src="../gitbook/gitbook.js"></script>
<script src="../gitbook/theme.js"></script>
<script src="../gitbook/gitbook-plugin-github/plugin.js"></script>
<script src="../gitbook/gitbook-plugin-splitter/splitter.js"></script>
<script src="../gitbook/gitbook-plugin-page-toc-button/plugin.js"></script>
<script src="../gitbook/gitbook-plugin-editlink/plugin.js"></script>
<script src="../gitbook/gitbook-plugin-back-to-top-button/plugin.js"></script>
<script src="../gitbook/gitbook-plugin-search-plus/jquery.mark.min.js"></script>
<script src="../gitbook/gitbook-plugin-search-plus/search.js"></script>
<script src="../gitbook/gitbook-plugin-sharing/buttons.js"></script>
<script src="../gitbook/gitbook-plugin-fontsettings/fontsettings.js"></script>
</body>
</html>