Add the YAML manifests and image build script needed for spark-on-kubernetes

pull/51/head
Jimmy Song 2017-09-15 19:32:29 +08:00
parent 0bbc871650
commit 6577f18131
2 changed files with 154 additions and 0 deletions

@@ -0,0 +1,72 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script builds and pushes docker images when run from a release of Spark
# with Kubernetes support.
# Map from image name to the Dockerfile that builds it.
declare -A path=( [spark-driver]=dockerfiles/driver/Dockerfile \
                  [spark-executor]=dockerfiles/executor/Dockerfile \
                  [spark-driver-py]=dockerfiles/driver-py/Dockerfile \
                  [spark-executor-py]=dockerfiles/executor-py/Dockerfile \
                  [spark-init]=dockerfiles/init-container/Dockerfile \
                  [spark-shuffle]=dockerfiles/shuffle-service/Dockerfile \
                  [spark-resource-staging-server]=dockerfiles/resource-staging-server/Dockerfile )

# Build the shared spark-base image first, then every image in the map above.
function build {
  docker build -t spark-base -f dockerfiles/spark-base/Dockerfile .
  for image in "${!path[@]}"; do
    docker build -t ${REPO}/$image:${TAG} -f ${path[$image]} .
  done
}

function push {
  for image in "${!path[@]}"; do
    docker push ${REPO}/$image:${TAG}
  done
}

function usage {
  echo "Usage: ./sbin/build-push-docker-images.sh -r <repo> -t <tag> build"
  echo "       ./sbin/build-push-docker-images.sh -r <repo> -t <tag> push"
  echo "for example: ./sbin/build-push-docker-images.sh -r docker.io/kubespark -t v2.2.0 push"
}

if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then
  usage
  exit 0
fi

while getopts r:t: option
do
  case "${option}" in
    r) REPO=${OPTARG};;
    t) TAG=${OPTARG};;
  esac
done

if [ -z "$REPO" ] || [ -z "$TAG" ]; then
  usage
else
  # The last positional argument selects the action.
  case "${@: -1}" in
    build) build;;
    push) push;;
    *) usage;;
  esac
fi
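
With a Spark distribution that includes Kubernetes support unpacked on the build machine, the typical workflow is to build all of the images and then push them to a registry. A minimal sketch, assuming the script is run from the distribution root (the directory containing dockerfiles/) and reusing the private registry and tag that appear in the YAML below; substitute your own values:

cd /path/to/spark-dist   # hypothetical path to the unpacked distribution
./sbin/build-push-docker-images.sh -r sz-pg-oam-docker-hub-001.tendcloud.com/library -t v2.1.0-kubernetes-0.3.1-1 build
./sbin/build-push-docker-images.sh -r sz-pg-oam-docker-hub-001.tendcloud.com/library -t v2.1.0-kubernetes-0.3.1-1 push

Note that build also produces a local spark-base image that the per-component Dockerfiles build on; only the images named in the path map are pushed.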

@@ -0,0 +1,82 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: spark-resource-staging-server
  namespace: spark-cluster
spec:
  replicas: 1
  template:
    metadata:
      labels:
        resource-staging-server-instance: default
    spec:
      volumes:
      - name: resource-staging-server-properties
        configMap:
          name: spark-resource-staging-server-config
      containers:
      - name: spark-resource-staging-server
        image: sz-pg-oam-docker-hub-001.tendcloud.com/library/spark-resource-staging-server:v2.1.0-kubernetes-0.3.1-1
        resources:
          requests:
            cpu: 100m
            memory: 256Mi
          limits:
            cpu: 1000m
            memory: 2560Mi
        volumeMounts:
        - name: resource-staging-server-properties
          mountPath: '/etc/spark-resource-staging-server'
        args:
        - '/etc/spark-resource-staging-server/resource-staging-server.properties'
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: spark-resource-staging-server-config
  namespace: spark-cluster
data:
  resource-staging-server.properties: |
    spark.kubernetes.resourceStagingServer.port=10000
    spark.ssl.kubernetes.resourceStagingServer.enabled=false
    # Other possible properties are listed below, primarily for setting up TLS. The keyStore,
    # password, and PEM paths given here should correspond to files that are securely mounted
    # into the resource staging server container, e.g. via secret volumes.
    # spark.ssl.kubernetes.resourceStagingServer.keyStore=/mnt/secrets/resource-staging-server/keyStore.jks
    # spark.ssl.kubernetes.resourceStagingServer.keyStorePassword=changeit
    # spark.ssl.kubernetes.resourceStagingServer.keyPassword=changeit
    # spark.ssl.kubernetes.resourceStagingServer.keyStorePasswordFile=/mnt/secrets/resource-staging-server/keystore-password.txt
    # spark.ssl.kubernetes.resourceStagingServer.keyPasswordFile=/mnt/secrets/resource-staging-server/keystore-key-password.txt
    # spark.ssl.kubernetes.resourceStagingServer.keyPem=/mnt/secrets/resource-staging-server/key.pem
    # spark.ssl.kubernetes.resourceStagingServer.serverCertPem=/mnt/secrets/resource-staging-server/cert.pem
---
apiVersion: v1
kind: Service
metadata:
  name: spark-resource-staging-service
  namespace: spark-cluster
spec:
  type: NodePort
  selector:
    resource-staging-server-instance: default
  ports:
  - protocol: TCP
    port: 10000
    targetPort: 10000
    nodePort: 31000
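
To use the staging server, apply the manifests above and point spark-submit at the service's NodePort, which is exposed on every node at port 31000 so that the URI is reachable from the submitting machine outside the cluster. A minimal sketch, assuming the manifests are saved as spark-resource-staging-server.yaml (hypothetical filename) and using the submission flags documented for the apache-spark-on-k8s fork (v2.1.0-kubernetes-0.3.1); treat the exact flag and property names as assumptions, and replace the <repo>, <tag>, <k8s-apiserver>, and <node-ip> placeholders with your own values:

kubectl create namespace spark-cluster   # the manifests expect this namespace
kubectl apply -f spark-resource-staging-server.yaml

# Submit a jar that lives on the local machine; spark-submit uploads it through
# the staging server URI instead of requiring it to be baked into the image.
bin/spark-submit \
  --deploy-mode cluster \
  --master k8s://https://<k8s-apiserver>:6443 \
  --kubernetes-namespace spark-cluster \
  --conf spark.kubernetes.driver.docker.image=<repo>/spark-driver:<tag> \
  --conf spark.kubernetes.executor.docker.image=<repo>/spark-executor:<tag> \
  --conf spark.kubernetes.initcontainer.docker.image=<repo>/spark-init:<tag> \
  --conf spark.kubernetes.resourceStagingServer.uri=http://<node-ip>:31000 \
  --class com.example.MyApp \
  /path/to/local/app.jar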