diff --git a/README.md b/README.md
index 4265f94f..7e81d907 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 Docker Compose Files
 ===
-Some typical docker compose templates.
+Some typical Docker Compose examples.
 
 # Install Docker and Docker Compose
 Take ubuntu for example
@@ -14,11 +14,17 @@ $ sudo pip install docker-compose
 
 See [https://docs.docker.com/compose/](https://docs.docker.com/compose/).
 
-# templates
+# Examples
 
 ## consul-discovery
 Using consul to make a service-discoverable architecture.
 
+## elk_netflow
+ELK cluster, with NetFlow support.
+```sh
+docker-compose scale es=3
+```
+
 ## mongo_cluster
 Start 3 mongo instance to make a replica set.
 
@@ -38,8 +44,8 @@ Use nginx as a proxy with authentication for backend application.
 ## registry_mirror
 docker registry mirror, with redis as the backend cache.
 
-## elk
-Elk cluster, with netflow support
+## spark_cluster
+Spark cluster with master and worker nodes.
 ```sh
-docker-compose scale es=3
+docker-compose scale worker=2
 ```
diff --git a/spark_cluster/docker-compose.yml b/spark_cluster/docker-compose.yml
new file mode 100644
index 00000000..013e56ec
--- /dev/null
+++ b/spark_cluster/docker-compose.yml
@@ -0,0 +1,27 @@
+# http://github.com/yeasy/docker-compose-files
+# This compose file will start a Spark master node and a worker node,
+# which form a standalone cluster automatically.
+# Scale out with: docker-compose scale worker=2
+
+master:
+  image: sequenceiq/spark:1.4.0
+  hostname: master
+  ports:
+    - "4040:4040"  # Spark application UI
+    - "8042:8042"  # Hadoop NodeManager web UI
+    - "7077:7077"  # Spark master port (workers connect here)
+    - "8088:8088"  # Hadoop ResourceManager web UI
+    - "8080:8080"  # Spark master web UI
+  restart: always
+  #mem_limit: 1024m
+  # the start script backgrounds the daemon; the trailing ping keeps a
+  # foreground process running so the container does not exit
+  command: bash /usr/local/spark/sbin/start-master.sh && ping localhost > /dev/null
+
+worker:
+  image: sequenceiq/spark:1.4.0
+  links:
+    - master:master
+  expose:
+    - "8081"  # Spark worker web UI
+  restart: always
+  command: bash /usr/local/spark/sbin/start-slave.sh spark://master:7077 && ping localhost > /dev/null
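
A minimal usage sketch for the new `spark_cluster` example. The directory name and the `worker=2` scale target come from the diff above; the `-d` detached flag and the `logs` check are illustrative additions, not part of the change:

```sh
cd spark_cluster
docker-compose up -d           # start one master and one worker
docker-compose scale worker=2  # add a second worker
docker-compose logs master     # master log should show the workers registering
```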