Flannel running as pod

pull/62/head
Smaine Kahlouch 2016-01-09 10:45:50 +01:00 committed by ant31
parent dd46cc64a4
commit 8127e8f8e8
31 changed files with 632 additions and 196 deletions

.gitmodules

@ -1,43 +1,43 @@
[submodule "roles/apps/k8s-kube-ui"] [submodule "roles/apps/k8s-kube-ui"]
path = roles/apps/k8s-kube-ui path = roles/apps/k8s-kube-ui
url = https://github.com/ansibl8s/k8s-kube-ui.git url = https://github.com/ansibl8s/k8s-kube-ui.git
branch = v1.0 branch = v1.0
[submodule "roles/apps/k8s-kubedns"] [submodule "roles/apps/k8s-kubedns"]
path = roles/apps/k8s-kubedns path = roles/apps/k8s-kubedns
url = https://github.com/ansibl8s/k8s-kubedns.git url = https://github.com/ansibl8s/k8s-kubedns.git
branch = v1.0 branch = v1.0
[submodule "roles/apps/k8s-common"] [submodule "roles/apps/k8s-common"]
path = roles/apps/k8s-common path = roles/apps/k8s-common
url = https://github.com/ansibl8s/k8s-common.git url = https://github.com/ansibl8s/k8s-common.git
branch = v1.0 branch = v1.0
[submodule "roles/apps/k8s-redis"] [submodule "roles/apps/k8s-redis"]
path = roles/apps/k8s-redis path = roles/apps/k8s-redis
url = https://github.com/ansibl8s/k8s-redis.git url = https://github.com/ansibl8s/k8s-redis.git
branch = v1.0 branch = v1.0
[submodule "roles/apps/k8s-elasticsearch"] [submodule "roles/apps/k8s-elasticsearch"]
path = roles/apps/k8s-elasticsearch path = roles/apps/k8s-elasticsearch
url = https://github.com/ansibl8s/k8s-elasticsearch.git url = https://github.com/ansibl8s/k8s-elasticsearch.git
[submodule "roles/apps/k8s-fabric8"] [submodule "roles/apps/k8s-fabric8"]
path = roles/apps/k8s-fabric8 path = roles/apps/k8s-fabric8
url = https://github.com/ansibl8s/k8s-fabric8.git url = https://github.com/ansibl8s/k8s-fabric8.git
branch = v1.0 branch = v1.0
[submodule "roles/apps/k8s-memcached"] [submodule "roles/apps/k8s-memcached"]
path = roles/apps/k8s-memcached path = roles/apps/k8s-memcached
url = https://github.com/ansibl8s/k8s-memcached.git url = https://github.com/ansibl8s/k8s-memcached.git
branch = v1.0 branch = v1.0
[submodule "roles/apps/k8s-postgres"] [submodule "roles/apps/k8s-postgres"]
path = roles/apps/k8s-postgres path = roles/apps/k8s-postgres
url = https://github.com/ansibl8s/k8s-postgres.git url = https://github.com/ansibl8s/k8s-postgres.git
branch = v1.0 branch = v1.0
[submodule "roles/apps/k8s-kubedash"] [submodule "roles/apps/k8s-kubedash"]
path = roles/apps/k8s-kubedash path = roles/apps/k8s-kubedash
url = https://github.com/ansibl8s/k8s-kubedash.git url = https://github.com/ansibl8s/k8s-kubedash.git
[submodule "roles/apps/k8s-heapster"] [submodule "roles/apps/k8s-heapster"]
path = roles/apps/k8s-heapster path = roles/apps/k8s-heapster
url = https://github.com/ansibl8s/k8s-heapster.git url = https://github.com/ansibl8s/k8s-heapster.git
[submodule "roles/apps/k8s-influxdb"] [submodule "roles/apps/k8s-influxdb"]
path = roles/apps/k8s-influxdb path = roles/apps/k8s-influxdb
url = https://github.com/ansibl8s/k8s-influxdb.git url = https://github.com/ansibl8s/k8s-influxdb.git
[submodule "roles/apps/k8s-kube-logstash"] [submodule "roles/apps/k8s-kube-logstash"]
path = roles/apps/k8s-kube-logstash path = roles/apps/k8s-kube-logstash
url = https://github.com/ansibl8s/k8s-kube-logstash.git url = https://github.com/ansibl8s/k8s-kube-logstash.git
@ -47,3 +47,6 @@
[submodule "roles/apps/k8s-rabbitmq"] [submodule "roles/apps/k8s-rabbitmq"]
path = roles/apps/k8s-rabbitmq path = roles/apps/k8s-rabbitmq
url = https://github.com/ansibl8s/k8s-rabbitmq.git url = https://github.com/ansibl8s/k8s-rabbitmq.git
[submodule "roles/apps/k8s-pgbouncer"]
path = roles/apps/k8s-pgbouncer
url = https://github.com/ansibl8s/k8s-pgbouncer.git


@ -8,10 +8,14 @@ This project allows to
- A **set of roles** in order to install applications over the k8s cluster
- A **flexible method** which helps to create new roles for apps.
Linux distributions tested:
* **Debian** Wheezy, Jessie
* **Ubuntu** 14.10, 15.04, 15.10
* **Fedora** 23
* **CentOS** 7 (Currently with flannel only)
### Requirements
Tested on **Debian Wheezy/Jessie** and **Ubuntu** (14.10, 15.04, 15.10).
* The target servers must have **access to the Internet** in order to pull docker images.
Should work on **RedHat/Fedora/Centos** platforms (to be tested)
* The target servers must have access to the Internet in order to pull docker images.
* The firewalls are not managed, you'll need to implement your own rules the way you used to.
in order to avoid any issue during deployment you should **disable your firewall**
* **Copy your ssh keys** to all the servers part of your inventory (see the example below).
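For the ssh key step, a minimal sketch (the hostnames are placeholders for the machines listed in your inventory):
```
# push your public key to every host of the inventory (example hostnames)
for host in node1 node2 node3; do
  ssh-copy-id root@${host}
done
```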
@ -272,6 +276,53 @@ calicoctl pool show
```
calicoctl endpoint show --detail
```
#### Flannel networking
* A flannel configuration file should have been created under /run/flannel
```
cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.233.0.0/18
FLANNEL_SUBNET=10.233.16.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
```
* Check if the network interface has been created
```
ip a show dev flannel.1
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether e2:f3:a7:0f:bf:cb brd ff:ff:ff:ff:ff:ff
inet 10.233.16.0/18 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::e0f3:a7ff:fe0f:bfcb/64 scope link
valid_lft forever preferred_lft forever
```
* Docker must be configured with a bridge ip in the flannel subnet.
```
ps aux | grep docker
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```
* Try to run a container and check its ip address
```
kubectl run test --image=busybox --command -- tail -f /dev/null
replicationcontroller "test" created
kubectl describe po test-34ozs | grep ^IP
IP: 10.233.16.2
```
```
kubectl exec test-34ozs -- ip a show dev eth0
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
link/ether 02:42:0a:e9:2b:03 brd ff:ff:ff:ff:ff:ff
inet 10.233.16.2/24 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:aff:fee9:2b03/64 scope link tentative flags 08
valid_lft forever preferred_lft forever
```
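As an extra check you can verify pod-to-pod connectivity over the overlay; the pod name comes from the example above and the target address is a hypothetical pod IP on another node:
```
kubectl exec test-34ozs -- ping -c 3 10.233.20.2
```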
Congrats! Now you can walk through [kubernetes basics](http://kubernetes.io/v1.1/basicstutorials.html)

@ -0,0 +1 @@
Subproject commit 61c41e80e3da8938c7896c07822c19c060be4491


@ -34,7 +34,7 @@
state: directory
when: inventory_hostname in groups['kube-master']
- name: configure dnsmasq
- name: Write dnsmasq configuration
template:
src: 01-kube-dns.conf.j2
dest: /etc/dnsmasq.d/01-kube-dns.conf
@ -42,15 +42,14 @@
backup: yes
when: inventory_hostname in groups['kube-master']
- name: create dnsmasq pod template
- name: Create dnsmasq pod manifest
template: src=dnsmasq-pod.yml dest=/etc/kubernetes/manifests/dnsmasq-pod.manifest
when: inventory_hostname in groups['kube-master']
- name: Check for dnsmasq port
- name: Check for dnsmasq port (pulling image and running container)
wait_for:
port: 53
delay: 5
timeout: 100
when: inventory_hostname in groups['kube-master']
- name: check resolvconf
@ -67,7 +66,7 @@
line: search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
dest: "{{resolvconffile}}"
state: present
insertafter: EOF
insertbefore: BOF
backup: yes
follow: yes
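For reference, assuming dns_domain is set to cluster.local, the task above should leave a line like the following at the top of the managed resolv.conf (hypothetical rendering):
```
search default.svc.cluster.local svc.cluster.local cluster.local
```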


@ -20,14 +20,6 @@
{{ ansible_distribution }}-{{ ansible_distribution_version }}
when: ansible_kernel|version_compare(docker_kernel_min_version, "<")
- name: ensure docker requirements packages are installed
action: "{{ docker_package_info.pkg_mgr }}"
args:
name: "{{item}}"
state: latest
update_cache: yes
with_items: docker_package_info.pre_pkgs
when: docker_package_info.pre_pkgs|length > 0
- name: ensure docker repository public key is installed
action: "{{ docker_repo_key_info.pkg_key }}"


@ -2,10 +2,6 @@ docker_kernel_min_version: '2.6.32-431'
docker_package_info:
pkg_mgr: yum
pre_pkgs:
- epel-release
- curl
- device-mapper-libs
pkgs:
- docker-io


@ -2,10 +2,6 @@ docker_kernel_min_version: '3.2'
docker_package_info:
pkg_mgr: apt
pre_pkgs:
- apt-transport-https
- curl
- software-properties-common
pkgs:
- docker-engine


@ -2,8 +2,6 @@ docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: yum
pre_pkgs:
- curl
pkgs:
- docker-io


@ -0,0 +1,14 @@
docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: dnf
pkgs:
- docker-io
docker_repo_key_info:
pkg_key: ''
repo_keys: []
docker_repo_info:
pkg_repo: ''
repos: []


@ -2,8 +2,6 @@ docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: yum
pre_pkgs:
- curl
pkgs:
- docker


@ -1,7 +1,6 @@
---
local_release_dir: /tmp
flannel_version: 0.5.5
calico_version: v0.13.0
calico_plugin_version: v0.7.0
kube_version: v1.1.3
@ -11,8 +10,6 @@ kubelet_checksum: "62191c66f2d670dd52ddf1d88ef81048977abf1ffaa95ee6333299447eb6a
kube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64"
flannel_download_url: "https://github.com/coreos/flannel/releases/download/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz"
calico_download_url: "https://github.com/Metaswitch/calico-docker/releases/download/{{calico_version}}/calicoctl"
calico_plugin_download_url: "https://github.com/projectcalico/calico-kubernetes/releases/download/{{calico_plugin_version}}/calico_kubernetes"
@ -26,11 +23,6 @@ downloads:
dest: calico/bin/calico
url: "{{calico_plugin_download_url}}"
- name: flannel
dest: flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
url: "{{flannel_download_url}}"
unarchive: yes
- name: kubernetes-kubelet
dest: kubernetes/bin/kubelet
sha256: "{{kubelet_checksum}}"


@ -1,13 +1,12 @@
---
- name: ETCD2 | Stop etcd2 service
- name: Stop etcd2 service
service: name=etcd state=stopped
ignore_errors: yes
- name: ETCD2 | create etcd pod template
- name: Create etcd pod manifest
template: src=etcd-pod.yml dest=/etc/kubernetes/manifests/etcd-pod.manifest
- name: ETCD2 | Check for etcd2 port
- name: Check for etcd2 port (pulling image and running container)
wait_for:
port: 2379
delay: 5
timeout: 100


@ -265,6 +265,7 @@ _kubectl_get()
flags_completion=()
flags+=("--all-namespaces")
flags+=("--export")
flags+=("--filename=")
flags_with_completion+=("--filename")
flags_completion+=("__handle_filename_extension_flag json|yaml|yml")
@ -401,10 +402,204 @@ _kubectl_describe()
must_have_one_noun+=("serviceaccount")
}
_kubectl_create_namespace()
{
last_command="kubectl_create_namespace"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--dry-run")
flags+=("--generator=")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--output-version=")
flags+=("--save-config")
flags+=("--schema-cache-dir=")
flags+=("--validate")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_create_secret_docker-registry()
{
last_command="kubectl_create_secret_docker-registry"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--docker-email=")
flags+=("--docker-password=")
flags+=("--docker-server=")
flags+=("--docker-username=")
flags+=("--dry-run")
flags+=("--generator=")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--output-version=")
flags+=("--save-config")
flags+=("--schema-cache-dir=")
flags+=("--validate")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_flag+=("--docker-email=")
must_have_one_flag+=("--docker-password=")
must_have_one_flag+=("--docker-username=")
must_have_one_noun=()
}
_kubectl_create_secret_generic()
{
last_command="kubectl_create_secret_generic"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--dry-run")
flags+=("--from-file=")
flags+=("--from-literal=")
flags+=("--generator=")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--output-version=")
flags+=("--save-config")
flags+=("--schema-cache-dir=")
flags+=("--type=")
flags+=("--validate")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_create_secret()
{
last_command="kubectl_create_secret"
commands=()
commands+=("docker-registry")
commands+=("generic")
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_create()
{
last_command="kubectl_create"
commands=()
commands+=("namespace")
commands+=("secret")
flags=()
two_word_flags=()
@ -945,6 +1140,125 @@ _kubectl_scale()
must_have_one_noun=()
}
_kubectl_cordon()
{
last_command="kubectl_cordon"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_drain()
{
last_command="kubectl_drain"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--force")
flags+=("--grace-period=")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_uncordon()
{
last_command="kubectl_uncordon"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_attach()
{
last_command="kubectl_attach"
@ -1164,6 +1478,7 @@ _kubectl_run()
two_word_flags+=("-r")
flags+=("--requests=")
flags+=("--restart=")
flags+=("--rm")
flags+=("--save-config")
flags+=("--service-generator=")
flags+=("--service-overrides=")
@ -2045,6 +2360,9 @@ _kubectl()
commands+=("logs")
commands+=("rolling-update")
commands+=("scale")
commands+=("cordon")
commands+=("drain")
commands+=("uncordon")
commands+=("attach")
commands+=("exec")
commands+=("port-forward")


@ -1,13 +1,13 @@
---
- name: reload systemd
command: systemctl daemon-reload
- name: restart systemd-kubelet
command: /bin/true
notify:
- reload systemd
- restart kubelet
- name: reload systemd
command: systemctl daemon-reload
- name: restart kubelet
service:
name: kubelet


@ -46,3 +46,4 @@
- name: install | Perms calico plugin binary
file: path=/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico owner=kube mode=0755 state=file
when: kube_network_plugin == "calico"


@ -3,13 +3,7 @@ common_required_pkgs:
- python-httplib2
- openssl
- curl
- rsync
debian_required_pkgs:
- python-apt
- python-pip
rh_required_pkgs:
- libselinux-python
pypy_version: 2.4.0
python_pypy_url: "https://bitbucket.org/pypy/pypy/downloads/pypy-{{ pypy_version }}.tar.bz2"


@ -1,4 +1,17 @@
---
- name: gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:
- ../vars
- name: "Identify init system" - name: "Identify init system"
shell: > shell: >
$(pgrep systemd > /dev/null && systemctl status > /dev/null); $(pgrep systemd > /dev/null && systemctl status > /dev/null);
@ -14,27 +27,29 @@
- set_fact: - set_fact:
init_system: "{{ init_system_output.stdout }}" init_system: "{{ init_system_output.stdout }}"
- name: Install python-apt for Debian distribs
shell: apt-get install -y python-apt
when: ansible_os_family == "Debian"
changed_when: False
- name: Install python-dnf for latest RedHat versions
shell: dnf install -y python-dnf yum
when: ansible_distribution == "Fedora" and
ansible_distribution_major_version > 21
changed_when: False
- name: Install packages requirements
action:
module: "{{ ansible_pkg_mgr }}"
name: "{{ item }}"
state: latest
with_items: common_required_pkgs
with_items: "{{required_pkgs | union(common_required_pkgs)}}"
- name: Install debian packages requirements
# Todo : selinux configuration
apt:
- name: Set selinux policy to permissive
name: "{{ item }}"
selinux: policy=targeted state=permissive
state: latest
when: ansible_os_family == "Debian"
with_items: debian_required_pkgs
- name: Install redhat packages requirements
action:
module: "{{ ansible_pkg_mgr }}"
name: "{{ item }}"
state: latest
when: ansible_os_family == "RedHat"
with_items: rh_required_pkgs
changed_when: False
- include: python-bootstrap.yml
when: ansible_os_family not in [ "Debian", "RedHat" ]


@ -0,0 +1,4 @@
required_pkgs:
- epel-release
- libselinux-python
- device-mapper-libs


@ -0,0 +1,4 @@
required_pkgs:
- python-apt
- apt-transport-https
- software-properties-common


@ -0,0 +1,3 @@
required_pkgs:
- libselinux-python
- device-mapper-libs


@ -0,0 +1,3 @@
required_pkgs:
- libselinux-python
- device-mapper-libs


@ -1,36 +1,30 @@
---
- name : reload systemd
shell: systemctl daemon-reload
- name: restart systemd-calico-node
command: /bin/true
notify:
- reload systemd
- restart calico-node
- name: restart systemd-docker
command: /bin/true
notify:
- reload systemd
- restart docker
- name: delete default docker bridge
command: ip link delete docker0
ignore_errors: yes
notify: restart docker
- name : reload systemd
shell: systemctl daemon-reload
- name: restart calico-node
service:
name: calico-node
state: restarted
- name: restart docker
service: name=docker state=restarted
service:
name: docker
- name: restart flannel
state: restarted
service: name=flannel state=restarted
notify:
- reload systemd
- stop docker
- delete docker0
- start docker
when: inventory_hostname in groups['kube-node']
- name: stop docker
service: name=docker state=stopped
- name: delete docker0
command: ip link delete docker0
ignore_errors: yes
- name: start docker
service: name=docker state=started


@ -1,5 +1,4 @@
---
- name: Calico | Install calicoctl bin
synchronize:
src: "{{ local_release_dir }}/calico/bin/calicoctl"
@ -18,6 +17,10 @@
dest: /usr/bin/calicoctl
state: link
- wait_for:
port: 2379
when: inventory_hostname in groups['kube-master']
- name: Calico | Check if calico network pool has already been configured
uri:
url: "http://127.0.0.1:2379/v2/keys/calico/v1/ipam/v4/pool"
@ -71,3 +74,16 @@
- name: Calico | Enable calico-node
service: name=calico-node enabled=yes state=started
- name: Calico | Disable node mesh
shell: calicoctl bgp node-mesh off
environment:
ETCD_AUTHORITY: "{{ groups['etcd'][0] }}:2379"
when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
- name: Calico | Configure peering with router(s)
shell: calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}
environment:
ETCD_AUTHORITY: "{{ groups['etcd'][0] }}:2379"
with_items: peers
when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
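Each item of the peers variable consumed above is expected to carry a router_id and an as field; with hypothetical values the task boils down to running something like this on a node:
```
# hypothetical peer: router_id=192.168.1.1, as=64512; the etcd host is a placeholder
ETCD_AUTHORITY=etcd1:2379 calicoctl node bgp peer add 192.168.1.1 as 64512
```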


@ -1,57 +1,34 @@
---
- name: Create flannel user
- name: Flannel | Write flannel configuration
user: name=flannel shell=/bin/nologin
- name: Install flannel binaries
synchronize:
src: "{{ local_release_dir }}/flannel/bin/flanneld"
dest: "{{ bin_dir }}/flanneld"
archive: no
times: yes
delegate_to: "{{ groups['downloader'][0] }}"
notify:
- restart flannel
- name: Perms flannel binary
file: path={{ bin_dir }}/flanneld owner=flannel mode=0755 state=file
- name: Write flannel.service systemd file
template:
src: flannel/systemd-flannel.service.j2
src: flannel/network.json
dest: /etc/systemd/system/flannel.service
dest: /etc/flannel-network.json
notify: restart flannel
backup: yes
when: inventory_hostname in groups['kube-node']
- name: Write docker.service systemd file
- name: Flannel | Create flannel pod manifest
template:
src: flannel/systemd-docker.service.j2
src: flannel/flannel-pod.yml
dest: /lib/systemd/system/docker.service
dest: /etc/kubernetes/manifests/flannel-pod.manifest
notify: restart docker
notify: delete default docker bridge
- name: Set fact for ectcd command conf file location
- name: Flannel | Wait for flannel subnet.env file presence
set_fact:
wait_for:
conf_file: "/tmp/flannel-conf.json"
path: /run/flannel/subnet.env
run_once: true
delay: 5
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Create flannel config file to go in etcd
- name: Get flannel_subnet from subnet.env
template: src=flannel/flannel-conf.json.j2 dest={{ conf_file }}
shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_SUBNET" {print $2}'
run_once: true
register: flannel_subnet_output
delegate_to: "{{ groups['kube-master'][0] }}"
changed_when: false
- name: Flannel configuration into etcd
- set_fact:
shell: "{{ bin_dir }}/etcdctl set /{{ cluster_name }}/network/config < {{ conf_file }}"
flannel_subnet: "{{ flannel_subnet_output.stdout }}"
delegate_to: "{{ groups['kube-master'][0] }}"
notify: restart flannel
- name: Clean up the flannel config file
- name: Get flannel_mtu from subnet.env
file: path=/tmp/flannel-config.json state=absent
shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_MTU" {print $2}'
run_once: true
register: flannel_mtu_output
delegate_to: "{{ groups['kube-master'][0] }}"
changed_when: false
- name: Launch Flannel
- set_fact:
service: name=flannel state=started enabled=yes
flannel_mtu: "{{ flannel_mtu_output.stdout }}"
notify:
- restart flannel
- name: Enable Docker
service: name=docker enabled=yes state=started


@ -7,7 +7,24 @@
- include: flannel.yml
when: kube_network_plugin == "flannel"
- include: calico.yml
- name: Set docker daemon options
when: kube_network_plugin == "calico"
template:
src: docker
dest: "/etc/default/docker"
owner: root
group: root
mode: 0644
notify:
- restart docker
- name: Write docker.service systemd file
template:
src: systemd-docker.service
dest: /lib/systemd/system/docker.service
notify: restart systemd-docker
when: init_system == "systemd"
- meta: flush_handlers
- include: calico.yml
when: kube_network_plugin == "calico"


@ -0,0 +1,6 @@
# Deployed by Ansible
{% if init_system == "sysvinit" and kube_network_plugin == "flannel" and ansible_os_family == "Debian" %}
DOCKER_OPTS="--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"
{% elif kube_network_plugin == "flannel" %}
OPTIONS="--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"
{% endif %}
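With the flannel values from the README example above (FLANNEL_SUBNET=10.233.16.1/24, FLANNEL_MTU=1450), the rendered /etc/default/docker would be expected to read roughly:
```
# Deployed by Ansible (example rendering)
OPTIONS="--bip=10.233.16.1/24 --mtu=1450"
```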


@ -0,0 +1,46 @@
---
kind: "Pod"
apiVersion: "v1"
metadata:
name: "flannel"
namespace: "kube-system"
labels:
app: "flannel"
version: "v0.1"
spec:
volumes:
- name: "subnetenv"
hostPath:
path: "/run/flannel"
- name: "networkconfig"
hostPath:
path: "/etc/flannel-network.json"
containers:
- name: "flannel-server-helper"
image: "gcr.io/google_containers/flannel-server-helper:0.1"
args:
- "--network-config=/etc/flannel-network.json"
- "--etcd-prefix=/{{ cluster_name }}/network"
- "--etcd-server=http://{{ groups['etcd'][0] }}:2379"
volumeMounts:
- name: "networkconfig"
mountPath: "/etc/flannel-network.json"
imagePullPolicy: "Always"
- name: "flannel-container"
image: "quay.io/coreos/flannel:0.5.5"
command:
- "/bin/sh"
- "-c"
- "/opt/bin/flanneld -etcd-endpoints {% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %} -etcd-prefix /{{ cluster_name }}/network 1>>/var/log/flannel_server.log 2>&1"
ports:
- hostPort: 10253
containerPort: 10253
resources:
limits:
cpu: "100m"
volumeMounts:
- name: "subnetenv"
mountPath: "/run/flannel"
securityContext:
privileged: true
hostNetwork: true
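Once this manifest is dropped into /etc/kubernetes/manifests, the kubelet should run flannel as a static pod in the kube-system namespace; a quick check (the node-name suffix on the pod is illustrative):
```
kubectl get pods --namespace=kube-system
NAME            READY     STATUS    RESTARTS   AGE
flannel-node1   2/2       Running   0          2m
```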


@ -1,17 +0,0 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target docker.socket flannel.service
Requires=docker.socket
[Service]
EnvironmentFile=/run/flannel/subnet.env
EnvironmentFile=-/etc/default/docker
ExecStart=/usr/bin/docker -d -H fd:// --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU} $DOCKER_OPTS
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
[Install]
WantedBy=multi-user.target


@ -1,12 +0,0 @@
[Unit]
Description=Flannel Network Overlay
Documentation=https://coreos.com/flannel/docs/latest
[Service]
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/flanneld \
$FLANNEL_ETCD_PREFIX
Restart=on-failure
[Install]
WantedBy=multi-user.target


@ -0,0 +1,28 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
{% if ansible_os_family == "RedHat" %}
After=network.target
Wants=docker-storage-setup.service
{% elif ansible_os_family == "Debian" %}
After=network.target docker.socket
Requires=docker.socket
{% endif %}
[Service]
Type=notify
EnvironmentFile=-/etc/default/docker
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/docker daemon \
$OPTIONS \
$DOCKER_STORAGE_OPTIONS \
$DOCKER_NETWORK_OPTIONS \
$INSECURE_REGISTRY
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
MountFlags=slave
TimeoutStartSec=1min
[Install]
WantedBy=multi-user.target