From 8a63b35f4406edb0f8fd53c76cb25c169be10d18 Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Wed, 22 Feb 2017 14:14:21 -0600 Subject: [PATCH 01/23] Adding flag for docker container in kubelet w/ rkt --- roles/kubernetes/node/templates/kubelet.rkt.service.j2 | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 index 1ccccc43d..bcc1734a4 100644 --- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 @@ -49,7 +49,12 @@ ExecStart=/usr/bin/rkt run \ --mount volume=var-lib-kubelet,target=/var/lib/kubelet \ --mount volume=var-log,target=/var/log \ --stage1-from-dir=stage1-fly.aci \ +{% if kube_hyperkube_image_repo == "docker" %} + --insecure-options=image \ + docker://{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \ +{% else %} {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \ +{% endif %} --uuid-file-save=/var/run/kubelet.uuid \ --debug --exec=/kubelet -- \ $KUBE_LOGTOSTDERR \ From a5bb24b886f1878199b400c983accb96e2eef6c6 Mon Sep 17 00:00:00 2001 From: Josh Lothian Date: Wed, 22 Mar 2017 10:12:57 -0500 Subject: [PATCH 02/23] Fix docker restart in atomic In atomic, containers are left running when docker is restarted. When docker is restarted after the flannel config is put in place, the docker0 interface isn't re-IPed because docker sees the running containers and won't update the previous config. This patch kills all the running containers after docker is stopped. We can't simply `docker stop` the running containers, as they respawn before we've got a chance to stop the docker daemon, so we need to use runc to do this after dockerd is stopped. --- roles/network_plugin/flannel/handlers/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml index 98c93a53a..9a87e2ec2 100644 --- a/roles/network_plugin/flannel/handlers/main.yml +++ b/roles/network_plugin/flannel/handlers/main.yml @@ -10,6 +10,7 @@ - Flannel | reload systemd - Flannel | reload docker.socket - Flannel | reload docker + - Flannel | reload docker (atomic) - Flannel | pause while Docker restarts - Flannel | wait for docker @@ -26,6 +27,11 @@ service: name: docker state: restarted + when: not is_atomic + +- name: Flannel | reload docker (atomic) + shell: systemctl stop docker && runc list | awk '!/ID/ {print $1}' | xargs -n 1 -I ID runc kill ID KILL && systemctl start docker + when: is_atomic - name: Flannel | pause while Docker restarts pause: From 30cc7c847ef69613317228b749843be1f361dfe8 Mon Sep 17 00:00:00 2001 From: Josh Lothian Date: Thu, 30 Mar 2017 17:57:40 -0500 Subject: [PATCH 03/23] Reconfigure docker restart behavior on atomic Before restarting docker, instruct it to kill running containers when it restarts. Needs a second docker restart after we restore the original behavior, otherwise the next time docker is restarted by an operator, it will unexpectedly bring down all running containers. 
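The equivalent manual sequence on an Atomic host looks roughly like the sketch below (illustrative only — it assumes Docker reads `live-restore` from `/etc/docker/daemon.json`, the same file the tasks in this patch edit):

```sh
# temporarily disable live-restore so restarting docker tears down running containers
sed -i 's/"live-restore":.*true/"live-restore": false/' /etc/docker/daemon.json
systemctl restart docker    # docker0 is re-created with the flannel-provided subnet
# put the default back so an operator-driven restart keeps containers running
sed -i 's/"live-restore": false/"live-restore": true/' /etc/docker/daemon.json
systemctl restart docker    # second restart picks up the restored setting
```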
--- .../network_plugin/flannel/handlers/main.yml | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml index 9a87e2ec2..e3e937a1f 100644 --- a/roles/network_plugin/flannel/handlers/main.yml +++ b/roles/network_plugin/flannel/handlers/main.yml @@ -9,7 +9,9 @@ notify: - Flannel | reload systemd - Flannel | reload docker.socket + - Flannel | reconfigure docker restart behavior (atomic) - Flannel | reload docker + - Flannel | restore docker restart behavior (atomic) - Flannel | reload docker (atomic) - Flannel | pause while Docker restarts - Flannel | wait for docker @@ -23,14 +25,29 @@ state: restarted when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] +- name: Flannel | reconfigure docker restart behavior (atomic) + replace: + name: /etc/docker/daemon.json + regexp: '"live-restore":.*true' + replace: '"live-restore": false' + when: is_atomic + - name: Flannel | reload docker service: name: docker state: restarted - when: not is_atomic + +- name: Flannel | restore docker restart behavior (atomic) + replace: + name: /etc/docker/daemon.json + regexp: '"live-restore": false' + replace: '"live-restore": true' + when: is_atomic - name: Flannel | reload docker (atomic) - shell: systemctl stop docker && runc list | awk '!/ID/ {print $1}' | xargs -n 1 -I ID runc kill ID KILL && systemctl start docker + service: + name: docker + state: restarted when: is_atomic - name: Flannel | pause while Docker restarts From 9ee0600a7f5212e602464544d046518522fe90d4 Mon Sep 17 00:00:00 2001 From: Josh Lothian Date: Fri, 31 Mar 2017 07:46:21 -0500 Subject: [PATCH 04/23] Update handler names and explanation --- roles/network_plugin/flannel/handlers/main.yml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml index e3e937a1f..412563394 100644 --- a/roles/network_plugin/flannel/handlers/main.yml +++ b/roles/network_plugin/flannel/handlers/main.yml @@ -4,14 +4,18 @@ failed_when: false notify: Flannel | restart docker +# special cases for atomic because it defaults to live-restore: true +# So we disable live-restore to pickup the new flannel IP. 
After +# we enable it, we have to restart docker again to pickup the new +# setting and restore the original behavior - name: Flannel | restart docker command: /bin/true notify: - Flannel | reload systemd - Flannel | reload docker.socket - - Flannel | reconfigure docker restart behavior (atomic) + - Flannel | configure docker live-restore true (atomic) - Flannel | reload docker - - Flannel | restore docker restart behavior (atomic) + - Flannel | configure docker live-restore false (atomic) - Flannel | reload docker (atomic) - Flannel | pause while Docker restarts - Flannel | wait for docker @@ -25,7 +29,7 @@ state: restarted when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] -- name: Flannel | reconfigure docker restart behavior (atomic) +- name: Flannel | configure docker live-restore true (atomic) replace: name: /etc/docker/daemon.json regexp: '"live-restore":.*true' @@ -37,7 +41,7 @@ name: docker state: restarted -- name: Flannel | restore docker restart behavior (atomic) +- name: Flannel | configure docker live-restore false (atomic) replace: name: /etc/docker/daemon.json regexp: '"live-restore": false' From 6f67367b5758893798d0ae157caa3feac29499c3 Mon Sep 17 00:00:00 2001 From: Josh Lothian Date: Wed, 5 Apr 2017 15:41:46 -0500 Subject: [PATCH 05/23] Leave 'live-restore' false Leave live-restore false to updates always pick up new network configuration --- roles/network_plugin/flannel/handlers/main.yml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml index 412563394..8fbb6a1fd 100644 --- a/roles/network_plugin/flannel/handlers/main.yml +++ b/roles/network_plugin/flannel/handlers/main.yml @@ -41,19 +41,6 @@ name: docker state: restarted -- name: Flannel | configure docker live-restore false (atomic) - replace: - name: /etc/docker/daemon.json - regexp: '"live-restore": false' - replace: '"live-restore": true' - when: is_atomic - -- name: Flannel | reload docker (atomic) - service: - name: docker - state: restarted - when: is_atomic - - name: Flannel | pause while Docker restarts pause: seconds: 10 From ef8d3f684f5bd4dc3999291a0493d0c1c736612b Mon Sep 17 00:00:00 2001 From: Josh Lothian Date: Fri, 19 May 2017 09:45:46 -0500 Subject: [PATCH 06/23] Remove unused handler Previous patch removed the step that sets live-restore back to false, so don't try to notify that handler any more --- roles/network_plugin/flannel/handlers/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml index 8fbb6a1fd..a84d70c70 100644 --- a/roles/network_plugin/flannel/handlers/main.yml +++ b/roles/network_plugin/flannel/handlers/main.yml @@ -15,7 +15,6 @@ - Flannel | reload docker.socket - Flannel | configure docker live-restore true (atomic) - Flannel | reload docker - - Flannel | configure docker live-restore false (atomic) - Flannel | reload docker (atomic) - Flannel | pause while Docker restarts - Flannel | wait for docker From 7ae5785447e583e27cfabc28396e17894cd25d76 Mon Sep 17 00:00:00 2001 From: Josh Lothian Date: Fri, 19 May 2017 09:50:10 -0500 Subject: [PATCH 07/23] Removed the other unused handler With live-restore: true, we don't need a special docker restart --- roles/network_plugin/flannel/handlers/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml index a84d70c70..bd4058976 
100644 --- a/roles/network_plugin/flannel/handlers/main.yml +++ b/roles/network_plugin/flannel/handlers/main.yml @@ -15,7 +15,6 @@ - Flannel | reload docker.socket - Flannel | configure docker live-restore true (atomic) - Flannel | reload docker - - Flannel | reload docker (atomic) - Flannel | pause while Docker restarts - Flannel | wait for docker From 18a42e4b38939017a2e34dbd1da042fc7ef10f15 Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Wed, 24 May 2017 15:49:21 -0400 Subject: [PATCH 08/23] add scale.yml to do minimum needed for a node bootstrap --- docs/getting-started.md | 12 ++++++++++++ scale.yml | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 scale.yml diff --git a/docs/getting-started.md b/docs/getting-started.md index 5c61ef764..6e323d9cd 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -55,3 +55,15 @@ ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \ ``` See more details in the [ansible guide](ansible.md). + +Adding nodes +-------------------------- + +You may want to add worker nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters. + +- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)). +- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`: +``` +ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \ + --private-key=~/.ssh/private_key +``` \ No newline at end of file diff --git a/scale.yml b/scale.yml new file mode 100644 index 000000000..02e79aa37 --- /dev/null +++ b/scale.yml @@ -0,0 +1,34 @@ +--- + +##Bootstrap any new workers +- hosts: kube-node + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + gather_facts: false + vars: + ansible_ssh_pipelining: false + roles: + - { role: kargo-defaults} + - { role: bootstrap-os, tags: bootstrap-os} + +##We still have to gather facts about our masters and etcd nodes +- hosts: k8s-cluster:etcd:calico-rr + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + vars: + ansible_ssh_pipelining: true + gather_facts: true + +##Target only workers to get kubelet installed and checking in on any new nodes +- hosts: kube-node + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + roles: + - { role: kargo-defaults} + - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade } + - { role: kubernetes/preinstall, tags: preinstall } + - { role: docker, tags: docker } + - role: rkt + tags: rkt + when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]" + - { role: etcd, tags: etcd, etcd_cluster_setup: false } + - { role: vault, tags: vault, when: "cert_management == 'vault'"} + - { role: kubernetes/node, tags: node } + - { role: network_plugin, tags: network } From 7e2aafcc76cbda082cfe50973bd8164227289ac5 Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Fri, 26 May 2017 17:32:50 -0400 Subject: [PATCH 09/23] add direct path for cert in AWS with RHEL family --- .../templates/manifests/kube-apiserver.manifest.j2 | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 
b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index b0f1a2f53..982184764 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -105,6 +105,11 @@ spec: - mountPath: {{ etcd_cert_dir }} name: etcd-certs readOnly: true +{% if cloud_provider == 'aws' and ansible_os_family == 'RedHat' %} + - mountPath: /etc/ssl/certs/ca-bundle.crt + name: rhel-ca-bundle + readOnly: true +{% endif %} volumes: - hostPath: path: {{ kube_config_dir }} @@ -115,3 +120,8 @@ spec: - hostPath: path: {{ etcd_cert_dir }} name: etcd-certs +{% if cloud_provider == 'aws' and ansible_os_family == 'RedHat' %} + - hostPath: + path: /etc/ssl/certs/ca-bundle.crt + name: rhel-ca-bundle +{% endif %} \ No newline at end of file From 56b86bbfca63e04d9653c59a29b2da46d3536080 Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Fri, 26 May 2017 17:47:25 -0400 Subject: [PATCH 10/23] inventory hostname for cordoning/uncordoning --- roles/upgrade/post-upgrade/tasks/main.yml | 2 +- roles/upgrade/pre-upgrade/tasks/main.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml index bff9983ff..c32f42491 100644 --- a/roles/upgrade/post-upgrade/tasks/main.yml +++ b/roles/upgrade/post-upgrade/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Uncordon node - command: "{{ bin_dir }}/kubectl uncordon {{ ansible_hostname }}" + command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}" delegate_to: "{{ groups['kube-master'][0] }}" when: needs_cordoning|default(false) diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml index f2251375b..a2b34927f 100644 --- a/roles/upgrade/pre-upgrade/tasks/main.yml +++ b/roles/upgrade/pre-upgrade/tasks/main.yml @@ -14,7 +14,7 @@ {% endif %} - name: Cordon node - command: "{{ bin_dir }}/kubectl cordon {{ ansible_hostname }}" + command: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}" delegate_to: "{{ groups['kube-master'][0] }}" when: needs_cordoning @@ -25,6 +25,6 @@ --ignore-daemonsets --grace-period {{ drain_grace_period }} --timeout {{ drain_timeout }} - --delete-local-data {{ ansible_hostname }} + --delete-local-data {{ inventory_hostname }} delegate_to: "{{ groups['kube-master'][0] }}" when: needs_cordoning From dd89e705f296a2495aa75a073fb0bfbde3d7937f Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Fri, 26 May 2017 17:48:56 -0400 Subject: [PATCH 11/23] don't uncordon masters --- upgrade-cluster.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml index 8ff4ad4b8..0b4613820 100644 --- a/upgrade-cluster.yml +++ b/upgrade-cluster.yml @@ -67,7 +67,6 @@ - { role: kubernetes/node, tags: node } - { role: kubernetes/master, tags: master } - { role: network_plugin, tags: network } - - { role: upgrade/post-upgrade, tags: post-upgrade } #Finally handle worker upgrades, based on given batch size - hosts: kube-node:!kube-master From 9b18c073b66ac0179cca4d6e002d49f67e974485 Mon Sep 17 00:00:00 2001 From: Jonas Matser Date: Sun, 28 May 2017 20:55:44 +0200 Subject: [PATCH 12/23] Adds note on versionlock to README Note to users that auto-updates break clusters that don't lock the docker version somehow. 
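One way to do that is to pin or hold the Docker package so unattended upgrades cannot move it (a sketch — the package name used here, `docker-engine`, depends on how Docker was installed on your hosts):

```sh
# RHEL/CentOS: lock the currently installed Docker version
yum install -y yum-plugin-versionlock
yum versionlock add docker-engine

# Debian/Ubuntu: hold the package at its current version
apt-mark hold docker-engine
```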
--- README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 94ba1716d..aa1360a77 100644 --- a/README.md +++ b/README.md @@ -57,10 +57,12 @@ Versions of supported components [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
[weave](http://weave.works/) v1.8.2
-[docker](https://www.docker.com/) v1.13.1
-[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0
+[docker](https://www.docker.com/) v1.13.1 (see note)
+[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
-Note: rkt support as docker alternative is limited to control plane (etcd and +Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin). + +Note 2: rkt support as docker alternative is limited to control plane (etcd and kubelet). Docker is still used for Kubernetes cluster workloads and network plugins' related OS services. Also note, only one of the supported network plugins can be deployed for a given single cluster. From 01c0ab4f06d6ad50a64bd499fc5591e6ce4d6ce1 Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Wed, 31 May 2017 08:24:24 -0400 Subject: [PATCH 13/23] check if cloud_provider is defined --- .../master/templates/manifests/kube-apiserver.manifest.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 982184764..851cca060 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -105,7 +105,7 @@ spec: - mountPath: {{ etcd_cert_dir }} name: etcd-certs readOnly: true -{% if cloud_provider == 'aws' and ansible_os_family == 'RedHat' %} +{% if cloud_provider is defined and cloud_provider == 'aws' and ansible_os_family == 'RedHat' %} - mountPath: /etc/ssl/certs/ca-bundle.crt name: rhel-ca-bundle readOnly: true @@ -120,7 +120,7 @@ spec: - hostPath: path: {{ etcd_cert_dir }} name: etcd-certs -{% if cloud_provider == 'aws' and ansible_os_family == 'RedHat' %} +{% if cloud_provider is defined and cloud_provider == 'aws' and ansible_os_family == 'RedHat' %} - hostPath: path: /etc/ssl/certs/ca-bundle.crt name: rhel-ca-bundle From 11ede9f872436a28d4788fe6057524c039a93a68 Mon Sep 17 00:00:00 2001 From: Spencer Smith Date: Fri, 2 Jun 2017 12:24:54 -0400 Subject: [PATCH 14/23] use latest coreos-stable for testing to avoid upgrades during deployment --- .gitlab-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index be43c4f06..1cd419951 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -256,7 +256,7 @@ before_script: .coreos_calico_sep_variables: &coreos_calico_sep_variables # stage: deploy-gce-part1 KUBE_NETWORK_PLUGIN: calico - CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315 + CLOUD_IMAGE: coreos-stable CLOUD_REGION: us-west1-b CLUSTER_MODE: separate BOOTSTRAP_OS: coreos @@ -296,7 +296,7 @@ before_script: .coreos_canal_variables: &coreos_canal_variables # stage: deploy-gce-part2 KUBE_NETWORK_PLUGIN: canal - CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315 + CLOUD_IMAGE: coreos-stable CLOUD_REGION: us-east1-b CLUSTER_MODE: default BOOTSTRAP_OS: coreos From 6fb17a813c81a1eabd003282c91d1555dce88cba Mon Sep 17 00:00:00 2001 From: Kevin Jing Qiu Date: Fri, 2 Jun 2017 18:53:47 -0400 Subject: [PATCH 15/23] Support provisioning vagrant k8s clusters with coreos --- Vagrantfile | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index b769199b1..e133bdc31 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -7,6 +7,13 @@ Vagrant.require_version ">= 1.8.0" CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb") +SUPPORTED_OS = { + "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos"}, + 
"coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos"}, + "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos"}, + "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu"}, +} + # Defaults for config options defined in CONFIG $num_instances = 3 $instance_name_prefix = "k8s" @@ -16,7 +23,8 @@ $vm_cpus = 1 $shared_folders = {} $forwarded_ports = {} $subnet = "172.17.8" -$box = "bento/ubuntu-16.04" +$os = "coreos-stable" +$box = SUPPORTED_OS[$os][:box] # The first three nodes are etcd servers $etcd_instances = $num_instances # The first two nodes are masters @@ -103,6 +111,7 @@ Vagrant.configure("2") do |config| # Override the default 'calico' with flannel. # inventory/group_vars/k8s-cluster.yml "kube_network_plugin": "flannel", + "bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os] } config.vm.network :private_network, ip: ip From e7acc2fddf3bc6b72723a5bd0d831b61d052743e Mon Sep 17 00:00:00 2001 From: Kevin Jing Qiu Date: Fri, 2 Jun 2017 19:03:43 -0400 Subject: [PATCH 16/23] Update doc for Vagrant install --- Vagrantfile | 2 +- docs/vagrant.md | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index e133bdc31..2134dfa56 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -23,7 +23,7 @@ $vm_cpus = 1 $shared_folders = {} $forwarded_ports = {} $subnet = "172.17.8" -$os = "coreos-stable" +$os = "ubuntu" $box = SUPPORTED_OS[$os][:box] # The first three nodes are etcd servers $etcd_instances = $num_instances diff --git a/docs/vagrant.md b/docs/vagrant.md index 02132c140..ea08536d4 100644 --- a/docs/vagrant.md +++ b/docs/vagrant.md @@ -39,3 +39,11 @@ k8s-01 Ready 45s k8s-02 Ready 45s k8s-03 Ready 45s ``` + +Use alternative OS for Vagrant +============================== + +By default, Vagrant uses Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported +operating system for your local cluster. Change `$os` variable in `Vagrantfile` to another operating system to change +the vagrant base box. The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in +the `Vagrantfile`. 
From dad268a6864080a61b1f430f3b4f23051535a5f2 Mon Sep 17 00:00:00 2001 From: Kevin Jing Qiu Date: Fri, 2 Jun 2017 19:51:09 -0400 Subject: [PATCH 17/23] Add default ssh user for different OSes --- Vagrantfile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 2134dfa56..ebb2498ba 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -8,10 +8,10 @@ Vagrant.require_version ">= 1.8.0" CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb") SUPPORTED_OS = { - "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos"}, - "coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos"}, - "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos"}, - "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu"}, + "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core"}, + "coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core"}, + "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core"}, + "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "ubuntu"}, } # Defaults for config options defined in CONFIG @@ -64,6 +64,7 @@ Vagrant.configure("2") do |config| # always use Vagrants insecure key config.ssh.insert_key = false config.vm.box = $box + config.ssh.username = SUPPORTED_OS[$os][:user] # plugin conflict if Vagrant.has_plugin?("vagrant-vbguest") then From 6d8a415b4d7a396b57b6013eb2c09df4397e03f4 Mon Sep 17 00:00:00 2001 From: Kevin Jing Qiu Date: Fri, 2 Jun 2017 20:09:37 -0400 Subject: [PATCH 18/23] Update doc on Vagrant local override file --- docs/vagrant.md | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/docs/vagrant.md b/docs/vagrant.md index ea08536d4..1b0073799 100644 --- a/docs/vagrant.md +++ b/docs/vagrant.md @@ -40,10 +40,30 @@ k8s-02 Ready 45s k8s-03 Ready 45s ``` +Customize Vagrant +================= + +You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile` +or through an override file. + +In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it. + +You're able to override the variables defined in `Vagrantfile` by providing the value in the `vagrant/config.rb` file, +e.g.: + + echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb + +and after `vagrant up` or `vagrant reload`, your host will have port forwarding setup with the guest on port 8001. + Use alternative OS for Vagrant ============================== By default, Vagrant uses Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported -operating system for your local cluster. Change `$os` variable in `Vagrantfile` to another operating system to change -the vagrant base box. The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in -the `Vagrantfile`. +operating system for your local cluster. + +Customize `$os` variable in `Vagrantfile` or as override, e.g.,: + + echo '$os = "coreos-stable"' >> vagrant/config.rb + + +The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`. 
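Several overrides can be combined in one file; a hypothetical `vagrant/config.rb` could be written in one go and picked up on the next reload:

```sh
# hypothetical vagrant/config.rb combining a few of the documented overrides
cat > vagrant/config.rb <<'EOF'
$os = "coreos-stable"
$num_instances = 4
$forwarded_ports = {8001 => 8001}
EOF
vagrant reload
```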
From 66d8b2c18a841248d7c39588359369830b5d550f Mon Sep 17 00:00:00 2001 From: Kevin Jing Qiu Date: Sun, 4 Jun 2017 11:31:39 -0400 Subject: [PATCH 19/23] Specify coreos vagrant box url --- Vagrantfile | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index ebb2498ba..a2c2c1c8f 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -7,10 +7,12 @@ Vagrant.require_version ">= 1.8.0" CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb") +COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json" + SUPPORTED_OS = { - "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core"}, - "coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core"}, - "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core"}, + "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]}, + "coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]}, + "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]}, "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "ubuntu"}, } @@ -24,7 +26,6 @@ $shared_folders = {} $forwarded_ports = {} $subnet = "172.17.8" $os = "ubuntu" -$box = SUPPORTED_OS[$os][:box] # The first three nodes are etcd servers $etcd_instances = $num_instances # The first two nodes are masters @@ -39,6 +40,7 @@ if File.exist?(CONFIG) require CONFIG end +$box = SUPPORTED_OS[$os][:box] # if $inventory is not set, try to use example $inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory @@ -64,8 +66,10 @@ Vagrant.configure("2") do |config| # always use Vagrants insecure key config.ssh.insert_key = false config.vm.box = $box + if SUPPORTED_OS[$os].has_key? 
:box_url + config.vm.box_url = SUPPORTED_OS[$os][:box_url] + end config.ssh.username = SUPPORTED_OS[$os][:user] - # plugin conflict if Vagrant.has_plugin?("vagrant-vbguest") then config.vbguest.auto_update = false From 6e41634295fee1511469782cb12801c8c769f42c Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Wed, 7 Jun 2017 16:09:36 -0500 Subject: [PATCH 20/23] Set default value for kube_hyperkube_image_repo Fixes #1334 --- roles/kubernetes/node/defaults/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 7ef6d01e0..4e34dcc99 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -51,3 +51,9 @@ kubelet_load_modules: false ##Support custom flags to be passed to kubelet kubelet_custom_flags: [] + +# This setting is used for rkt based kubelet for deploying hyperkube +# from a docker based registry ( controls --insecure and docker:// ) +## Empty vaule for quay.io containers +## docker for docker registry containers +kube_hyperkube_image_repo: "" From db3e8edacd804b816766121f170f5126a6bad6ee Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Wed, 19 Apr 2017 15:52:51 +0000 Subject: [PATCH 21/23] Fixing up vault variables --- roles/etcd/tasks/gen_certs_vault.yml | 8 +++++++- roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml | 4 ++-- roles/vault/tasks/bootstrap/ca_trust.yml | 4 ++-- roles/vault/tasks/shared/sync.yml | 4 ++-- roles/vault/tasks/shared/sync_file.yml | 8 ++++---- 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/roles/etcd/tasks/gen_certs_vault.yml b/roles/etcd/tasks/gen_certs_vault.yml index e45b2d02d..b0dbb1a4a 100644 --- a/roles/etcd/tasks/gen_certs_vault.yml +++ b/roles/etcd/tasks/gen_certs_vault.yml @@ -31,12 +31,18 @@ register: etcd_vault_login_result when: inventory_hostname == groups.etcd|first +- name: gen_certs_vault | Set fact for vault_client_token + set_fact: + vault_client_token: "{{ etcd_vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}" + delegate_to: "{{ groups['etcd'][0] }}" + - name: gen_certs_vault | Set fact for Vault API token set_fact: etcd_vault_headers: Accept: application/json Content-Type: application/json - X-Vault-Token: "{{ hostvars[groups.etcd|first]['etcd_vault_login_result']['json']['auth']['client_token'] }}" + X-Vault-Token: "{{ vault_client_token }}" + when: vault_client_token != "" # Issue master certs to Etcd nodes - include: ../../vault/tasks/shared/issue_cert.yml diff --git a/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml b/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml index 9d6deb563..884f6c436 100644 --- a/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml +++ b/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml @@ -9,7 +9,7 @@ vars: sync_file: "{{ item }}" sync_file_dir: "{{ kube_cert_dir }}" - sync_file_group: "{{ kuber_cert_group }}" + sync_file_group: "{{ kube_cert_group }}" sync_file_hosts: "{{ groups['k8s-cluster'] }}" sync_file_is_cert: true sync_file_owner: kube @@ -29,7 +29,7 @@ vars: sync_file: ca.pem sync_file_dir: "{{ kube_cert_dir }}" - sync_file_group: "{{ kuber_cert_group }}" + sync_file_group: "{{ kube_cert_group }}" sync_file_hosts: "{{ groups['k8s-cluster'] }}" sync_file_owner: kube diff --git a/roles/vault/tasks/bootstrap/ca_trust.yml b/roles/vault/tasks/bootstrap/ca_trust.yml index 57e25610b..63ab256d5 100644 --- a/roles/vault/tasks/bootstrap/ca_trust.yml +++ b/roles/vault/tasks/bootstrap/ca_trust.yml @@ -3,7 
+3,7 @@ - name: bootstrap/ca_trust | pull CA from cert from groups.vault|first command: "cat {{ vault_cert_dir }}/ca.pem" register: vault_cert_file_cat - when: inventory_hostname == groups.vault|first + delegate_to: "{{ groups['vault']|first }}" # This part is mostly stolen from the etcd role - name: bootstrap/ca_trust | target ca-certificate store file @@ -19,7 +19,7 @@ - name: bootstrap/ca_trust | add CA to trusted CA dir copy: - content: "{{ hostvars[groups.vault|first]['vault_cert_file_cat']['stdout'] }}" + content: "{{ vault_cert_file_cat.get('stdout') }}" dest: "{{ ca_cert_path }}" register: vault_ca_cert diff --git a/roles/vault/tasks/shared/sync.yml b/roles/vault/tasks/shared/sync.yml index 02818b5f1..bbfedbc4c 100644 --- a/roles/vault/tasks/shared/sync.yml +++ b/roles/vault/tasks/shared/sync.yml @@ -12,11 +12,11 @@ - name: "sync_file | Set facts for file contents" set_fact: - sync_file_contents: "{{ hostvars[sync_file_srcs|first]['sync_file_cat']['stdout'] }}" + sync_file_contents: "{{ hostvars[sync_file_srcs|first].get('sync_file_cat', {}).get('stdout') }}" - name: "sync_file | Set fact for key contents" set_fact: - sync_file_key_contents: "{{ hostvars[sync_file_srcs|first]['sync_file_key_cat']['stdout'] }}" + sync_file_key_contents: "{{ hostvars[sync_file_srcs|first].get('sync_file_key_cat', {}).get('stdout') }}" when: sync_file_is_cert|d() - name: "sync_file | Ensure the directory exists" diff --git a/roles/vault/tasks/shared/sync_file.yml b/roles/vault/tasks/shared/sync_file.yml index 484d4aced..ef53e9d90 100644 --- a/roles/vault/tasks/shared/sync_file.yml +++ b/roles/vault/tasks/shared/sync_file.yml @@ -36,7 +36,7 @@ with_items: "{{ sync_file_hosts | unique }}" loop_control: loop_var: host_item - when: hostvars[host_item]["sync_file_stat"]["stat"]["exists"]|bool + when: hostvars[host_item].get("sync_file_stat", {}).get("stat", {}).get("exists") - name: "sync_file | Combine all possible key file sync sources" set_fact: @@ -44,7 +44,7 @@ with_items: "{{ sync_file_hosts | unique }}" loop_control: loop_var: host_item - when: sync_file_is_cert|d() and hostvars[host_item]["sync_file_key_stat"]["stat"]["exists"]|bool + when: sync_file_is_cert|d() and hostvars[host_item].get("sync_file_key_stat", {}).get("stat", {}).get("exists") - name: "sync_file | Remove sync sources with files that do not match sync_file_srcs|first" set_fact: @@ -52,7 +52,7 @@ when: >- sync_file_srcs|d([])|length > 1 and inventory_hostname != sync_file_srcs|first and - sync_file_stat.stat.get("checksum") != hostvars[sync_file_srcs|first]["sync_file_stat"]["stat"]["checksum"] + sync_file_stat.stat.get("checksum") != hostvars[sync_file_srcs|first].get("sync_file_stat", {}).get("stat", {}).get("checksum") - name: "sync_file | Remove sync sources with keys that do not match sync_file_srcs|first" set_fact: @@ -61,7 +61,7 @@ sync_file_is_cert|d() and sync_file_key_srcs|d([])|length > 1 and inventory_hostname != sync_file_key_srcs|first and - sync_file_key_stat.stat.checksum != hostvars[sync_file_srcs|first]["sync_file_key_stat"]["stat"]["checksum"] + sync_file_key_stat.stat.get("checksum") != hostvars[sync_file_srcs|first].get("sync_file_key_stat", {}).get("stat", {}).get("checksum") - name: "sync_file | Consolidate file and key sources" set_fact: From 91dff61008be04bcad0becdfde27d7c7e75c5dd1 Mon Sep 17 00:00:00 2001 From: Seungkyu Ahn Date: Mon, 19 Jun 2017 15:33:50 +0900 Subject: [PATCH 22/23] Fixed helm bash complete --- roles/kubernetes-apps/helm/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 1d50f8b9b..f12875da2 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -15,5 +15,5 @@ when: helm_container.changed - name: Helm | Set up bash completion - shell: "umask 022 && {{ bin_dir }}/helm completion >/etc/bash_completion.d/helm.sh" + shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh" when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] ) From bb6f727f25aaa626698688fac2faa4872635a88d Mon Sep 17 00:00:00 2001 From: vgkowski Date: Mon, 19 Jun 2017 15:48:34 +0200 Subject: [PATCH 23/23] Update openstack documentation with Calico Linked to the issue https://github.com/kubernetes-incubator/kubespray/issues/1359 --- docs/openstack.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/docs/openstack.md b/docs/openstack.md index 1a82133c0..77bb293bf 100644 --- a/docs/openstack.md +++ b/docs/openstack.md @@ -37,12 +37,8 @@ Then you can use the instance ids to find the connected [neutron](https://wiki.o Given the port ids on the left, you can set the `allowed_address_pairs` in neutron: - # allow kube_service_addresses network - neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 - neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 - - # allow kube_pods_subnet network - neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18 - neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18 + # allow kube_service_addresses and kube_pods_subnet network + neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18 + neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18 Now you can finally run the playbook.
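If the cluster has more than a couple of nodes, the same update can be scripted over all of the port ids (a sketch reusing the example ids and subnets above):

```sh
# apply the combined allowed_address_pairs to every node's neutron port
for port in 5662a4e0-e646-47f0-bf88-d80fbd2d99ef e5ae2045-a1e1-4e99-9aac-4353889449a7; do
  neutron port-update "$port" --allowed_address_pairs list=true type=dict \
    ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
done
```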