diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c27f07bca..731517683 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -168,7 +168,9 @@ before_script: # Tests Cases ## Test Master API - - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL + - > + ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL + -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}" ## Ping the between 2 pod - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL @@ -301,7 +303,7 @@ before_script: CLOUD_IMAGE: ubuntu-1604-xenial CLOUD_MACHINE_TYPE: "n1-standard-2" CLOUD_REGION: europe-west1-b - CLUSTER_MODE: default + CLUSTER_MODE: ha KUBEADM_ENABLED: "true" STARTUP_SCRIPT: "" diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 4c01e046e..af30908c1 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -19,7 +19,8 @@ download_always_pull: False # Versions kube_version: v1.7.5 -kubeadm_version: "{{ kube_version }}" +# Change to kube_version after v1.8.0 release +kubeadm_version: "v1.8.0-beta.1" etcd_version: v3.2.4 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults # after migration to container download @@ -38,7 +39,7 @@ kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release # Checksums etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b" -kubeadm_checksum: 
"378e6052f8b178f8e6a38e8637681c72d389443b66b78b51b8ddc9a162c655c3" +kubeadm_checksum: "ddd5949699d6bdbc0b90b379e7e534f137b1058db1acc8f26cc54843f017ffbf" # Containers # Possible values: host, docker diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml index ddc86b2f3..3da0b6707 100644 --- a/roles/kubernetes/kubeadm/tasks/main.yml +++ b/roles/kubernetes/kubeadm/tasks/main.yml @@ -10,6 +10,11 @@ when: not is_kube_master tags: facts +- name: Check if kubelet.conf exists + stat: + path: "{{ kube_config_dir }}/kubelet.conf" + register: kubelet_conf + - name: Create kubeadm client config template: src: kubeadm-client.conf.j2 @@ -21,7 +26,13 @@ - name: Join to cluster if needed command: kubeadm join --config {{ kube_config_dir}}/kubeadm-client.conf --skip-preflight-checks register: kubeadm_join - when: not is_kube_master and kubeadm_client_conf.changed + when: not is_kube_master and (kubeadm_client_conf.changed or not kubelet_conf.stat.exists) + +- name: Wait for kubelet bootstrap to create config + wait_for: + path: "{{ kube_config_dir }}/kubelet.conf" + delay: 1 + timeout: 60 - name: Update server field in kubelet kubeconfig replace: diff --git a/roles/kubernetes/master/tasks/kubeadm-setup.yml b/roles/kubernetes/master/tasks/kubeadm-setup.yml index cef97e2b0..03254e481 100644 --- a/roles/kubernetes/master/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/master/tasks/kubeadm-setup.yml @@ -14,7 +14,7 @@ {{ apiserver_loadbalancer_domain_name }} {%- endif %} {%- for host in groups['kube-master'] -%} - {%- if hostvars[host]['access_ip'] is defined %}{{ hostvars[host]['access_ip'] }}{% endif -%} + {%- if hostvars[host]['access_ip'] is defined %}{{ hostvars[host]['access_ip'] }}{% endif %} {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} {%- endfor %} tags: facts @@ -29,7 +29,54 @@ dest: "{{ kube_config_dir }}/kubeadm-config.yaml" register: kubeadm_config -- name: kubeadm | Initialize cluster +- 
name: Check if kubeadm has already run + stat: + path: "{{ kube_config_dir }}/admin.conf" + register: admin_conf + + +- name: kubeadm | Initialize first master command: timeout -k 240s 240s kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks register: kubeadm_init - when: kubeadm_config.changed + # Retry is because upload config sometimes fails + retries: 3 + when: inventory_hostname == groups['kube-master']|first and (kubeadm_config.changed or not admin_conf.stat.exists) + +- name: slurp kubeadm certs + slurp: + src: "{{ item }}" + with_items: + - "{{ kube_cert_dir }}/apiserver.crt" + - "{{ kube_cert_dir }}/apiserver.key" + - "{{ kube_cert_dir }}/apiserver-kubelet-client.crt" + - "{{ kube_cert_dir }}/apiserver-kubelet-client.key" + - "{{ kube_cert_dir }}/ca.crt" + - "{{ kube_cert_dir }}/ca.key" + - "{{ kube_cert_dir }}/front-proxy-ca.crt" + - "{{ kube_cert_dir }}/front-proxy-ca.key" + - "{{ kube_cert_dir }}/front-proxy-client.crt" + - "{{ kube_cert_dir }}/front-proxy-client.key" + - "{{ kube_cert_dir }}/sa.key" + - "{{ kube_cert_dir }}/sa.pub" + register: kubeadm_certs + delegate_to: "{{ groups['kube-master']|first }}" + run_once: true + +- name: write out kubeadm certs + copy: + dest: "{{ item.item }}" + content: "{{ item.content | b64decode }}" + owner: root + group: root + mode: "0700" + no_log: true + register: copy_kubeadm_certs + with_items: "{{ kubeadm_certs.results }}" + when: inventory_hostname != groups['kube-master']|first + +- name: kubeadm | Initialize other masters + command: timeout -k 240s 240s kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks + register: kubeadm_init + # Retry is because upload config sometimes fails + retries: 3 + when: inventory_hostname != groups['kube-master']|first and (kubeadm_config.changed or not admin_conf.stat.exists or copy_kubeadm_certs.changed) diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index 
b6158d9e5..822323e7f 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -2,6 +2,12 @@ - include: pre-upgrade.yml tags: k8s-pre-upgrade +# upstream bug: https://github.com/kubernetes/kubeadm/issues/441 +- name: Disable kube_basic_auth until kubeadm/441 is fixed + set_fact: + kube_basic_auth: false + when: kubeadm_enabled|default(false)|bool + - include: users-file.yml when: kube_basic_auth|default(true) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index 7cac5c16e..7e906efa9 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -2,9 +2,9 @@ apiVersion: kubeadm.k8s.io/v1alpha1 kind: MasterConfiguration api: advertiseAddress: {{ ip | default(ansible_default_ipv4.address) }} - bindPort: "{{ kube_apiserver_port }}" + bindPort: {{ kube_apiserver_port }} etcd: - endpoints: + endpoints: {% for endpoint in etcd_access_endpoint.split(',') %} - {{ endpoint }} {% endfor %} @@ -18,17 +18,19 @@ networking: kubernetesVersion: {{ kube_version }} cloudProvider: {{ cloud_provider|default('') }} #TODO: cloud provider conf file -authorizationModes: +authorizationModes: +- Node {% for mode in authorization_modes %} - {{ mode }} {% endfor %} token: {{ kubeadm_token }} -tokenTTL: {{ kubeadm_token_ttl }} +tokenTTL: "{{ kubeadm_token_ttl }}" selfHosted: false apiServerExtraArgs: insecure-bind-address: {{ kube_apiserver_insecure_bind_address }} insecure-port: "{{ kube_apiserver_insecure_port }}" admission-control: {{ kube_apiserver_admission_control | join(',') }} + apiserver-count: "{{ kube_apiserver_count }}" service-node-port-range: {{ kube_apiserver_node_port_range }} {% if kube_basic_auth|default(true) %} basic-auth-file: {{ kube_users_dir }}/known_users.csv @@ -65,3 +67,4 @@ apiServerCertSANs: - {{ san }} {% endfor %} certificatesDir: {{ kube_config_dir }}/ssl 
+unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}" diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 index df55be4cf..236b40b5c 100644 --- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 @@ -15,8 +15,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {# Base kubelet args #} {% set kubelet_args_base -%} {# start kubeadm specific settings #} +--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ --kubeconfig={{ kube_config_dir }}/kubelet.conf \ ---require-kubeconfig=true \ +--require-kubeconfig \ --authorization-mode=Webhook \ --client-ca-file={{ kube_cert_dir }}/ca.crt \ --pod-manifest-path={{ kube_manifest_dir }} \ diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 624e7135e..59251b02b 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -109,6 +109,7 @@ - "{{ bin_dir }}/etcdctl" - "{{ bin_dir }}/kubernetes-scripts" - "{{ bin_dir }}/kubectl" + - "{{ bin_dir }}/kubeadm" - "{{ bin_dir }}/helm" - "{{ bin_dir }}/calicoctl" - "{{ bin_dir }}/weave" diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml index afa757719..ee90b35b2 100644 --- a/tests/cloud_playbooks/create-gce.yml +++ b/tests/cloud_playbooks/create-gce.yml @@ -35,7 +35,7 @@ register: gce - name: Add instances to host group - add_host: hostname={{item.name}} ansible_host={{item.public_ip}} groupname="waitfor_hosts" + add_host: hostname={{item.public_ip}} groupname="waitfor_hosts" with_items: '{{gce.instance_data}}' - name: Template the inventory @@ -61,4 +61,4 @@ gather_facts: false tasks: - name: Wait for SSH to come up. 
- local_action: wait_for host={{inventory_hostname}} port=22 delay=5 timeout=240 state=started + local_action: wait_for host={{inventory_hostname}} port=22 delay=60 timeout=240 state=started diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml index 5b053fd4b..b86a537fa 100644 --- a/tests/testcases/010_check-apiserver.yml +++ b/tests/testcases/010_check-apiserver.yml @@ -9,3 +9,4 @@ password: "{{ lookup('password', '../../credentials/kube_user length=15 chars=ascii_letters,digits') }}" validate_certs: no status_code: 200 + when: not kubeadm_enabled|default(false)