Enable HA deploy of kubeadm (#1658)
* Enable HA deploy of kubeadm
* raise delay to 60s for starting gce hosts
parent b294db5aed
commit 8e731337ba
@@ -168,7 +168,9 @@ before_script:

   # Tests Cases
   ## Test Master API
-  - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+  - >
+    ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+    -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"

   ## Ping the between 2 pod
   - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
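Side note on the "- >" form added here: it is YAML's folded block scalar, so the indented lines below it are folded into one space-separated string and the extra -e flag ends up on the same ansible-playbook command line. A minimal sketch, with a command that is purely illustrative and not from this repository:

    script:
      - >
        echo part-one
        part-two
    # runs as a single command: echo part-one part-two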
@@ -301,7 +303,7 @@ before_script:
   CLOUD_IMAGE: ubuntu-1604-xenial
   CLOUD_MACHINE_TYPE: "n1-standard-2"
   CLOUD_REGION: europe-west1-b
-  CLUSTER_MODE: default
+  CLUSTER_MODE: ha
   KUBEADM_ENABLED: "true"
   STARTUP_SCRIPT: ""
@@ -19,7 +19,8 @@ download_always_pull: False

 # Versions
 kube_version: v1.7.5
-kubeadm_version: "{{ kube_version }}"
+# Change to kube_version after v1.8.0 release
+kubeadm_version: "v1.8.0-beta.1"
 etcd_version: v3.2.4
 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
@@ -38,7 +39,7 @@ kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release

 # Checksums
 etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b"
-kubeadm_checksum: "378e6052f8b178f8e6a38e8637681c72d389443b66b78b51b8ddc9a162c655c3"
+kubeadm_checksum: "ddd5949699d6bdbc0b90b379e7e534f137b1058db1acc8f26cc54843f017ffbf"

 # Containers
 # Possible values: host, docker
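The new checksum keeps the sha256 pin in step with the kubeadm_version bump above. As a hedged aside, the usual way such a pin is enforced in Ansible looks roughly like the sketch below; the task is illustrative and not necessarily how this repository's download role fetches the binary:

    - name: Download kubeadm and verify it against the pinned sha256
      get_url:
        url: "{{ kubeadm_download_url }}"
        dest: "{{ bin_dir }}/kubeadm"
        mode: 0755
        checksum: "sha256:{{ kubeadm_checksum }}"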
@@ -10,6 +10,11 @@
   when: not is_kube_master
   tags: facts

+- name: Check if kubelet.conf exists
+  stat:
+    path: "{{ kube_config_dir }}/kubelet.conf"
+  register: kubelet_conf
+
 - name: Create kubeadm client config
   template:
     src: kubeadm-client.conf.j2
@@ -21,7 +26,13 @@
 - name: Join to cluster if needed
   command: kubeadm join --config {{ kube_config_dir}}/kubeadm-client.conf --skip-preflight-checks
   register: kubeadm_join
-  when: not is_kube_master and kubeadm_client_conf.changed
+  when: not is_kube_master and (kubeadm_client_conf.changed or not kubelet_conf.stat.exists)
+
+- name: Wait for kubelet bootstrap to create config
+  wait_for:
+    path: "{{ kube_config_dir }}/kubelet.conf"
+    delay: 1
+    timeout: 60

 - name: Update server field in kubelet kubeconfig
   replace:
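Taken together, the stat check, the widened when condition and the new wait_for make the node join idempotent: kubeadm join only runs when the client config changed or kubelet.conf is still missing, and the play then waits for the kubelet bootstrap to write that file. The same guard-plus-wait pattern in isolation, with purely illustrative paths and commands:

    - name: Check whether the one-time step already ran
      stat:
        path: /etc/myapp/bootstrapped
      register: marker

    - name: Run the one-time step only when the marker file is missing
      command: /usr/local/bin/bootstrap-myapp
      when: not marker.stat.exists

    - name: Block until the step has produced its output file
      wait_for:
        path: /etc/myapp/bootstrapped
        delay: 1
        timeout: 60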
@@ -14,7 +14,7 @@
       {{ apiserver_loadbalancer_domain_name }}
       {%- endif %}
       {%- for host in groups['kube-master'] -%}
-      {%- if hostvars[host]['access_ip'] is defined %}{{ hostvars[host]['access_ip'] }}{% endif -%}
+      {%- if hostvars[host]['access_ip'] is defined %}{{ hostvars[host]['access_ip'] }}{% endif %}
       {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
       {%- endfor %}
   tags: facts
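The visible change in this hunk is the dropped dash in the closing endif tag. In Jinja2 a dash on the closing side, as in -%}, strips the whitespace and newline that follow the tag, so the old form tended to glue the access_ip to the fallback address on the next line; without the dash the two stay on separate lines. A tiny illustrative fragment, not taken from the repository:

    {% if true %}A{% endif -%}
    B   {# whitespace after endif is stripped, renders roughly as AB #}

    {% if true %}A{% endif %}
    B   {# newline kept, renders A and B on separate lines #}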
@@ -29,7 +29,54 @@
     dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
   register: kubeadm_config

-- name: kubeadm | Initialize cluster
+- name: Check if kubeadm has already run
+  stat:
+    path: "{{ kube_config_dir }}/admin.conf"
+  register: admin_conf
+
+
+- name: kubeadm | Initialize first master
   command: timeout -k 240s 240s kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks
   register: kubeadm_init
-  when: kubeadm_config.changed
+  #Retry is because upload config sometimes fails
+  retries: 3
+  when: inventory_hostname == groups['kube-master']|first and (kubeadm_config.changed or not admin_conf.stat.exists)
+
+- name: slurp kubeadm certs
+  slurp:
+    src: "{{ item }}"
+  with_items:
+    - "{{ kube_cert_dir }}/apiserver.crt"
+    - "{{ kube_cert_dir }}/apiserver.key"
+    - "{{ kube_cert_dir }}/apiserver-kubelet-client.crt"
+    - "{{ kube_cert_dir }}/apiserver-kubelet-client.key"
+    - "{{ kube_cert_dir }}/ca.crt"
+    - "{{ kube_cert_dir }}/ca.key"
+    - "{{ kube_cert_dir }}/front-proxy-ca.crt"
+    - "{{ kube_cert_dir }}/front-proxy-ca.key"
+    - "{{ kube_cert_dir }}/front-proxy-client.crt"
+    - "{{ kube_cert_dir }}/front-proxy-client.key"
+    - "{{ kube_cert_dir }}/sa.key"
+    - "{{ kube_cert_dir }}/sa.pub"
+  register: kubeadm_certs
+  delegate_to: "{{ groups['kube-master']|first }}"
+  run_once: true
+
+- name: write out kubeadm certs
+  copy:
+    dest: "{{ item.item }}"
+    content: "{{ item.content | b64decode }}"
+    owner: root
+    group: root
+    mode: 0700
+  no_log: true
+  register: copy_kubeadm_certs
+  with_items: "{{ kubeadm_certs.results }}"
+  when: inventory_hostname != groups['kube-master']|first
+
+- name: kubeadm | Initialize other masters
+  command: timeout -k 240s 240s kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks
+  register: kubeadm_init
+  #Retry is because upload config sometimes fails
+  retries: 3
+  when: inventory_hostname != groups['kube-master']|first and (kubeadm_config.changed or not admin_conf.stat.exists or copy_kubeadm_certs.changed)
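The slurp and copy pair is the stock Ansible pattern for fanning files out from one host to the rest, and it is what lets the other masters run kubeadm init against the same CA and service-account keys as the first one. A minimal self-contained sketch of the same idea, with an illustrative path:

    - name: Read a file from the first master
      slurp:
        src: /etc/kubernetes/ssl/ca.crt
      register: ca_file
      delegate_to: "{{ groups['kube-master']|first }}"
      run_once: true

    - name: Write the same bytes out on every other master
      copy:
        dest: /etc/kubernetes/ssl/ca.crt
        content: "{{ ca_file.content | b64decode }}"
        mode: 0600
      when: inventory_hostname != groups['kube-master']|first

slurp returns the file base64-encoded, which is why the copy side pipes it through b64decode before writing.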
@@ -2,6 +2,12 @@
 - include: pre-upgrade.yml
   tags: k8s-pre-upgrade

+# upstream bug: https://github.com/kubernetes/kubeadm/issues/441
+- name: Disable kube_basic_auth until kubeadm/441 is fixed
+  set_fact:
+    kube_basic_auth: false
+  when: kubeadm_enabled|bool|default(false)
+
 - include: users-file.yml
   when: kube_basic_auth|default(true)
@@ -2,9 +2,9 @@ apiVersion: kubeadm.k8s.io/v1alpha1
 kind: MasterConfiguration
 api:
   advertiseAddress: {{ ip | default(ansible_default_ipv4.address) }}
-  bindPort: "{{ kube_apiserver_port }}"
+  bindPort: {{ kube_apiserver_port }}
 etcd:
-  endpoints:
+  endpoints:
 {% for endpoint in etcd_access_endpoint.split(',') %}
 - {{ endpoint }}
 {% endfor %}
@@ -18,17 +18,19 @@ networking:
 kubernetesVersion: {{ kube_version }}
 cloudProvider: {{ cloud_provider|default('') }}
 #TODO: cloud provider conf file
-authorizationModes:
+authorizationModes:
+- Node
 {% for mode in authorization_modes %}
 - {{ mode }}
 {% endfor %}
 token: {{ kubeadm_token }}
-tokenTTL: {{ kubeadm_token_ttl }}
+tokenTTL: "{{ kubeadm_token_ttl }}"
 selfHosted: false
 apiServerExtraArgs:
   insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
   insecure-port: "{{ kube_apiserver_insecure_port }}"
   admission-control: {{ kube_apiserver_admission_control | join(',') }}
+  apiserver-count: "{{ kube_apiserver_count }}"
   service-node-port-range: {{ kube_apiserver_node_port_range }}
 {% if kube_basic_auth|default(true) %}
   basic-auth-file: {{ kube_users_dir }}/known_users.csv
@@ -65,3 +67,4 @@ apiServerCertSANs:
 - {{ san }}
 {% endfor %}
 certificatesDir: {{ kube_config_dir }}/ssl
+unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}"
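For orientation, the template renders to a kubeadm MasterConfiguration roughly like the sketch below once the variables are filled in; every concrete value here is illustrative rather than taken from a real inventory:

    apiVersion: kubeadm.k8s.io/v1alpha1
    kind: MasterConfiguration
    api:
      advertiseAddress: 10.0.0.11
      bindPort: 6443
    etcd:
      endpoints:
      - https://10.0.0.11:2379
    networking:
      dnsDomain: cluster.local
      serviceSubnet: 10.233.0.0/18
      podSubnet: 10.233.64.0/18
    kubernetesVersion: v1.7.5
    authorizationModes:
    - Node
    - RBAC
    token: abcdef.0123456789abcdef
    tokenTTL: "24h"
    selfHosted: false
    apiServerExtraArgs:
      apiserver-count: "3"
    apiServerCertSANs:
    - 10.0.0.11
    certificatesDir: /etc/kubernetes/ssl
    unifiedControlPlaneImage: "example.registry/hyperkube:v1.7.5"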
@@ -15,8 +15,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {# Base kubelet args #}
 {% set kubelet_args_base -%}
 {# start kubeadm specific settings #}
+--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \
 --kubeconfig={{ kube_config_dir }}/kubelet.conf \
---require-kubeconfig=true \
+--require-kubeconfig \
 --authorization-mode=Webhook \
 --client-ca-file={{ kube_cert_dir }}/ca.crt \
 --pod-manifest-path={{ kube_manifest_dir }} \
@@ -109,6 +109,7 @@
     - "{{ bin_dir }}/etcdctl"
     - "{{ bin_dir }}/kubernetes-scripts"
     - "{{ bin_dir }}/kubectl"
+    - "{{ bin_dir }}/kubeadm"
     - "{{ bin_dir }}/helm"
     - "{{ bin_dir }}/calicoctl"
     - "{{ bin_dir }}/weave"
@@ -35,7 +35,7 @@
   register: gce

 - name: Add instances to host group
-  add_host: hostname={{item.name}} ansible_host={{item.public_ip}} groupname="waitfor_hosts"
+  add_host: hostname={{item.public_ip}} groupname="waitfor_hosts"
   with_items: '{{gce.instance_data}}'

 - name: Template the inventory
@@ -61,4 +61,4 @@
   gather_facts: false
   tasks:
     - name: Wait for SSH to come up.
-      local_action: wait_for host={{inventory_hostname}} port=22 delay=5 timeout=240 state=started
+      local_action: wait_for host={{inventory_hostname}} port=22 delay=60 timeout=240 state=started
@@ -9,3 +9,4 @@
     password: "{{ lookup('password', '../../credentials/kube_user length=15 chars=ascii_letters,digits') }}"
     validate_certs: no
     status_code: 200
+  when: not kubeadm_enabled|default(false)