diff --git a/roles/container-engine/docker/handlers/main.yml b/roles/container-engine/docker/handlers/main.yml
index 2847088e1..7fadc62da 100644
--- a/roles/container-engine/docker/handlers/main.yml
+++ b/roles/container-engine/docker/handlers/main.yml
@@ -5,7 +5,6 @@
     - Docker | reload systemd
     - Docker | reload docker.socket
     - Docker | reload docker
-    - Docker | pause while Docker restarts
    - Docker | wait for docker
 
 - name: Docker | reload systemd
@@ -23,14 +22,9 @@
     name: docker
     state: restarted
 
-- name: Docker | pause while Docker restarts
-  pause:
-    seconds: 10
-    prompt: "Waiting for docker restart"
-
 - name: Docker | wait for docker
   command: "{{ docker_bin_dir }}/docker images"
   register: docker_ready
-  retries: 10
-  delay: 5
+  retries: 20
+  delay: 1
   until: docker_ready.rc == 0
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index d9709fd56..a46cac0c7 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -40,8 +40,8 @@
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
   register: result
   until: result.status is defined and result.status == 200
-  retries: 10
-  delay: 5
+  retries: 60
+  delay: 1
 
 - name: wait for etcd-events up
   uri:
@@ -51,8 +51,8 @@
     client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem"
   register: result
   until: result.status is defined and result.status == 200
-  retries: 10
-  delay: 5
+  retries: 60
+  delay: 1
 
 - name: set etcd_secret_changed
   set_fact:
diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
index 56a2bd8a8..b5f93e4d2 100644
--- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -1,31 +1,4 @@
 ---
-- name: Kubernetes Apps | Delete old CoreDNS resources
-  kube:
-    name: "coredns"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'deploy'
-    - 'configmap'
-    - 'svc'
-  tags:
-    - upgrade
-
-- name: Kubernetes Apps | Delete old nodelocalDNS resources
-  kube:
-    name: "nodelocaldns"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'deamonset'
-    - 'configmap'
-  tags:
-    - upgrade
-
 - name: Kubernetes Apps | Delete kubeadm CoreDNS
   kube:
     name: "coredns"
@@ -37,41 +10,3 @@
     - kubeadm_init is defined
     - kubeadm_init.changed|default(false)
     - inventory_hostname == groups['kube-master'][0]
-
-- name: Kubernetes Apps | Delete old KubeDNS resources
-  kube:
-    name: "kube-dns"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'deploy'
-    - 'svc'
-  tags:
-    - upgrade
-
-- name: Kubernetes Apps | Delete kubeadm KubeDNS
-  kube:
-    name: "kube-dns"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'deploy'
-    - 'svc'
-  when:
-    - kubeadm_init is defined
-    - kubeadm_init.changed|default(false)
-    - inventory_hostname == groups['kube-master'][0]
-
-- name: Kubernetes Apps | Delete old KubeDNS Autoscaler deployment
-  kube:
-    name: "kubedns-autoscaler"
-    namespace: "kube-system"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "deploy"
-    state: absent
-  tags:
-    - upgrade
diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
index 4c9ad5c74..067830446 100644
--- a/roles/kubernetes-apps/ansible/tasks/dashboard.yml
+++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
@@ -1,15 +1,4 @@
 ---
-- name: Kubernetes Apps | Delete old kubernetes-dashboard resources
-  kube:
-    name: "kubernetes-dashboard"
-    kubectl: "{{ bin_dir }}/kubectl"
-    resource: "{{ item }}"
-    state: absent
-  with_items:
-    - 'ClusterRoleBinding'
-  tags:
-    - upgrade
-
 - name: Kubernetes Apps | Lay down dashboard template
   template:
     src: "{{ item.file }}.j2"
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index 166b4ec4c..63c4351de 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -7,8 +7,8 @@
     client_key: "{{ kube_apiserver_client_key }}"
   register: result
   until: result.status == 200
-  retries: 10
-  delay: 2
+  retries: 20
+  delay: 1
   when: inventory_hostname == groups['kube-master'][0]
 
 - name: Kubernetes Apps | Cleanup DNS
diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
index 01720eaf7..cf115db77 100644
--- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml
+++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml
@@ -51,15 +51,6 @@
   when:
     - inventory_hostname == groups['kube-master'][0]
 
-- name: Kubernetes Apps | Purge old Netchecker server
-  kube:
-    name: "netchecker-server"
-    namespace: "{{ netcheck_namespace }}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "po"
-    state: absent
-  when: inventory_hostname == groups['kube-master'][0]
-
 - name: Kubernetes Apps | Start Netchecker Resources
   kube:
     name: "{{item.item.name}}"
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
index c3c5d9c17..6858426f2 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml
@@ -1,23 +1,5 @@
 ---
 
-- name: NGINX Ingress Controller | Remove legacy addon dir and manifests
-  file:
-    path: "{{ kube_config_dir }}/addons/ingress_nginx"
-    state: absent
-  when:
-    - inventory_hostname == groups['kube-master'][0]
-  tags:
-    - upgrade
-
-- name: NGINX Ingress Controller | Remove legacy namespace
-  shell: |
-    {{ bin_dir }}/kubectl delete namespace {{ ingress_nginx_namespace }}
-  ignore_errors: yes
-  when:
-    - inventory_hostname == groups['kube-master'][0]
-  tags:
-    - upgrade
-
 - name: NGINX Ingress Controller | Create addon dir
   file:
     path: "{{ kube_config_dir }}/addons/ingress_nginx"
diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml
index 8e753e56a..edd91232f 100644
--- a/roles/kubernetes/master/handlers/main.yml
+++ b/roles/kubernetes/master/handlers/main.yml
@@ -43,23 +43,23 @@
 - name: Master | Remove apiserver container
   shell: "docker ps -af name=k8s_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f"
   register: remove_apiserver_container
-  retries: 4
+  retries: 10
   until: remove_apiserver_container.rc == 0
-  delay: 5
+  delay: 1
 
 - name: Master | Remove scheduler container
   shell: "docker ps -af name=k8s_kube-scheduler* -q | xargs --no-run-if-empty docker rm -f"
   register: remove_scheduler_container
-  retries: 4
+  retries: 10
   until: remove_scheduler_container.rc == 0
-  delay: 5
+  delay: 1
 
 - name: Master | Remove controller manager container
   shell: "docker ps -af name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty docker rm -f"
   register: remove_cm_container
-  retries: 4
+  retries: 10
   until: remove_cm_container.rc == 0
-  delay: 5
+  delay: 1
 
 - name: Master | wait for kube-scheduler
   uri:
@@ -67,15 +67,15 @@
   register: scheduler_result
   until: scheduler_result.status == 200
   retries: 60
-  delay: 5
+  delay: 1
 
 - name: Master | wait for kube-controller-manager
   uri:
     url: http://localhost:10252/healthz
   register: controller_manager_result
   until: controller_manager_result.status == 200
-  retries: 15
-  delay: 5
+  retries: 60
+  delay: 1
 
 - name: Master | wait for the apiserver to be running
   uri:
@@ -85,8 +85,8 @@
     client_key: "{{ kube_apiserver_client_key }}"
   register: result
   until: result.status == 200
-  retries: 30
-  delay: 10
+  retries: 60
+  delay: 1
 
 - name: Master | set secret_changed
   command: /bin/true
diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml
index 371f03847..3fd9855ea 100644
--- a/roles/kubernetes/master/tasks/pre-upgrade.yml
+++ b/roles/kubernetes/master/tasks/pre-upgrade.yml
@@ -1,29 +1,12 @@
 ---
-- name: "Pre-upgrade | etcd3 upgrade | see if old config exists"
-  command: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} ls /registry/minions"
-  environment:
-    ETCDCTL_API: 2
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
-  register: old_data_exists
-  delegate_to: "{{groups['etcd'][0]}}"
-  changed_when: false
-  when: kube_apiserver_storage_backend == "etcd3"
-  failed_when: false
-
-- name: "Pre-upgrade | etcd3 upgrade | use etcd2 unless forced to etcd3"
-  set_fact:
-    kube_apiserver_storage_backend: "etcd2"
-  when: old_data_exists.rc == 0 and not force_etcd3|bool
-
-- name: "Pre-upgrade | Delete master manifests"
+- name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
   file:
     path: "/etc/kubernetes/manifests/{{item}}.manifest"
     state: absent
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   register: kube_apiserver_manifest_replaced
-  when: (secret_changed|default(false) or etcd_secret_changed|default(false))
+  when: etcd_secret_changed|default(false)
 
 - name: "Pre-upgrade | Delete master containers forcefully"
   shell: "docker ps -af name=k8s_{{item}}* -q | xargs --no-run-if-empty docker rm -f"
@@ -31,6 +14,6 @@
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when: kube_apiserver_manifest_replaced.changed
   register: remove_master_container
-  retries: 4
+  retries: 10
   until: remove_master_container.rc == 0
-  delay: 5
+  delay: 1
diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml
index c0c3aee3e..0124fe237 100644
--- a/roles/network_plugin/flannel/tasks/main.yml
+++ b/roles/network_plugin/flannel/tasks/main.yml
@@ -1,6 +1,4 @@
 ---
-- import_tasks: pre-upgrade.yml
-
 - name: Flannel | Create Flannel manifests
   template:
     src: "{{item.file}}.j2"
diff --git a/roles/network_plugin/flannel/tasks/pre-upgrade.yml b/roles/network_plugin/flannel/tasks/pre-upgrade.yml
deleted file mode 100644
index ef50ceb09..000000000
--- a/roles/network_plugin/flannel/tasks/pre-upgrade.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Flannel pre-upgrade | Purge legacy flannel systemd unit file
-  file:
-    path: "/etc/systemd/system/docker.service.d/flannel-options.conf"
-    state: absent
-  notify:
-    - Flannel | delete default docker bridge
-
-- name: Flannel pre-upgrade | Purge legacy Flannel static pod manifest
-  file:
-    path: "{{ kube_manifest_dir }}/flannel-pod.manifest"
-    state: absent
-  notify:
-    - Flannel | delete flannel interface
-
-- name: Flannel pre-upgrade | Remove Flannel's certificate directory not required by CNI
-  file:
-    dest: "{{ flannel_cert_dir }}"
-    state: absent
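Note on the timing changes: the retry loops touched by this diff converge on short, frequent polling (delay: 1 with a larger retries count) instead of long fixed sleeps, so each handler returns as soon as the service is healthy while keeping roughly the same overall timeout. A minimal standalone sketch of that pattern follows; it is illustrative only and not part of the patch (the task name is made up, and the kube-scheduler healthz URL is reused from the Master handlers above):

- name: Example | poll a healthz endpoint until it responds
  uri:
    url: http://localhost:10251/healthz   # kube-scheduler healthz, as in the Master handlers above
  register: result
  until: result.status == 200
  retries: 60   # combined with delay: 1, allows roughly 60s before failing
  delay: 1      # poll every second instead of sleeping in 5-10s increments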