From c75f394707dc3e92e7291e8e5751ef1f7f142c94 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 13 Dec 2016 11:43:06 +0100 Subject: [PATCH] Address standalone kubelet config case Also place in global vars and do not repeat the kube_*_config_dir and kube_namespace vars for better code maintainability and UX. Signed-off-by: Bogdan Dobrelya --- .../kubernetes-pv/ansible/tasks/main.yaml | 4 ++-- inventory/group_vars/all.yml | 22 +++++++++++++++++ roles/dnsmasq/tasks/main.yml | 6 ++--- roles/dnsmasq/templates/dnsmasq-ds.yml | 2 +- roles/dnsmasq/templates/dnsmasq-svc.yml | 2 +- .../kubernetes-apps/ansible/defaults/main.yml | 3 --- .../tasks/calico-policy-controller.yml | 2 +- roles/kubernetes-apps/ansible/tasks/main.yaml | 2 +- .../templates/calico-policy-controller.yml.j2 | 4 ++-- .../ansible/templates/kubedns-rc.yml | 2 +- .../ansible/templates/kubedns-svc.yml | 2 +- .../network_plugin/canal/tasks/main.yaml | 8 +++---- roles/kubernetes/master/defaults/main.yml | 24 ------------------- roles/kubernetes/master/files/namespace.yml | 2 +- roles/kubernetes/master/tasks/main.yml | 15 ++++++------ .../manifests/kube-apiserver.manifest.j2 | 2 +- .../kube-controller-manager.manifest.j2 | 2 +- roles/kubernetes/master/vars/main.yml | 2 +- roles/kubernetes/node/defaults/main.yml | 20 ---------------- roles/kubernetes/node/tasks/main.yml | 5 ++++ roles/kubernetes/node/tasks/nginx-proxy.yml | 2 +- .../node/templates/deb-kubelet.initd.j2 | 2 +- .../node/templates/kubelet-container.j2 | 2 +- roles/kubernetes/node/templates/kubelet.j2 | 16 ++++++++----- .../node/templates/kubelet.service.j2 | 2 +- .../manifests/kube-proxy.manifest.j2 | 12 +++++----- .../manifests/nginx-proxy.manifest.j2 | 2 +- .../node/templates/rh-kubelet.initd.j2 | 4 ++-- roles/kubernetes/preinstall/defaults/main.yml | 20 ---------------- roles/kubernetes/secrets/defaults/main.yml | 21 ---------------- roles/network_plugin/canal/tasks/main.yml | 4 ++-- roles/network_plugin/flannel/tasks/main.yml | 2 +- .../flannel/templates/flannel-pod.yml | 2 +- roles/reset/tasks/main.yml | 2 +- scripts/collect-info.yaml | 18 +++++++------- 35 files changed, 92 insertions(+), 150 deletions(-) delete mode 100644 roles/kubernetes/secrets/defaults/main.yml diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml index 970540110..a1c5d7f8a 100644 --- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml +++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml @@ -1,6 +1,6 @@ --- - name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV - template: src={{item.file}} dest=/etc/kubernetes/{{item.dest}} + template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}} with_items: - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json} - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml} @@ -13,7 +13,7 @@ namespace: default kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" - filename: "/etc/kubernetes/{{item.item.dest}}" + filename: "{{kube_config_dir}}/{{item.item.dest}}" state: "{{item.changed | ternary('latest','present') }}" with_items: "{{ gluster_pv.results }}" when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml index 65b65fe39..c90e173e7 100644 --- a/inventory/group_vars/all.yml 
+++ b/inventory/group_vars/all.yml @@ -4,6 +4,28 @@ bootstrap_os: none # Directory where the binaries will be installed bin_dir: /usr/local/bin +# Kubernetes configuration dirs and system namespace. +# These are where all the additional config stuff goes +# that Kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" +system_namespace: kube-system + +# Logging directory (sysvinit systems) +kube_log_dir: "/var/log/kubernetes" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +# This is where to save basic auth file +kube_users_dir: "{{ kube_config_dir }}/users" + # Where the binaries will be downloaded. # Note: ensure that you've enough disk space (about 1G) local_release_dir: "/tmp/releases" diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index fa89d6c6a..468b23779 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -34,7 +34,7 @@ state: link - name: Create dnsmasq manifests - template: src={{item.file}} dest=/etc/kubernetes/{{item.file}} + template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}} with_items: - {file: dnsmasq-ds.yml, type: ds} - {file: dnsmasq-svc.yml, type: svc} @@ -44,10 +44,10 @@ - name: Start Resources kube: name: dnsmasq - namespace: kube-system + namespace: "{{system_namespace}}" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" - filename: /etc/kubernetes/{{item.item.file}} + filename: "{{kube_config_dir}}/{{item.item.file}}" state: "{{item.changed | ternary('latest','present') }}" with_items: "{{ manifests.results }}" when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/dnsmasq/templates/dnsmasq-ds.yml b/roles/dnsmasq/templates/dnsmasq-ds.yml index 2f4a1cdd7..08ff70bff 100644 --- a/roles/dnsmasq/templates/dnsmasq-ds.yml +++ b/roles/dnsmasq/templates/dnsmasq-ds.yml @@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1 kind: DaemonSet metadata: name: dnsmasq - namespace: kube-system + namespace: "{{system_namespace}}" labels: k8s-app: dnsmasq spec: diff --git a/roles/dnsmasq/templates/dnsmasq-svc.yml b/roles/dnsmasq/templates/dnsmasq-svc.yml index 52be6fd83..1606aa932 100644 --- a/roles/dnsmasq/templates/dnsmasq-svc.yml +++ b/roles/dnsmasq/templates/dnsmasq-svc.yml @@ -6,7 +6,7 @@ metadata: kubernetes.io/cluster-service: 'true' k8s-app: dnsmasq name: dnsmasq - namespace: kube-system + namespace: {{system_namespace}} spec: ports: - port: 53 diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 1b9e9c3f6..dd2bd2d8a 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -1,6 +1,3 @@ -kube_config_dir: /etc/kubernetes -kube_namespace: kube-system - # Versions kubedns_version: 1.9 kubednsmasq_version: 1.3 diff --git a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml index 02a49f211..a3915f9ba 100644 --- a/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml +++ b/roles/kubernetes-apps/ansible/tasks/calico-policy-controller.yml @@ -8,6 +8,6 @@ name:
"calico-policy-controller" kubectl: "{{bin_dir}}/kubectl" filename: "{{kube_config_dir}}/calico-policy-controller.yml" - namespace: "{{kube_namespace}}" + namespace: "{{system_namespace}}" resource: "rs" when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ansible/tasks/main.yaml b/roles/kubernetes-apps/ansible/tasks/main.yaml index 2977772c3..1b4c77eff 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yaml +++ b/roles/kubernetes-apps/ansible/tasks/main.yaml @@ -11,7 +11,7 @@ - name: Kubernetes Apps | Start Resources kube: name: kubedns - namespace: "{{ kube_namespace }}" + namespace: "{{ system_namespace }}" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2 b/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2 index a522c80ad..1bc553316 100644 --- a/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/calico-policy-controller.yml.j2 @@ -2,7 +2,7 @@ apiVersion: extensions/v1beta1 kind: ReplicaSet metadata: name: calico-policy-controller - namespace: {{ kube_namespace }} + namespace: {{ system_namespace }} labels: k8s-app: calico-policy kubernetes.io/cluster-service: "true" @@ -15,7 +15,7 @@ spec: template: metadata: name: calico-policy-controller - namespace: kube-system + namespace: {{system_namespace}} labels: kubernetes.io/cluster-service: "true" k8s-app: calico-policy diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml index 0fe4d2f58..a7392cc87 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-rc.yml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: kubedns - namespace: {{ kube_namespace }} + namespace: {{ system_namespace }} labels: k8s-app: kubedns version: v19 diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml index 7f88d0666..ce8779326 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: kubedns - namespace: {{ kube_namespace }} + namespace: {{ system_namespace }} labels: k8s-app: kubedns kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml index c6bcd6992..1b8de999a 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yaml @@ -3,15 +3,15 @@ kube: name: "canal-config" kubectl: "{{bin_dir}}/kubectl" - filename: "/etc/kubernetes/canal-config.yaml" + filename: "{{kube_config_dir}}/canal-config.yaml" resource: "configmap" - namespace: "kube-system" + namespace: "{{system_namespace}}" - name: Start flannel and calico-node run_once: true kube: name: "canal-node" kubectl: "{{bin_dir}}/kubectl" - filename: "/etc/kubernetes/canal-node.yaml" + filename: "{{kube_config_dir}}/canal-node.yaml" resource: "ds" - namespace: "kube-system" + namespace: "{{system_namespace}}" diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index c33fa788f..c1fbbb583 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ 
b/roles/kubernetes/master/defaults/main.yml @@ -1,28 +1,7 @@ -# This is where all the cert scripts and certs will be located -kube_cert_dir: "{{ kube_config_dir }}/ssl" - -# This is where all of the bearer tokens will be stored -kube_token_dir: "{{ kube_config_dir }}/tokens" - -# This is where to save basic auth file -kube_users_dir: "{{ kube_config_dir }}/users" - # An experimental dev/test only dynamic volumes provisioner, # for PetSets. Works for kube>=v1.3 only. kube_hostpath_dynamic_provisioner: "false" -# This is where you can drop yaml/json files and the kubelet will run those -# pods on startup -kube_manifest_dir: "{{ kube_config_dir }}/manifests" - -# This directory is where all the additional config stuff goes -# the kubernetes normally puts in /srv/kubernets. -# This puts them in a sane location. -# Editting this value will almost surely break something. Don't -# change it. Things like the systemd scripts are hard coded to -# look in here. Don't do it. -kube_config_dir: /etc/kubernetes - # change to 0.0.0.0 to enable insecure access from anywhere (not recommended) kube_apiserver_insecure_bind_address: 127.0.0.1 @@ -30,9 +9,6 @@ kube_apiserver_insecure_bind_address: 127.0.0.1 # Inclusive at both ends of the range. kube_apiserver_node_port_range: "30000-32767" -# Logging directory (sysvinit systems) -kube_log_dir: "/var/log/kubernetes" - # ETCD cert dir for connecting apiserver to etcd etcd_config_dir: /etc/ssl/etcd etcd_cert_dir: "{{ etcd_config_dir }}/ssl" diff --git a/roles/kubernetes/master/files/namespace.yml b/roles/kubernetes/master/files/namespace.yml index 986f4b482..9bdf201a2 100644 --- a/roles/kubernetes/master/files/namespace.yml +++ b/roles/kubernetes/master/files/namespace.yml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: kube-system + name: "{{system_namespace}}" diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index 8e3353a21..e1b5cc5d2 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -36,28 +36,27 @@ tags: kube-apiserver - meta: flush_handlers -# Create kube-system namespace -- name: copy 'kube-system' namespace manifest - copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml + +- name: copy kube system namespace manifest + copy: src=namespace.yml dest={{kube_config_dir}}/{{system_namespace}}-ns.yml run_once: yes when: inventory_hostname == groups['kube-master'][0] tags: apps -- name: Check if kube-system exists - command: "{{ bin_dir }}/kubectl get ns kube-system" +- name: Check if kube system namespace exists + command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}" register: 'kubesystem' changed_when: False failed_when: False run_once: yes tags: apps -- name: Create 'kube-system' namespace - command: "{{ bin_dir }}/kubectl create -f /etc/kubernetes/kube-system-ns.yml" +- name: Create kube system namespace + command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml" changed_when: False when: kubesystem|failed and inventory_hostname == groups['kube-master'][0] tags: apps -# Write other manifests - name: Write kube-controller-manager manifest template: src: manifests/kube-controller-manager.manifest.j2 diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 530b009c6..b292e5106 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ 
b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-apiserver - namespace: kube-system + namespace: {{system_namespace}} labels: k8s-app: kube-apiserver spec: diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index cdfbef064..1385b3cf4 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-controller-manager - namespace: kube-system + namespace: {{system_namespace}} labels: k8s-app: kube-controller spec: diff --git a/roles/kubernetes/master/vars/main.yml b/roles/kubernetes/master/vars/main.yml index 2eeb525fe..a5eba4f2b 100644 --- a/roles/kubernetes/master/vars/main.yml +++ b/roles/kubernetes/master/vars/main.yml @@ -3,4 +3,4 @@ namespace_kubesystem: apiVersion: v1 kind: Namespace metadata: - name: kube-system \ No newline at end of file + name: "{{system_namespace}}" diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 8c4ce38a5..b0f73e50d 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -1,15 +1,6 @@ -# This is where all the cert scripts and certs will be located -kube_cert_dir: "{{ kube_config_dir }}/ssl" - # change to 0.0.0.0 to enable insecure access from anywhere (not recommended) kube_apiserver_insecure_bind_address: 127.0.0.1 -# This is where you can drop yaml/json files and the kubelet will run those -# pods on startup -kube_manifest_dir: "{{ kube_config_dir }}/manifests" - -dns_domain: "{{ cluster_name }}" - # resolv.conf to base dns config kube_resolv_conf: "/etc/resolv.conf" @@ -22,16 +13,5 @@ kube_proxy_masquerade_all: true # - extensions/v1beta1/daemonsets=true # - extensions/v1beta1/deployments=true -# Logging directory (sysvinit systems) -kube_log_dir: "/var/log/kubernetes" - -# This directory is where all the additional config stuff goes -# the kubernetes normally puts in /srv/kubernets. -# This puts them in a sane location. -# Editting this value will almost surely break something. Don't -# change it. Things like the systemd scripts are hard coded to -# look in here. Don't do it. 
-kube_config_dir: /etc/kubernetes - nginx_image_repo: nginx nginx_image_tag: 1.11.4-alpine diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 67cc4ca86..3e0c095e1 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -1,4 +1,9 @@ --- +- set_fact: + standalone_kubelet: >- + {%- if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] -%}true{%- else -%}false{%- endif -%} + tags: facts + - include: install.yml tags: kubelet diff --git a/roles/kubernetes/node/tasks/nginx-proxy.yml b/roles/kubernetes/node/tasks/nginx-proxy.yml index 056c55a93..885b84f8f 100644 --- a/roles/kubernetes/node/tasks/nginx-proxy.yml +++ b/roles/kubernetes/node/tasks/nginx-proxy.yml @@ -1,6 +1,6 @@ --- - name: nginx-proxy | Write static pod - template: src=manifests/nginx-proxy.manifest.j2 dest=/etc/kubernetes/manifests/nginx-proxy.yml + template: src=manifests/nginx-proxy.manifest.j2 dest={{kube_manifest_dir}}/nginx-proxy.yml - name: nginx-proxy | Make nginx directory file: path=/etc/nginx state=directory mode=0700 owner=root diff --git a/roles/kubernetes/node/templates/deb-kubelet.initd.j2 b/roles/kubernetes/node/templates/deb-kubelet.initd.j2 index 5d5184efe..6f349b8f2 100644 --- a/roles/kubernetes/node/templates/deb-kubelet.initd.j2 +++ b/roles/kubernetes/node/templates/deb-kubelet.initd.j2 @@ -27,7 +27,7 @@ DAEMON_USER=root [ -x "$DAEMON" ] || exit 0 # Read configuration variable file if it is present -[ -r /etc/kubernetes/$NAME.env ] && . /etc/kubernetes/$NAME.env +[ -r {{kube_config_dir}}/$NAME.env ] && . {{kube_config_dir}}/$NAME.env # Define LSB log_* functions. # Depend on lsb-base (>= 3.2-14) to ensure that this file is present diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2 index 45a76accc..7d4f536ab 100644 --- a/roles/kubernetes/node/templates/kubelet-container.j2 +++ b/roles/kubernetes/node/templates/kubelet-container.j2 @@ -3,7 +3,7 @@ --net=host --pid=host --name=kubelet --restart=on-failure:5 \ -v /etc/cni:/etc/cni:ro \ -v /opt/cni:/opt/cni:ro \ --v /etc/kubernetes:/etc/kubernetes \ +-v {{kube_config_dir}}:{{kube_config_dir}} \ -v /sys:/sys \ -v /dev:/dev \ -v {{ docker_daemon_graph }}:/var/lib/docker \ diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index 3c1f31ab2..a9ecce448 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -12,17 +12,21 @@ KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}" # KUBELET_PORT="--port=10250" # You may leave this blank to use the actual hostname KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}" -{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %} -KUBELET_REGISTER_NODE="--register-node=false" -{% endif %} # location of the api-server +{% set kubelet_args_base %}--pod-manifest-path={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}{% endset %} {% if dns_setup|bool and skip_dnsmasq|bool %} -KUBELET_ARGS="--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig --pod-manifest-path={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}" +{% set kubelet_args_dns %}--cluster_dns={{ 
skydns_server }} --cluster_domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }} {{ kubelet_args_base }}{% endset %} {% elif dns_setup|bool %} -KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig --pod-manifest-path={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}" +{% set kubelet_args_dns %}--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }} {{ kubelet_args_base }}{% endset %} {% else %} -KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --require-kubeconfig --pod-manifest-path={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}" +{% set kubelet_args_dns = kubelet_args_base %} {% endif %} +{% if not standalone_kubelet|bool %} +{% set kubelet_args %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig {{ kubelet_args_dns }}{% endset %} +{% else %} +{% set kubelet_args = kubelet_args_dns %} +{% endif %} +KUBELET_ARGS="{{ kubelet_args }}" {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave", "canal"] %} KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d" {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %} diff --git a/roles/kubernetes/node/templates/kubelet.service.j2 b/roles/kubernetes/node/templates/kubelet.service.j2 index ad62d8562..b3113d5da 100644 --- a/roles/kubernetes/node/templates/kubelet.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.service.j2 @@ -10,7 +10,7 @@ Wants=docker.socket {% endif %} [Service] -EnvironmentFile=/etc/kubernetes/kubelet.env +EnvironmentFile={{kube_config_dir}}/kubelet.env ExecStart={{ bin_dir }}/kubelet \ $KUBE_LOGTOSTDERR \ $KUBE_LOG_LEVEL \ diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 12a1a7663..694ee1e36 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: kube-proxy - namespace: kube-system + namespace: {{system_namespace}} labels: k8s-app: kube-proxy spec: @@ -17,7 +17,7 @@ spec: - --v={{ kube_log_level }} - --master={{ kube_apiserver_endpoint }} {% if not is_kube_master %} - - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml + - --kubeconfig={{kube_config_dir}}/node-kubeconfig.yaml {% endif %} - --bind-address={{ ip | default(ansible_default_ipv4.address) }} - --cluster-cidr={{ kube_pods_subnet }} @@ -31,10 +31,10 @@ spec: - mountPath: /etc/ssl/certs name: ssl-certs-host readOnly: true - - mountPath: /etc/kubernetes/node-kubeconfig.yaml + - mountPath: {{kube_config_dir}}/node-kubeconfig.yaml name: "kubeconfig" readOnly: true - - mountPath: /etc/kubernetes/ssl + - mountPath: {{kube_config_dir}}/ssl name: "etc-kube-ssl" readOnly: true - mountPath: /var/run/dbus @@ -46,10 +46,10 @@ spec: path: /usr/share/ca-certificates - name: "kubeconfig" hostPath: - path: "/etc/kubernetes/node-kubeconfig.yaml" + path: "{{kube_config_dir}}/node-kubeconfig.yaml" - name: "etc-kube-ssl" hostPath: - path: "/etc/kubernetes/ssl" + path: "{{kube_config_dir}}/ssl" - name: "var-run-dbus" hostPath: path: "/var/run/dbus" diff --git 
a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 index 0930ee61e..db15bd2b9 100644 --- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: name: nginx-proxy - namespace: kube-system + namespace: {{system_namespace}} labels: k8s-app: kube-nginx spec: diff --git a/roles/kubernetes/node/templates/rh-kubelet.initd.j2 b/roles/kubernetes/node/templates/rh-kubelet.initd.j2 index 5a709e118..faae10d1a 100644 --- a/roles/kubernetes/node/templates/rh-kubelet.initd.j2 +++ b/roles/kubernetes/node/templates/rh-kubelet.initd.j2 @@ -27,7 +27,7 @@ pidfile="/var/run/$prog.pid" lockfile="/var/lock/subsys/$prog" logfile="/var/log/$prog" -[ -e /etc/kubernetes/$prog.env ] && . /etc/kubernetes/$prog.env +[ -e {{kube_config_dir}}/$prog.env ] && . {{kube_config_dir}}/$prog.env start() { if [ ! -x $exec ]; then @@ -35,7 +35,7 @@ start() { echo "Docker executable $exec not found" else echo "You do not have permission to execute the Docker executable $exec" - fi + fi exit 5 fi diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index 64f0ff24b..35ad8abea 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -1,26 +1,6 @@ --- run_gitinfos: false -# This directory is where all the additional scripts go -# that Kubernetes normally puts in /srv/kubernetes. -# This puts them in a sane location -kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" - -# This directory is where all the additional config stuff goes -# the kubernetes normally puts in /srv/kubernets. -# This puts them in a sane location. -# Editting this value will almost surely break something. Don't -# change it. Things like the systemd scripts are hard coded to -# look in here. Don't do it. -kube_config_dir: /etc/kubernetes - -# Logging directory (sysvinit systems) -kube_log_dir: "/var/log/kubernetes" - -# This is where you can drop yaml/json files and the kubelet will run those -# pods on startup -kube_manifest_dir: "{{ kube_config_dir }}/manifests" - epel_rpm_download_url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm" common_required_pkgs: diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml deleted file mode 100644 index c6011a9bf..000000000 --- a/roles/kubernetes/secrets/defaults/main.yml +++ /dev/null @@ -1,21 +0,0 @@ -# This is where all the cert scripts and certs will be located -kube_cert_dir: "{{ kube_config_dir }}/ssl" - -# This is where all of the bearer tokens will be stored -kube_token_dir: "{{ kube_config_dir }}/tokens" - -# This is where to save basic auth file -kube_users_dir: "{{ kube_config_dir }}/users" - -# This directory is where all the additional config stuff goes -# the kubernetes normally puts in /srv/kubernets. -# This puts them in a sane location. -# Editting this value will almost surely break something. Don't -# change it. Things like the systemd scripts are hard coded to -# look in here. Don't do it. -kube_config_dir: /etc/kubernetes - -# This directory is where all the additional scripts go -# that Kubernetes normally puts in /srv/kubernetes. 
-# This puts them in a sane location -kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml index 15ce2f657..d968e9e46 100644 --- a/roles/network_plugin/canal/tasks/main.yml +++ b/roles/network_plugin/canal/tasks/main.yml @@ -35,12 +35,12 @@ - name: Canal | Write canal configmap template: src: canal-config.yml.j2 - dest: /etc/kubernetes/canal-config.yaml + dest: "{{kube_config_dir}}/canal-config.yaml" - name: Canal | Write canal node configuration template: src: canal-node.yml.j2 - dest: /etc/kubernetes/canal-node.yaml + dest: "{{kube_config_dir}}/canal-node.yaml" - name: Canal | Copy cni plugins from hyperkube command: "/usr/bin/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -a /opt/cni/bin/ /cnibindir/" diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml index f4ca65d12..4dde123ae 100644 --- a/roles/network_plugin/flannel/tasks/main.yml +++ b/roles/network_plugin/flannel/tasks/main.yml @@ -10,7 +10,7 @@ - name: Flannel | Create flannel pod manifest template: src: flannel-pod.yml - dest: /etc/kubernetes/manifests/flannel-pod.manifest + dest: "{{kube_manifest_dir}}/flannel-pod.manifest" notify: Flannel | delete default docker bridge - name: Flannel | Wait for flannel subnet.env file presence diff --git a/roles/network_plugin/flannel/templates/flannel-pod.yml b/roles/network_plugin/flannel/templates/flannel-pod.yml index 70b62e9ac..1af2152ea 100644 --- a/roles/network_plugin/flannel/templates/flannel-pod.yml +++ b/roles/network_plugin/flannel/templates/flannel-pod.yml @@ -3,7 +3,7 @@ apiVersion: "v1" metadata: name: "flannel" - namespace: "kube-system" + namespace: "{{system_namespace}}" labels: app: "flannel" version: "v0.1" diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index bdacbbfc4..74a92abd5 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -34,7 +34,7 @@ - name: reset | delete some files and directories file: path={{ item }} state=absent with_items: - - /etc/kubernetes/ + - "{{kube_config_dir}}" - /var/lib/kubelet - /var/lib/etcd - /etc/ssl/etcd diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml index 03447842f..d0f3b9df0 100644 --- a/scripts/collect-info.yaml +++ b/scripts/collect-info.yaml @@ -41,31 +41,31 @@ cmd: journalctl -u kubelet --no-pager - name: kubedns_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kubedns -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system kubedns; done" + do kubectl logs ${i} --namespace {{system_namespace}} kubedns; done" - name: apiserver_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-apiserver -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system; done" + do kubectl logs ${i} --namespace {{system_namespace}}; done" - name: controller_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-controller -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system; done" + do kubectl logs ${i} --namespace {{system_namespace}}; done" - name: scheduler_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-scheduler -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system; done" + do kubectl logs ${i} --namespace {{system_namespace}}; done" - name: proxy_logs cmd: "for i in `kubectl get pods 
--all-namespaces -l k8s-app=kube-proxy -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system; done" + do kubectl logs ${i} --namespace {{system_namespace}}; done" - name: nginx_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-nginx -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system; done" + do kubectl logs ${i} --namespace {{system_namespace}}; done" - name: flannel_logs cmd: "for i in `kubectl get pods --all-namespaces -l app=flannel -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system flannel-container; done" + do kubectl logs ${i} --namespace {{system_namespace}} flannel-container; done" - name: canal_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=canal-node -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system flannel; done" + do kubectl logs ${i} --namespace {{system_namespace}} flannel; done" - name: calico_policy_logs cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=calico-policy -o jsonpath={.items..metadata.name}`; - do kubectl logs ${i} --namespace kube-system calico-policy-controller; done" + do kubectl logs ${i} --namespace {{system_namespace}} calico-policy-controller; done" logs: - /var/log/syslog
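A note to make the standalone kubelet case from the subject concrete: with the reworked roles/kubernetes/node/templates/kubelet.j2, a host that is in kube-master but not in kube-node (standalone_kubelet == true) gets no --kubeconfig/--require-kubeconfig flags and therefore only runs the static pods found under kube_manifest_dir, while a host that is also a node keeps registering against the apiserver. Below is a rough sketch of the rendered kubelet.env, assuming dns_setup and skip_dnsmasq are both enabled; the DNS address, cluster domain and pod-infra image shown are illustrative placeholders, not values set by this patch.

# standalone_kubelet == true (master-only host)
KUBELET_ARGS="--cluster_dns=10.233.0.3 --cluster_domain=cluster.local --resolv-conf=/etc/resolv.conf --pod-manifest-path=/etc/kubernetes/manifests --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0"

# standalone_kubelet == false (host also in kube-node)
KUBELET_ARGS="--kubeconfig=/etc/kubernetes/node-kubeconfig.yaml --require-kubeconfig --cluster_dns=10.233.0.3 --cluster_domain=cluster.local --resolv-conf=/etc/resolv.conf --pod-manifest-path=/etc/kubernetes/manifests --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0"

This replaces the old --register-node=false special case for masters that are not also nodes.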
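On the deduplication side, a minimal sketch of how a role task consumes the now-global settings instead of carrying its own copies; the "example-app" role, template and resource are hypothetical, while kube_config_dir, system_namespace, bin_dir and the in-repo kube module are exactly the ones used throughout this patch.

# roles/example-app/tasks/main.yml (hypothetical)
- name: Example App | Lay down manifest
  template:
    src: example-app.yml.j2
    dest: "{{ kube_config_dir }}/example-app.yml"
  register: example_manifest

- name: Example App | Start resource
  kube:
    name: example-app
    namespace: "{{ system_namespace }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "rs"
    filename: "{{ kube_config_dir }}/example-app.yml"
    state: "{{ example_manifest.changed | ternary('latest','present') }}"
  when: inventory_hostname == groups['kube-master'][0]

No per-role kube_config_dir or kube_namespace defaults are needed any more, and operators can override the paths or the system namespace in a single place, inventory/group_vars/all.yml.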