Add support for kube-vip (#8669)

Signed-off-by: Mathieu Parent <math.parent@gmail.com>
pull/8698/head
Mathieu Parent 2022-04-07 19:37:57 +02:00 committed by GitHub
parent 19d5a1c7c3
commit 996ef98b87
10 changed files with 194 additions and 3 deletions

@@ -18,6 +18,7 @@
* [Weave](docs/weave.md)
* [Multus](docs/multus.md)
* Ingress
* [kube-vip](docs/kube-vip.md)
* [ALB Ingress](docs/ingress_controller/alb_ingress_controller.md)
* [MetalLB](docs/metallb.md)
* [Nginx Ingress](docs/ingress_controller/ingress_nginx.md)

@@ -156,6 +156,7 @@ The following tags are defined in playbooks:
| kubeadm | Roles linked to kubeadm tasks
| kube-apiserver | Configuring static pod kube-apiserver
| kube-controller-manager | Configuring static pod kube-controller-manager
| kube-vip | Installing and configuring kube-vip
| kubectl | Installing kubectl and bash completion
| kubelet | Configuring kubelet service
| kube-ovn | Network plugin kube-ovn

@@ -29,9 +29,7 @@ configure kubelet and kube-proxy on non-master nodes to use the local internal
loadbalancer.
If you choose to NOT use the local internal loadbalancer, you will need to
-configure your own loadbalancer to achieve HA. Note that deploying a
-loadbalancer is up to a user and is not covered by ansible roles in Kubespray.
-By default, it only configures a non-HA endpoint, which points to the
+use the [kube-vip](kube-vip.md) ansible role or configure your own loadbalancer to achieve HA. By default, it only configures a non-HA endpoint, which points to the
`access_ip` or IP address of the first server node in the `kube_control_plane` group.
It can also configure clients to use endpoints for a given loadbalancer type.
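As an illustration of that last point, a minimal sketch of pointing clients at an externally managed load balancer instead of kube-vip (values are placeholders; `apiserver_loadbalancer_domain_name` and `loadbalancer_apiserver` are the variables Kubespray's HA documentation uses for this):

```yaml
# Sketch: load balancer deployed outside Kubespray (placeholder values)
apiserver_loadbalancer_domain_name: "elb.some.domain"
loadbalancer_apiserver:
  address: 1.2.3.4
  port: 1234
```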
The following diagram shows how traffic to the apiserver is directed.

docs/kube-vip.md (new file, mode 100644, 52 lines added)
@@ -0,0 +1,52 @@
# kube-vip

kube-vip provides Kubernetes clusters with a virtual IP and load balancer for both the control plane (for building a highly-available cluster) and Kubernetes Services of type LoadBalancer without relying on any external hardware or software.

## Install

You have to explicitly enable the kube-vip extension:

```yaml
kube_vip_enabled: true
```

You also need to enable
[kube-vip as HA, Load Balancer, or both](https://kube-vip.chipzoller.dev/docs/installation/static/#kube-vip-as-ha-load-balancer-or-both):

```yaml
# HA for the control plane, requires a VIP
kube_vip_controlplane_enabled: true
kube_vip_address: 10.42.42.42
loadbalancer_apiserver:
  address: "{{ kube_vip_address }}"
  port: 6443
# kube_vip_interface: ens160

# LoadBalancer for services
kube_vip_services_enabled: false
# kube_vip_services_interface: ens320
```

> Note: When using `kube-vip` as a LoadBalancer for services,
> [additional manual steps](https://kube-vip.chipzoller.dev/docs/usage/cloud-provider/)
> are needed.
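Those linked steps essentially deploy the kube-vip cloud provider and give it an address pool to allocate Service IPs from. A hedged sketch of such a pool, assuming the ConfigMap name (`kubevip`), namespace and `range-global` key described in the upstream cloud-provider documentation; the range itself is a placeholder:

```yaml
# Sketch only; follow the linked cloud-provider docs for the authoritative manifests.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip            # name the kube-vip cloud provider watches for
  namespace: kube-system
data:
  range-global: 192.168.0.200-192.168.0.210   # placeholder pool for LoadBalancer Services
```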
If using [ARP mode](https://kube-vip.chipzoller.dev/docs/installation/static/#arp):

```yaml
kube_vip_arp_enabled: true
```

If using [BGP mode](https://kube-vip.chipzoller.dev/docs/installation/static/#bgp):

```yaml
kube_vip_bgp_enabled: true
kube_vip_local_as: 65000
kube_vip_bgp_routerid: 192.168.0.2
kube_vip_bgppeers:
  - 192.168.0.10:65000::false
  - 192.168.0.11:65000::false
# kube_vip_bgp_peeraddress:
# kube_vip_bgp_peerpass:
# kube_vip_bgp_peeras:
```
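Putting the options above together, a minimal sketch of a typical control-plane VIP advertised via ARP (values are placeholders; all variables are the ones documented above):

```yaml
kube_vip_enabled: true
kube_vip_controlplane_enabled: true
kube_vip_arp_enabled: true
kube_vip_address: 10.42.42.42
loadbalancer_apiserver:
  address: "{{ kube_vip_address }}"
  port: 6443
# kube_vip_interface: eth0   # optionally pin the interface used for the VIP
```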

@@ -875,6 +875,8 @@ kube_router_image_tag: "{{ kube_router_version }}"
multus_image_repo: "{{ github_image_repo }}/k8snetworkplumbingwg/multus-cni"
multus_image_tag: "{{ multus_version }}"
kube_vip_image_repo: "{{ github_image_repo }}/kube-vip/kube-vip"
kube_vip_image_tag: v0.4.2
nginx_image_repo: "{{ docker_image_repo }}/library/nginx"
nginx_image_tag: 1.21.4
haproxy_image_repo: "{{ docker_image_repo }}/library/haproxy"
@@ -1382,6 +1384,15 @@ downloads:
    groups:
    - k8s_cluster

  kube-vip:
    enabled: "{{ kube_vip_enabled }}"
    container: true
    repo: "{{ kube_vip_image_repo }}"
    tag: "{{ kube_vip_image_tag }}"
    sha256: "{{ kube_vip_digest_checksum|default(None) }}"
    groups:
    - kube_control_plane

  nginx:
    enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}"
    container: true
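Since the repository and tag above are ordinary variables, they can be overridden in inventory, for example when images are mirrored into a private registry; a sketch with a placeholder registry URL:

```yaml
# inventory group_vars sketch (registry URL is a placeholder)
kube_vip_image_repo: "registry.example.com/kube-vip/kube-vip"
kube_vip_image_tag: v0.4.2
```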

@@ -47,6 +47,26 @@ eviction_hard_control_plane: {}
kubelet_status_update_frequency: 10s
# kube-vip
kube_vip_version: v0.4.2
kube_vip_arp_enabled: false
kube_vip_interface:
kube_vip_services_interface:
kube_vip_cidr: 32
kube_vip_controlplane_enabled: false
kube_vip_ddns_enabled: false
kube_vip_services_enabled: false
kube_vip_leader_election_enabled: "{{ kube_vip_arp_enabled }}"
kube_vip_bgp_enabled: false
kube_vip_bgp_routerid:
kube_vip_local_as: 65000
kube_vip_bgp_peeraddress:
kube_vip_bgp_peerpass:
kube_vip_bgp_peeras:
kube_vip_bgppeers:
kube_vip_address:
# Requests for load balancer app
loadbalancer_apiserver_memory_requests: 32M
loadbalancer_apiserver_cpu_requests: 25m

@@ -0,0 +1,6 @@
---
- name: kube-vip | Write static pod
  template:
    src: manifests/kube-vip.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-vip.yml"
    mode: 0640

@@ -17,6 +17,13 @@
  tags:
    - kubelet

- import_tasks: loadbalancer/kube-vip.yml
  when:
    - is_kube_master
    - kube_vip_enabled
  tags:
    - kube-vip

- import_tasks: loadbalancer/nginx-proxy.yml
  when:
    - not is_kube_master or kube_apiserver_bind_address != '0.0.0.0'

@@ -0,0 +1,93 @@
# Inspired by https://github.com/kube-vip/kube-vip/blob/v0.4.2/pkg/kubevip/config_generator.go#L13
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  name: kube-vip
  namespace: kube-system
spec:
  containers:
  - args:
    - manager
    env:
    - name: vip_arp
      value: {{ kube_vip_arp_enabled | string | to_json }}
    - name: port
      value: "6443"
{% if kube_vip_interface %}
    - name: vip_interface
      value: {{ kube_vip_interface | string | to_json }}
{% endif %}
{% if kube_vip_services_interface %}
    - name: vip_servicesinterface
      value: {{ kube_vip_services_interface | string | to_json }}
{% endif %}
{% if kube_vip_cidr %}
    - name: vip_cidr
      value: {{ kube_vip_cidr | string | to_json }}
{% endif %}
{% if kube_vip_controlplane_enabled %}
    - name: cp_enable
      value: "true"
    - name: cp_namespace
      value: kube-system
    - name: vip_ddns
      value: {{ kube_vip_ddns_enabled | string | to_json }}
{% endif %}
{% if kube_vip_services_enabled %}
    - name: svc_enable
      value: "true"
{% endif %}
{% if kube_vip_leader_election_enabled %}
    - name: vip_leaderelection
      value: "true"
    - name: vip_leaseduration
      value: "5"
    - name: vip_renewdeadline
      value: "3"
    - name: vip_retryperiod
      value: "1"
{% endif %}
{% if kube_vip_bgp_enabled %}
    - name: bgp_enable
      value: "true"
    - name: bgp_routerid
      value: {{ kube_vip_bgp_routerid | string | to_json }}
    - name: bgp_as
      value: {{ kube_vip_local_as | string | to_json }}
    - name: bgp_peeraddress
      value: {{ kube_vip_bgp_peeraddress | to_json }}
    - name: bgp_peerpass
      value: {{ kube_vip_bgp_peerpass | to_json }}
    - name: bgp_peeras
      value: {{ kube_vip_bgp_peeras | to_json }}
{% if kube_vip_bgppeers %}
    - name: bgp_peers
      value: {{ kube_vip_bgppeers | join(',') | to_json }}
{% endif %}
{% endif %}
    - name: address
      value: {{ kube_vip_address | to_json }}
    image: {{ kube_vip_image_repo }}:{{ kube_vip_image_tag }}
    imagePullPolicy: {{ k8s_image_pull_policy }}
    name: kube-vip
    resources: {}
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
        - NET_RAW
    volumeMounts:
    - mountPath: /etc/kubernetes/admin.conf
      name: kubeconfig
  hostAliases:
  - hostnames:
    - kubernetes
    ip: 127.0.0.1
  hostNetwork: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/admin.conf
    name: kubeconfig
status: {}
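For orientation, a hand-derived sketch (not captured output) of what the `env` section of the template above renders to with the ARP/control-plane example values from docs/kube-vip.md; note that booleans passed through `| string | to_json` come out Python-capitalized, e.g. "True":

```yaml
# Assumes kube_vip_arp_enabled=true, kube_vip_controlplane_enabled=true,
# kube_vip_address=10.42.42.42 and the role defaults for everything else.
    env:
    - name: vip_arp
      value: "True"
    - name: port
      value: "6443"
    - name: vip_cidr
      value: "32"
    - name: cp_enable
      value: "true"
    - name: cp_namespace
      value: kube-system
    - name: vip_ddns
      value: "False"
    - name: vip_leaderelection
      value: "true"
    - name: vip_leaseduration
      value: "5"
    - name: vip_renewdeadline
      value: "3"
    - name: vip_retryperiod
      value: "1"
    - name: address
      value: "10.42.42.42"
```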

@@ -60,6 +60,8 @@ kube_proxy_nodeport_addresses: >-
# Set to true to allow pre-checks to fail and continue deployment
ignore_assert_errors: false
kube_vip_enabled: false
# nginx-proxy configure
nginx_config_dir: "/etc/nginx"