Update Flannel manifests, install script and version (0.12) + fix test scripts (#5937)

* Add CI_TEST_VARS to tests

* Update flannel to 0.12.0 (with new manifests) and disable tx/rx offloading in the networking test
pull/5904/head
Florian Ruynat 2020-04-15 08:48:02 +02:00 committed by GitHub
parent b5125e59ab
commit 58f48500b1
10 changed files with 136 additions and 69 deletions

@@ -125,7 +125,7 @@ Note: Upstart/SysV init based OS types are not supported.
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.5.5
- [contiv](https://github.com/contiv/install) v1.2.1
- [flanneld](https://github.com/coreos/flannel) v0.11.0
- [flanneld](https://github.com/coreos/flannel) v0.12.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v0.4.0
- [multus](https://github.com/intel/multus-cni) v3.4.1
- [weave](https://github.com/weaveworks/weave) v2.5.2
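
To run with the bumped release, a cluster only needs Flannel selected as its network plugin; the v0.12.0 tag then comes from the role defaults updated further down in this commit. A minimal sketch, assuming the standard sample inventory layout:

```yaml
# inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml  (path assumed)
kube_network_plugin: flannel
```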

@@ -1,5 +1,11 @@
# Flannel
Flannel is a network fabric for containers, designed for Kubernetes.
**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with the `VXLAN` backend. While waiting for a newer Flannel version, the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcased in the Kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).
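For reference, the workaround as applied in the Kubespray networking test looks like this:

```yaml
- name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
  shell: "ethtool --offload flannel.1 rx off tx off"
  ignore_errors: true
  when:
    - kube_network_plugin == 'flannel'
```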
## Verifying flannel install
* Flannel configuration file should have been created there
```ShellSession

@@ -72,7 +72,7 @@ calico_policy_version: "v3.13.2"
calico_typha_version: "v3.13.2"
typha_enabled: false
flannel_version: "v0.11.0"
flannel_version: "v0.12.0"
flannel_cni_version: "v0.3.0"
cni_version: "v0.8.5"
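
These version pins feed the image references used by the manifest template further down (`flannel_image_repo` / `flannel_image_tag`). To pin a different release per cluster, the variables can be overridden from inventory group_vars; a minimal sketch, assuming `flannel_image_tag` tracks `flannel_version` as in the stock defaults:

```yaml
# group_vars/k8s-cluster/k8s-net-flannel.yml  (path assumed)
flannel_version: "v0.12.0"
flannel_cni_version: "v0.3.0"
# If the image tag does not track flannel_version in your setup, override it as well:
# flannel_image_tag: "{{ flannel_version }}"
```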

@@ -23,6 +23,3 @@ flannel_memory_limit: 500M
flannel_cpu_limit: 300m
flannel_memory_requests: 64M
flannel_cpu_requests: 150m
# Legacy directory, will be removed if found.
flannel_cert_dir: /etc/flannel/certs
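
The limits and requests above are plain role defaults, so they can be tuned per cluster without touching the role. A minimal sketch of an inventory override using the same variable names (values are only examples):

```yaml
# group_vars/k8s-cluster/k8s-net-flannel.yml  (path assumed)
flannel_memory_limit: 600M   # example value
flannel_cpu_limit: 500m      # example value
flannel_memory_requests: 64M
flannel_cpu_requests: 150m
```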

@@ -9,3 +9,19 @@
register: flannel_node_manifests
when:
- inventory_hostname in groups['kube-master']
- name: Flannel | Set CNI directory permissions
file:
path: /opt/cni/bin
state: directory
owner: kube
recurse: true
mode: 0755
register: cni_bin_dir
- name: Flannel | Copy CNI plugins
unarchive:
src: "{{ local_release_dir }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
dest: "/opt/cni/bin"
mode: 0755
remote_src: yes
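
The unarchive step assumes the CNI plugins tarball has already been fetched into `{{ local_release_dir }}` by the download role. A hedged sketch of an optional follow-up check (not part of this commit) that the plugins actually landed in `/opt/cni/bin` — `portmap` is the one referenced by the generated CNI config:

```yaml
- name: Flannel | Check that the portmap CNI plugin is present  # illustrative only
  stat:
    path: /opt/cni/bin/portmap
  register: portmap_plugin

- name: Flannel | Fail if the CNI plugins are missing  # illustrative only
  fail:
    msg: "Expected CNI plugins in /opt/cni/bin, but portmap was not found"
  when: not portmap_plugin.stat.exists
```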

@@ -3,13 +3,66 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: "kube-system"
namespace: kube-system
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
{% if podsecuritypolicy_enabled and apparmor_enabled %}
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
{% endif %}
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
@@ -29,14 +82,6 @@ rules:
- nodes/status
verbs:
- patch
- apiGroups:
- policy
resourceNames:
- privileged
resources:
- podsecuritypolicies
verbs:
- use
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -49,4 +94,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: flannel
namespace: "kube-system"
namespace: kube-system
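
The AppArmor annotations on the PodSecurityPolicy are templated on `podsecuritypolicy_enabled` and `apparmor_enabled` (see the `{% if %}` block above), and the policy only takes effect when PSP support is enabled for the cluster. A minimal sketch of turning that on, assuming the usual k8s-cluster group_vars file:

```yaml
# group_vars/k8s-cluster/k8s-cluster.yml  (path assumed)
podsecuritypolicy_enabled: true
```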

@@ -3,28 +3,27 @@ kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: "kube-system"
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name":"cni0",
"cniVersion":"0.3.1",
"plugins":[
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type":"flannel",
"delegate":{
"forceAddress":true,
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway":true
"isDefaultGateway": true
}
},
{
"type":"portmap",
"capabilities":{
"portMappings":true
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
@@ -41,25 +40,22 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel
namespace: "kube-system"
namespace: kube-system
labels:
tier: node
k8s-app: flannel
app: flannel
spec:
selector:
matchLabels:
tier: node
k8s-app: flannel
app: flannel
template:
metadata:
labels:
tier: node
k8s-app: flannel
app: flannel
spec:
priorityClassName: system-node-critical
serviceAccountName: flannel
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: kube-flannel
image: {{ flannel_image_repo }}:{{ flannel_image_tag }}
@@ -73,7 +69,9 @@ spec:
memory: {{ flannel_memory_requests }}
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ]
securityContext:
privileged: true
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
@@ -83,45 +81,47 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: run
mountPath: /run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- amd64
initContainers:
- name: install-cni
image: {{ flannel_cni_image_repo }}:{{ flannel_cni_image_tag }}
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: {{ flannel_cni_image_repo }}:{{ flannel_cni_image_tag }}
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kube-flannel-cfg
key: cni-conf.json
- name: CNI_CONF_NAME
value: "10-flannel.conflist"
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: host-cni-bin
mountPath: /host/opt/cni/bin/
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
tolerations:
- operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
- key: CriticalAddonsOnly
operator: "Exists"
- operator: Exists
effect: NoSchedule
volumes:
- name: run
hostPath:
path: /run
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
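
The `flanneld` command line above only appends `--iface` / `--iface-regex` when `flannel_interface` or `flannel_interface_regexp` is defined. A minimal sketch of pinning Flannel to a specific NIC (interface name and regex are just examples):

```yaml
# group_vars/k8s-cluster/k8s-net-flannel.yml  (path assumed)
flannel_interface: eth1               # example NIC name
# ...or select the interface by regex instead:
# flannel_interface_regexp: "eth.*"
```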

@@ -28,6 +28,3 @@ enable_nodelocaldns: false
kube_oidc_url: https://accounts.google.com/.well-known/openid-configuration
kube_oidc_client_id: kubespray-example
# Temp set k8s ver to 1.16.8
kube_version: v1.16.8
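
With the temporary pin removed, this test scenario follows the repository default Kubernetes version again. If a scenario does need to stay on a specific release, the variable can simply be set back in its vars file; a sketch using the value that was just dropped:

```yaml
# tests/files/<scenario>.yml  (scenario name hypothetical)
kube_version: v1.16.8
```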

@@ -52,19 +52,19 @@ fi
# Tests Cases
## Test Master API
ansible-playbook --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL
## Test that all pods are Running
ansible-playbook --limit "all:!fake_hosts" tests/testcases/015_check-pods-running.yml $ANSIBLE_LOG_LEVEL
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} tests/testcases/015_check-pods-running.yml $ANSIBLE_LOG_LEVEL
## Test that all nodes are Ready
ansible-playbook --limit "all:!fake_hosts" tests/testcases/020_check-nodes-ready.yml $ANSIBLE_LOG_LEVEL
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} tests/testcases/020_check-nodes-ready.yml $ANSIBLE_LOG_LEVEL
## Test pod creation and ping between them
ansible-playbook --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $ANSIBLE_LOG_LEVEL
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} tests/testcases/030_check-network.yml $ANSIBLE_LOG_LEVEL
## Advanced DNS checks
ansible-playbook --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL
ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL
## Kubernetes conformance tests
ansible-playbook -i ${ANSIBLE_INVENTORY} -e @${CI_TEST_VARS} --limit "all:!fake_hosts" tests/testcases/100_check-k8s-conformance.yml $ANSIBLE_LOG_LEVEL
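
`CI_TEST_VARS` points at the per-scenario vars file (like the one edited above), so every test case now runs with the scenario's settings instead of bare defaults. A hedged sketch of what such a file can contain, reusing variables that appear elsewhere in this commit:

```yaml
# ${CI_TEST_VARS}, e.g. tests/files/some-flannel-scenario.yml  (file name hypothetical)
kube_network_plugin: flannel
enable_nodelocaldns: false
```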

@@ -14,6 +14,12 @@
netchecker_port: 31081
tasks:
- name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
shell: "ethtool --offload flannel.1 rx off tx off"
ignore_errors: true
when:
- kube_network_plugin == 'flannel'
- name: Force binaries directory for Container Linux by CoreOS and Flatcar
set_fact:
bin_dir: "/opt/bin"
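
Because the workaround task sets `ignore_errors: true`, a failed `ethtool` call will not abort the play. A hedged sketch of an optional follow-up check (not part of this commit) that confirms offloading really is off on the VXLAN interface:

```yaml
- name: Flannel | Verify tx/rx offloading is disabled on flannel.1  # illustrative only
  command: ethtool --show-offload flannel.1
  register: flannel_offload
  changed_when: false
  failed_when: "'tx-checksumming: off' not in flannel_offload.stdout"
  when:
    - kube_network_plugin == 'flannel'
```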