Update Flannel manifests, install script and version (0.12) + fix tests scripts (#5937)

* Add CI_TEST_VARS to tests

* Update flannel to 0.12.0 (with new manifests) and disable tx/rx
offloading in networking test
pull/5904/head
Florian Ruynat 2020-04-15 08:48:02 +02:00 committed by GitHub
parent b5125e59ab
commit 58f48500b1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 136 additions and 69 deletions

View File

@ -125,7 +125,7 @@ Note: Upstart/SysV init based OS types are not supported.
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.5.5 - [cilium](https://github.com/cilium/cilium) v1.5.5
- [contiv](https://github.com/contiv/install) v1.2.1 - [contiv](https://github.com/contiv/install) v1.2.1
- [flanneld](https://github.com/coreos/flannel) v0.11.0 - [flanneld](https://github.com/coreos/flannel) v0.12.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v0.4.0 - [kube-router](https://github.com/cloudnativelabs/kube-router) v0.4.0
- [multus](https://github.com/intel/multus-cni) v3.4.1 - [multus](https://github.com/intel/multus-cni) v3.4.1
- [weave](https://github.com/weaveworks/weave) v2.5.2 - [weave](https://github.com/weaveworks/weave) v2.5.2

View File

@ -1,5 +1,11 @@
# Flannel # Flannel
Flannel is a network fabric for containers, designed for Kubernetes.
**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with the `VXLAN` backend. While waiting on a newer Flannel version, the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcased in the kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).
## Verifying flannel install
* Flannel configuration file should have been created there * Flannel configuration file should have been created there
```ShellSession ```ShellSession

View File

@ -72,7 +72,7 @@ calico_policy_version: "v3.13.2"
calico_typha_version: "v3.13.2" calico_typha_version: "v3.13.2"
typha_enabled: false typha_enabled: false
flannel_version: "v0.11.0" flannel_version: "v0.12.0"
flannel_cni_version: "v0.3.0" flannel_cni_version: "v0.3.0"
cni_version: "v0.8.5" cni_version: "v0.8.5"

View File

@ -23,6 +23,3 @@ flannel_memory_limit: 500M
flannel_cpu_limit: 300m flannel_cpu_limit: 300m
flannel_memory_requests: 64M flannel_memory_requests: 64M
flannel_cpu_requests: 150m flannel_cpu_requests: 150m
# Legacy directory, will be removed if found.
flannel_cert_dir: /etc/flannel/certs

View File

@ -9,3 +9,19 @@
register: flannel_node_manifests register: flannel_node_manifests
when: when:
- inventory_hostname in groups['kube-master'] - inventory_hostname in groups['kube-master']
- name: Flannel | Set CNI directory permissions
file:
path: /opt/cni/bin
state: directory
owner: kube
recurse: true
mode: 0755
register: cni_bin_dir
- name: Flannel | Copy CNI plugins
unarchive:
src: "{{ local_release_dir }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
dest: "/opt/cni/bin"
mode: 0755
remote_src: yes

View File

@ -3,13 +3,66 @@ apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: flannel name: flannel
namespace: "kube-system" namespace: kube-system
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
{% if podsecuritypolicy_enabled and apparmor_enabled %}
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
{% endif %}
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
--- ---
kind: ClusterRole kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
name: flannel name: flannel
rules: rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -29,14 +82,6 @@ rules:
- nodes/status - nodes/status
verbs: verbs:
- patch - patch
- apiGroups:
- policy
resourceNames:
- privileged
resources:
- podsecuritypolicies
verbs:
- use
--- ---
kind: ClusterRoleBinding kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
@ -49,4 +94,4 @@ roleRef:
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: flannel name: flannel
namespace: "kube-system" namespace: kube-system

View File

@ -3,28 +3,27 @@ kind: ConfigMap
apiVersion: v1 apiVersion: v1
metadata: metadata:
name: kube-flannel-cfg name: kube-flannel-cfg
namespace: "kube-system" namespace: kube-system
labels: labels:
tier: node tier: node
app: flannel app: flannel
data: data:
cni-conf.json: | cni-conf.json: |
{ {
"name":"cni0", "name": "cbr0",
"cniVersion":"0.3.1", "cniVersion": "0.3.1",
"plugins":[ "plugins": [
{ {
"type":"flannel", "type": "flannel",
"delegate":{ "delegate": {
"forceAddress":true,
"hairpinMode": true, "hairpinMode": true,
"isDefaultGateway":true "isDefaultGateway": true
} }
}, },
{ {
"type":"portmap", "type": "portmap",
"capabilities":{ "capabilities": {
"portMappings":true "portMappings": true
} }
} }
] ]
@ -41,25 +40,22 @@ apiVersion: apps/v1
kind: DaemonSet kind: DaemonSet
metadata: metadata:
name: kube-flannel name: kube-flannel
namespace: "kube-system" namespace: kube-system
labels: labels:
tier: node tier: node
k8s-app: flannel app: flannel
spec: spec:
selector: selector:
matchLabels: matchLabels:
tier: node app: flannel
k8s-app: flannel
template: template:
metadata: metadata:
labels: labels:
tier: node tier: node
k8s-app: flannel app: flannel
spec: spec:
priorityClassName: system-node-critical priorityClassName: system-node-critical
serviceAccountName: flannel serviceAccountName: flannel
nodeSelector:
beta.kubernetes.io/os: linux
containers: containers:
- name: kube-flannel - name: kube-flannel
image: {{ flannel_image_repo }}:{{ flannel_image_tag }} image: {{ flannel_image_repo }}:{{ flannel_image_tag }}
@ -73,7 +69,9 @@ spec:
memory: {{ flannel_memory_requests }} memory: {{ flannel_memory_requests }}
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ] command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ]
securityContext: securityContext:
privileged: true privileged: false
capabilities:
add: ["NET_ADMIN"]
env: env:
- name: POD_NAME - name: POD_NAME
valueFrom: valueFrom:
@ -83,45 +81,47 @@ spec:
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts: volumeMounts:
- name: run - name: run
mountPath: /run mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- amd64
initContainers:
- name: install-cni
image: {{ flannel_cni_image_repo }}:{{ flannel_cni_image_tag }}
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni - name: cni
mountPath: /etc/cni/net.d mountPath: /etc/cni/net.d
- name: flannel-cfg - name: flannel-cfg
mountPath: /etc/kube-flannel/ mountPath: /etc/kube-flannel/
- name: install-cni
image: {{ flannel_cni_image_repo }}:{{ flannel_cni_image_tag }}
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kube-flannel-cfg
key: cni-conf.json
- name: CNI_CONF_NAME
value: "10-flannel.conflist"
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: host-cni-bin
mountPath: /host/opt/cni/bin/
hostNetwork: true hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet dnsPolicy: ClusterFirstWithHostNet
tolerations: tolerations:
- operator: Exists - operator: Exists
# Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12) effect: NoSchedule
- key: CriticalAddonsOnly
operator: "Exists"
volumes: volumes:
- name: run - name: run
hostPath: hostPath:
path: /run path: /run/flannel
- name: cni - name: cni
hostPath: hostPath:
path: /etc/cni/net.d path: /etc/cni/net.d

View File

@ -28,6 +28,3 @@ enable_nodelocaldns: false
kube_oidc_url: https://accounts.google.com/.well-known/openid-configuration kube_oidc_url: https://accounts.google.com/.well-known/openid-configuration
kube_oidc_client_id: kubespray-example kube_oidc_client_id: kubespray-example
# Temp set k8s ver to 1.16.8
kube_version: v1.16.8

View File

@ -52,19 +52,19 @@ fi
# Tests Cases # Tests Cases
## Test Master API ## Test Master API
ansible-playbook --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} tests/testcases/010_check-apiserver.yml $ANSIBLE_LOG_LEVEL
## Test that all pods are Running ## Test that all pods are Running
ansible-playbook --limit "all:!fake_hosts" tests/testcases/015_check-pods-running.yml $ANSIBLE_LOG_LEVEL ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} tests/testcases/015_check-pods-running.yml $ANSIBLE_LOG_LEVEL
## Test that all nodes are Ready ## Test that all nodes are Ready
ansible-playbook --limit "all:!fake_hosts" tests/testcases/020_check-nodes-ready.yml $ANSIBLE_LOG_LEVEL ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} tests/testcases/020_check-nodes-ready.yml $ANSIBLE_LOG_LEVEL
## Test pod creation and ping between them ## Test pod creation and ping between them
ansible-playbook --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $ANSIBLE_LOG_LEVEL ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} tests/testcases/030_check-network.yml $ANSIBLE_LOG_LEVEL
## Advanced DNS checks ## Advanced DNS checks
ansible-playbook --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL ansible-playbook --limit "all:!fake_hosts" -e @${CI_TEST_VARS} tests/testcases/040_check-network-adv.yml $ANSIBLE_LOG_LEVEL
## Kubernetes conformance tests ## Kubernetes conformance tests
ansible-playbook -i ${ANSIBLE_INVENTORY} -e @${CI_TEST_VARS} --limit "all:!fake_hosts" tests/testcases/100_check-k8s-conformance.yml $ANSIBLE_LOG_LEVEL ansible-playbook -i ${ANSIBLE_INVENTORY} -e @${CI_TEST_VARS} --limit "all:!fake_hosts" tests/testcases/100_check-k8s-conformance.yml $ANSIBLE_LOG_LEVEL

View File

@ -14,6 +14,12 @@
netchecker_port: 31081 netchecker_port: 31081
tasks: tasks:
- name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
shell: "ethtool --offload flannel.1 rx off tx off"
ignore_errors: true
when:
- kube_network_plugin == 'flannel'
- name: Force binaries directory for Container Linux by CoreOS and Flatcar - name: Force binaries directory for Container Linux by CoreOS and Flatcar
set_fact: set_fact:
bin_dir: "/opt/bin" bin_dir: "/opt/bin"