Remove PodSecurityPolicy support and references (#10723)

PodSecurityPolicy has been removed from kubernetes since 1.25; time to cut some dead code.
Max Gautier 2023-12-18 14:13:43 +01:00 committed by GitHub
parent 7395c27932
commit 471326f458
32 changed files with 4 additions and 619 deletions
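For context, the functionality removed here was superseded by the built-in Pod Security admission controller, which is driven by namespace labels instead of cluster-scoped PodSecurityPolicy objects and their RBAC plumbing. A minimal sketch of the replacement mechanism (the namespace name is illustrative and not part of this change):

```yaml
# Namespace-level Pod Security admission, the replacement for PodSecurityPolicy.
# The namespace name is a placeholder.
apiVersion: v1
kind: Namespace
metadata:
  name: example-app
  labels:
    # Reject pods that do not satisfy the "restricted" profile.
    pod-security.kubernetes.io/enforce: restricted
    pod-security.kubernetes.io/enforce-version: latest
    # Additionally warn and audit on violations of the same profile.
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/audit: restricted
```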

View File

@@ -120,7 +120,7 @@ kube_pod_security_default_enforce: restricted
Let's take a deep look to the resultant **kubernetes** configuration:
* The `anonymous-auth` (on `kube-apiserver`) is set to `true` by default. This is fine, because it is considered safe if you enable `RBAC` for the `authorization-mode`.
* The `enable-admission-plugins` does not include the `PodSecurityPolicy` admission plugin, because it has been definitively removed from **kubernetes** in `v1.25`. For this reason we decided to use the newer `PodSecurity` plugin instead (for more details, please take a look here: <https://kubernetes.io/docs/concepts/security/pod-security-admission/>). Then, we set the `EventRateLimit` plugin, providing additional configuration files (that are automatically created under the hood and mounted inside the `kube-apiserver` container) to make it work.
* The `enable-admission-plugins` includes `PodSecurity` (for more details, please take a look here: <https://kubernetes.io/docs/concepts/security/pod-security-admission/>). We also set the `EventRateLimit` plugin, providing the additional configuration files (automatically created under the hood and mounted inside the `kube-apiserver` container) that it needs to work; a sketch of such a file follows this list.
* The `encryption-provider-config` provides encryption at rest. This means that the `kube-apiserver` encrypts data before it is stored in `etcd`, so the data is completely unreadable from `etcd` (in case an attacker manages to exploit it).
* The `rotateCertificates` in `KubeletConfiguration` is set to `true` along with `serverTLSBootstrap`. This can be used as an alternative to the `tlsCertFile` and `tlsPrivateKeyFile` parameters, and lets the kubelet generate and rotate its certificates automatically. By default the resulting CSRs are approved automatically via [kubelet-csr-approver](https://github.com/postfinance/kubelet-csr-approver). You can customize the approval configuration by modifying Helm values via `kubelet_csr_approver_values`.
See <https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/> for more information on the subject.
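To illustrate the admission-plugin bullet above: such configuration files are typically handed to `kube-apiserver` through `--admission-control-config-file`. The sketch below shows one plausible shape of such a file, assuming the `restricted` default enforced above; the exact paths, exemptions and rate limits that kubespray renders are not part of this excerpt, so treat them as assumptions.

```yaml
# Sketch of an --admission-control-config-file (paths and values are illustrative).
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
  - name: PodSecurity
    configuration:
      apiVersion: pod-security.admission.config.k8s.io/v1
      kind: PodSecurityConfiguration
      defaults:
        enforce: restricted          # matches kube_pod_security_default_enforce above
        enforce-version: latest
        audit: restricted
        audit-version: latest
        warn: restricted
        warn-version: latest
      exemptions:
        usernames: []
        runtimeClasses: []
        namespaces:
          - kube-system              # assumed exemption, not taken from this diff
  - name: EventRateLimit
    path: /etc/kubernetes/eventratelimit.yaml   # hypothetical path to the limits file
---
# Contents of the hypothetical /etc/kubernetes/eventratelimit.yaml referenced above.
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
kind: Configuration
limits:
  - type: Namespace
    qps: 50
    burst: 100
    cacheSize: 2000
```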

View File

@@ -254,8 +254,6 @@ node_taints:
- "node.example.com/external=true:NoSchedule"
```
* *podsecuritypolicy_enabled* - When set to `true`, enables the PodSecurityPolicy admission controller and defines two policies: `privileged` (applying to all resources in the `kube-system` namespace and to the kubelet) and `restricted` (applying to all other namespaces).
Addons deployed in the kube-system namespace are handled.
* *kubernetes_audit* - When set to `true`, enables auditing.
The auditing parameters can be tuned via the following variables (whose default values are shown below):
* `audit_log_path`: /var/log/audit/kube-apiserver-audit.log
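As a minimal example of the bullet above, auditing can be switched on from inventory group variables by overriding these values (only the variables visible in this excerpt are shown; the path is the documented default and the file location is a hypothetical inventory layout):

```yaml
# Hypothetical inventory override, e.g. group_vars/k8s_cluster/k8s-cluster.yml
kubernetes_audit: true
audit_log_path: /var/log/audit/kube-apiserver-audit.log
```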

View File

@@ -243,15 +243,6 @@ kubernetes_audit: false
# kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false
# Custom PodSecurityPolicySpec for restricted policy
# podsecuritypolicy_restricted_spec: {}
# Custom PodSecurityPolicySpec for privileged policy
# podsecuritypolicy_privileged_spec: {}
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
# kubeconfig_localhost: false
# Use ansible_host as external api ip when copying over kubeconfig.

View File

@@ -81,7 +81,7 @@ netchecker_etcd_memory_limit: 256M
netchecker_etcd_cpu_requests: 100m
netchecker_etcd_memory_requests: 128M
# SecurityContext when PodSecurityPolicy is enabled
# SecurityContext (user/group)
netchecker_agent_user: 1000
netchecker_server_user: 1000
netchecker_agent_group: 1000

View File

@@ -24,15 +24,6 @@
- {file: netchecker-server-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-server}
- {file: netchecker-server-deployment.yml, type: deployment, name: netchecker-server}
- {file: netchecker-server-svc.yml, type: svc, name: netchecker-service}
netchecker_templates_for_psp:
- {file: netchecker-agent-hostnet-psp.yml, type: podsecuritypolicy, name: netchecker-agent-hostnet-policy}
- {file: netchecker-agent-hostnet-clusterrole.yml, type: clusterrole, name: netchecker-agent}
- {file: netchecker-agent-hostnet-clusterrolebinding.yml, type: clusterrolebinding, name: netchecker-agent}
- name: Kubernetes Apps | Append extra templates to Netchecker Templates list for PodSecurityPolicy
set_fact:
netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates }}"
when: podsecuritypolicy_enabled
- name: Kubernetes Apps | Lay Down Netchecker Template
template:

View File

@@ -1,14 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp:netchecker-agent-hostnet
namespace: {{ netcheck_namespace }}
rules:
- apiGroups:
- policy
resourceNames:
- netchecker-agent-hostnet
resources:
- podsecuritypolicies
verbs:
- use

View File

@@ -1,13 +0,0 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp:netchecker-agent-hostnet
namespace: {{ netcheck_namespace }}
subjects:
- kind: ServiceAccount
name: netchecker-agent
namespace: {{ netcheck_namespace }}
roleRef:
kind: ClusterRole
name: psp:netchecker-agent-hostnet
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,44 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: netchecker-agent-hostnet
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: true
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false

View File

@@ -1,65 +0,0 @@
---
podsecuritypolicy_restricted_spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAsNonRoot'
seLinux:
rule: 'RunAsAny'
runAsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false
podsecuritypolicy_privileged_spec:
privileged: true
allowPrivilegeEscalation: true
allowedCapabilities:
- '*'
volumes:
- '*'
hostNetwork: true
hostPorts:
- min: 0
max: 65535
hostIPC: true
hostPID: true
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
runAsGroup:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: false
# This will fail if allowed-unsafe-sysctls is not set accordingly in kubelet flags
allowedUnsafeSysctls:
- '*'

View File

@@ -162,56 +162,6 @@ roleRef:
name: csi-gce-pd-resizer-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-controller-deploy
rules:
- apiGroups: ["policy"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
resourceNames:
- csi-gce-pd-controller-psp
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: csi-gce-pd-controller-deploy
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: csi-gce-pd-controller-deploy
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-gce-pd-node-deploy
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- csi-gce-pd-node-psp
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: csi-gce-pd-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: csi-gce-pd-node-deploy
subjects:
- kind: ServiceAccount
name: csi-gce-pd-node-sa
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -288,4 +238,4 @@ subjects:
roleRef:
kind: Role
name: csi-gce-pd-leaderelection-role
apiGroup: rbac.authorization.k8s.io
apiGroup: rbac.authorization.k8s.io

View File

@@ -49,15 +49,6 @@
- { name: rolebinding-cephfs-provisioner, file: rolebinding-cephfs-provisioner.yml, type: rolebinding }
- { name: deploy-cephfs-provisioner, file: deploy-cephfs-provisioner.yml, type: deploy }
- { name: sc-cephfs-provisioner, file: sc-cephfs-provisioner.yml, type: sc }
cephfs_provisioner_templates_for_psp:
- { name: psp-cephfs-provisioner, file: psp-cephfs-provisioner.yml, type: psp }
- name: CephFS Provisioner | Append extra templates to CephFS Provisioner Templates list for PodSecurityPolicy
set_fact:
cephfs_provisioner_templates: "{{ cephfs_provisioner_templates_for_psp + cephfs_provisioner_templates }}"
when:
- podsecuritypolicy_enabled
- cephfs_provisioner_namespace != "kube-system"
- name: CephFS Provisioner | Create manifests
template:

View File

@@ -20,7 +20,3 @@ rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "delete"]
- apiGroups: ["policy"]
resourceNames: ["cephfs-provisioner"]
resources: ["podsecuritypolicies"]
verbs: ["use"]

View File

@@ -1,44 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: cephfs-provisioner
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false

View File

@@ -49,15 +49,6 @@
- { name: rolebinding-rbd-provisioner, file: rolebinding-rbd-provisioner.yml, type: rolebinding }
- { name: deploy-rbd-provisioner, file: deploy-rbd-provisioner.yml, type: deploy }
- { name: sc-rbd-provisioner, file: sc-rbd-provisioner.yml, type: sc }
rbd_provisioner_templates_for_psp:
- { name: psp-rbd-provisioner, file: psp-rbd-provisioner.yml, type: psp }
- name: RBD Provisioner | Append extra templates to RBD Provisioner Templates list for PodSecurityPolicy
set_fact:
rbd_provisioner_templates: "{{ rbd_provisioner_templates_for_psp + rbd_provisioner_templates }}"
when:
- podsecuritypolicy_enabled
- rbd_provisioner_namespace != "kube-system"
- name: RBD Provisioner | Create manifests
template:

View File

@@ -24,7 +24,3 @@ rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "delete"]
- apiGroups: ["policy"]
resourceNames: ["rbd-provisioner"]
resources: ["podsecuritypolicies"]
verbs: ["use"]

View File

@@ -1,44 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: rbd-provisioner
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false

View File

@@ -11,21 +11,6 @@
when:
- matallb_auto_assign is defined
- name: Kubernetes Apps | Check AppArmor status
command: which apparmor_parser
register: apparmor_status
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube_control_plane'][0]
failed_when: false
- name: Kubernetes Apps | Set apparmor_enabled
set_fact:
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Lay Down MetalLB
become: true
template:

View File

@@ -1504,14 +1504,6 @@ rules:
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- controller
resources:
- podsecuritypolicies
verbs:
- use
- apiGroups:
- admissionregistration.k8s.io
resourceNames:
@@ -1597,14 +1589,6 @@ rules:
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- speaker
resources:
- podsecuritypolicies
verbs:
- use
{% endif %}
---

View File

@@ -42,17 +42,6 @@
- { name: registry-secrets, file: registry-secrets.yml, type: secrets }
- { name: registry-cm, file: registry-cm.yml, type: cm }
- { name: registry-rs, file: registry-rs.yml, type: rs }
registry_templates_for_psp:
- { name: registry-psp, file: registry-psp.yml, type: psp }
- { name: registry-cr, file: registry-cr.yml, type: clusterrole }
- { name: registry-crb, file: registry-crb.yml, type: rolebinding }
- name: Registry | Append extra templates to Registry Templates list for PodSecurityPolicy
set_fact:
registry_templates: "{{ registry_templates[:2] + registry_templates_for_psp + registry_templates[2:] }}"
when:
- podsecuritypolicy_enabled
- registry_namespace != "kube-system"
- name: Registry | Append nginx ingress templates to Registry Templates list when ingress enabled
set_fact:

View File

@@ -1,15 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: psp:registry
namespace: {{ registry_namespace }}
rules:
- apiGroups:
- policy
resourceNames:
- registry
resources:
- podsecuritypolicies
verbs:
- use

View File

@@ -1,13 +0,0 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp:registry
namespace: {{ registry_namespace }}
subjects:
- kind: ServiceAccount
name: registry
namespace: {{ registry_namespace }}
roleRef:
kind: ClusterRole
name: psp:registry
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,44 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: registry
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
- 'persistentVolumeClaim'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: false

View File

@@ -232,12 +232,6 @@
tags:
- kubeadm_token
- name: PodSecurityPolicy | install PodSecurityPolicy
include_tasks: psp-install.yml
when:
- podsecuritypolicy_enabled
- inventory_hostname == first_kube_control_plane
- name: Kubeadm | Join other masters
include_tasks: kubeadm-secondary.yml

View File

@@ -80,11 +80,6 @@
- upgrade
ignore_errors: true # noqa ignore-errors
- name: Disable SecurityContextDeny admission-controller and enable PodSecurityPolicy
set_fact:
kube_apiserver_enable_admission_plugins: "{{ kube_apiserver_enable_admission_plugins | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}"
when: podsecuritypolicy_enabled
- name: Define nodes already joined to existing cluster and first_kube_control_plane
import_tasks: define-first-kube-control.yml

View File

@@ -1,38 +0,0 @@
---
- name: Check AppArmor status
command: which apparmor_parser
register: apparmor_status
failed_when: false
changed_when: apparmor_status.rc != 0
- name: Set apparmor_enabled
set_fact:
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
- name: Render templates for PodSecurityPolicy
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
mode: 0640
register: psp_manifests
with_items:
- {file: psp.yml, type: psp, name: psp}
- {file: psp-cr.yml, type: clusterrole, name: psp-cr}
- {file: psp-crb.yml, type: rolebinding, name: psp-crb}
- name: Add policies, roles, bindings for PodSecurityPolicy
kube:
name: "{{ item.item.name }}"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
register: result
until: result is succeeded
retries: 10
delay: 6
with_items: "{{ psp_manifests.results }}"
environment:
KUBECONFIG: "{{ kube_config_dir }}/admin.conf"
loop_control:
label: "{{ item.item.file }}"

View File

@@ -1,32 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: psp:privileged
labels:
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- policy
resourceNames:
- privileged
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: psp:restricted
labels:
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- policy
resourceNames:
- restricted
resources:
- podsecuritypolicies
verbs:
- use

View File

@@ -1,54 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: psp:any:restricted
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:restricted
subjects:
- kind: Group
name: system:authenticated
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: psp:kube-system:privileged
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:privileged
subjects:
- kind: Group
name: system:masters
apiGroup: rbac.authorization.k8s.io
- kind: Group
name: system:serviceaccounts:kube-system
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: psp:nodes:privileged
namespace: kube-system
annotations:
kubernetes.io/description: 'Allow nodes to create privileged pods. Should
be used in combination with the NodeRestriction admission plugin to limit
nodes to mirror pods bound to themselves.'
labels:
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:privileged
subjects:
- kind: Group
apiGroup: rbac.authorization.k8s.io
name: system:nodes
- kind: User
apiGroup: rbac.authorization.k8s.io
# Legacy node ID
name: kubelet

View File

@@ -1,27 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: restricted
annotations:
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
{% if apparmor_enabled %}
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
{% endif %}
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
{{ podsecuritypolicy_restricted_spec | to_yaml(indent=2, width=1337) | indent(width=2) }}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: privileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
{{ podsecuritypolicy_privileged_spec | to_yaml(indent=2, width=1337) | indent(width=2) }}

View File

@@ -102,7 +102,6 @@ loadbalancer_apiserver_pod_name: "{% if loadbalancer_apiserver_type == 'nginx' %}
# - extensions/v1beta1/deployments=true
# - extensions/v1beta1/replicasets=true
# - extensions/v1beta1/networkpolicies=true
# - extensions/v1beta1/podsecuritypolicies=true
# A port range to reserve for services with NodePort visibility.
# Inclusive at both ends of the range.

View File

@@ -608,7 +608,6 @@ etcd_events_peer_addresses: |-
{{ hostvars[item].etcd_member_name | default("etcd" + loop.index | string) }}-events=https://{{ hostvars[item].etcd_events_access_address | default(hostvars[item].ip | default(fallback_ips[item])) }}:2382{% if not loop.last %},{% endif %}
{%- endfor %}
podsecuritypolicy_enabled: false
etcd_heartbeat_interval: "250"
etcd_election_timeout: "5000"
etcd_snapshot_count: "10000"

View File

@@ -172,14 +172,6 @@ rules:
- create
- update
- delete
- apiGroups:
- policy
resourceNames:
- calico-apiserver
resources:
- podsecuritypolicies
verbs:
- use
---

View File

@@ -71,16 +71,7 @@ rules:
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
{% if calico_datastore == "etcd" %}
- apiGroups:
- policy
resourceNames:
- privileged
resources:
- podsecuritypolicies
verbs:
- use
{% elif calico_datastore == "kdd" %}
{% if calico_datastore == "kdd" %}
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.