New addon: local_volume_provisioner (#1909)

pull/1917/head
Matthew Mosesohn 2017-11-01 14:25:35 +00:00 committed by GitHub
parent ef0a91da27
commit c0e989b17c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 226 additions and 2 deletions

View File

@ -0,0 +1,67 @@
# Local Storage Provisioner
The local storage provisioner is NOT a dynamic storage provisioner as you would
expect from a cloud provider. Instead, it simply creates PersistentVolumes for
all manually created volumes located in the directory `local_volume_base_dir`.
The default path is /mnt/disks and the rest of this doc will use that path as
an example.
## Examples to create local storage volumes
### tmpfs method:
```
for vol in vol1 vol2 vol3; do
mkdir /mnt/disks/$vol
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
done
```
The tmpfs method is not recommended for production because the mount is not
persistent and data will be deleted on reboot.
### Mount physical disks
```
mkdir /mnt/disks/ssd1
mount /dev/vdb1 /mnt/disks/ssd1
```
Physical disks are recommended for production environments because they offer
complete isolation in terms of I/O and capacity.
### File-backed sparsefile method
```
truncate /mnt/disks/disk5 --size 2G
mkfs.ext4 /mnt/disks/disk5
mkdir /mnt/disks/vol5
mount /mnt/disks/disk5 /mnt/disks/vol5
```
If you have a development environment and only one disk, this is the best way
to limit the quota of persistent volumes.
### Simple directories
```
for vol in vol6 vol7 vol8; do
mkdir /mnt/disks/$vol
done
```
This is also acceptable in a development environment, but there is no capacity
management.
## Usage notes
The volume provisioner cannot calculate volume sizes correctly, so you should
delete the daemonset pod on the relevant host after creating volumes. The pod
will be recreated and read the size correctly.
Make sure to make any mounts persist via /etc/fstab or with systemd mounts (for
CoreOS/Container Linux). Pods with persistent volume claims will not be
able to start if the mounts become unavailable.
## Further reading
Refer to the upstream docs here: https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume

View File

@ -151,9 +151,12 @@ efk_enabled: false
# Helm deployment
helm_enabled: false
# Istio depoyment
# Istio deployment
istio_enabled: false
# Local volume provisioner deployment
local_volumes_enabled: false
# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts

View File

@ -0,0 +1,6 @@
---
# Image used by the one-shot bootstrap job that installs the provisioner.
local_volume_provisioner_bootstrap_image_repo: quay.io/external_storage/local-volume-provisioner-bootstrap
local_volume_provisioner_bootstrap_image_tag: v1.0.0
# Image for the local-volume-provisioner itself (runs as a DaemonSet).
local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
local_volume_provisioner_image_tag: v1.0.0

View File

@ -0,0 +1,42 @@
---
# Create the base directory that will hold discoverable local volumes on
# every node. The task is delegated from the current host to each member of
# the k8s-cluster group.
- name: Local Volume Provisioner | Ensure base dir is created on all hosts
  file:
    path: "{{ local_volume_base_dir }}"
    # BUG FIX: the Ansible `file` module has no `ensure` option — the correct
    # parameter is `state`. With the old `ensure: directory` the task always
    # errored, and `failed_when: false` silently hid that, so the directory
    # was never created.
    state: directory
    owner: root
    group: root
    mode: "0700"
  delegate_to: "{{ item }}"
  with_items: "{{ groups['k8s-cluster'] }}"
  # Best effort: do not abort the play if a single delegated host is
  # unreachable.
  failed_when: false

- name: Local Volume Provisioner | Create addon dir
  file:
    path: "{{ kube_config_dir }}/addons/local_volume_provisioner"
    # BUG FIX: `recurse: true` is only valid together with
    # `state: directory`; without it the module fails.
    state: directory
    owner: root
    group: root
    mode: "0755"
    recurse: true

# Render the addon manifests on the first master only; the registered result
# drives the apply step below.
- name: Local Volume Provisioner | Create manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
  with_items:
    - {name: local-storage-provisioner-pv-binding, file: provisioner-admin-account.yml, type: clusterrolebinding}
    - {name: local-volume-config, file: volume-config.yml, type: configmap}
    - {name: local-volume-provisioner, file: provisioner-ds.yml, type: daemonset}
  register: local_volume_manifests
  when: inventory_hostname == groups['kube-master'][0]

- name: Local Volume Provisioner | Apply manifests
  kube:
    name: "{{ item.item.name }}"
    namespace: "{{ system_namespace }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
    state: "latest"
  with_items: "{{ local_volume_manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0]

View File

@ -0,0 +1,34 @@
---
# ServiceAccount for the local-volume-provisioner DaemonSet. The manifests
# are applied with "-n {{ system_namespace }}" (see the kube task), so the
# account is created in that namespace — the subjects below must match it.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-storage-admin
---
# Grant the provisioner the right to manage PersistentVolumes cluster-wide.
# ClusterRoleBindings are cluster-scoped, so metadata.namespace is omitted
# (it was meaningless on the original resource).
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-pv-binding
subjects:
  - kind: ServiceAccount
    name: local-storage-admin
    # BUG FIX: was "default", but the ServiceAccount lives in
    # {{ system_namespace }} (where the DaemonSet runs) — a binding to the
    # wrong namespace grants nothing to the provisioner pods.
    namespace: "{{ system_namespace }}"
roleRef:
  kind: ClusterRole
  name: system:persistent-volume-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-node-binding
subjects:
  - kind: ServiceAccount
    name: local-storage-admin
    namespace: "{{ system_namespace }}"
roleRef:
  kind: ClusterRole
  name: system:node
  apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,42 @@
# DaemonSet that runs the local-volume-provisioner on every node. It scans
# {{ local_volume_base_dir }} (mounted at /local-disks) and creates a
# PersistentVolume for each mount it discovers, per the local-volume-config
# ConfigMap.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: local-volume-provisioner
  namespace: "{{ system_namespace }}"
spec:
  template:
    metadata:
      labels:
        app: local-volume-provisioner
    spec:
      containers:
        - name: provisioner
          # Quoted so the rendered values are always read as string scalars.
          image: "{{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}"
          imagePullPolicy: "{{ k8s_image_pull_policy }}"
          securityContext:
            # Privileged access is required to manage host mounts.
            privileged: true
          volumeMounts:
            - name: discovery-vol
              mountPath: "/local-disks"
            - name: local-volume-config
              mountPath: /etc/provisioner/config/
          env:
            # Downward API: tell the provisioner which node it runs on and
            # which namespace it lives in.
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: MY_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
      volumes:
        - name: discovery-vol
          hostPath:
            path: "{{ local_volume_base_dir }}"
        # 'name' listed first for readability; same mapping as before.
        - name: local-volume-config
          configMap:
            defaultMode: 420
            name: local-volume-config
      # ServiceAccount defined in provisioner-admin-account.yml.
      serviceAccount: local-storage-admin

View File

@ -0,0 +1,12 @@
# The config map is used to configure local volume discovery for Local SSDs on GCE and GKE.
# It is a map from storage class to its mount configuration: hostDir is the
# node directory scanned for volumes, mountDir is where that directory is
# mounted inside the provisioner container (its /local-disks volumeMount).
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-volume-config
  namespace: {{ system_namespace }}
data:
  storageClassMap: |
    local-storage:
      hostDir: "{{ local_volume_base_dir }}"
      mountDir: "/mnt/local-storage/"

View File

@ -20,6 +20,14 @@ dependencies:
tags:
- apps
- helm
- role: kubernetes-apps/local_volume_provisioner
when: local_volumes_enabled
tags:
- apps
- local_volume_provisioner
- storage
# istio role should be last because it takes a long time to initialize and
# will cause timeouts trying to start other addons.
- role: kubernetes-apps/istio
when: istio_enabled
tags:

View File

@ -26,6 +26,7 @@
-v /var/run:/var/run:rw \
-v {{kube_config_dir}}:{{kube_config_dir}}:ro \
-v /etc/os-release:/etc/os-release:ro \
-v {{ local_volume_base_dir }}:{{ local_volume_base_dir }}:shared \
{{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
./hyperkube kubelet \
"$@"

View File

@ -32,6 +32,7 @@ ExecStart=/usr/bin/rkt run \
--volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
--volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
--volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
--volume local-volume-base-dir,kind=host,source={{ local_volume_base_dir }},readOnly=false,recursive=true \
--mount volume=etc-cni,target=/etc/cni \
--mount volume=opt-cni,target=/opt/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
@ -49,6 +50,7 @@ ExecStart=/usr/bin/rkt run \
--mount volume=var-lib-kubelet,target=/var/lib/kubelet \
--mount volume=var-log,target=/var/log \
--mount volume=hosts,target=/etc/hosts \
--mount volume=local-volume-base-dir,target={{ local_volume_base_dir }} \
--stage1-from-dir=stage1-fly.aci \
{% if kube_hyperkube_image_repo == "docker" %}
--insecure-options=image \

View File

@ -136,10 +136,16 @@ kubectl_localhost: false
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Addons which can be enabled
efk_enabled: false
helm_enabled: false
istio_enabled: false
enable_network_policy: false
local_volumes_enabled: false
# Base path for local volume provisioner addon
local_volume_base_dir: /mnt/disks
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
@ -160,7 +166,7 @@ rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
## List of key=value pairs that describe feature gates for
## the k8s cluster.
kube_feature_gates: ['Initializers=true']
kube_feature_gates: ['Initializers=true', 'PersistentLocalVolumes={{ local_volumes_enabled|string }}']
# Vault data dirs.
vault_base_dir: /etc/vault

View File

@ -9,6 +9,7 @@ kube_network_plugin: flannel
helm_enabled: true
istio_enabled: true
efk_enabled: true
local_volumes_enabled: true
deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce