8.9.1 CephFS

The manifests below configure dynamic provisioning of CephFS volumes through the CephFS CSI driver (cephfs.csi.ceph.com): the driver's cluster ConfigMap, a StorageClass, a test PVC, and a Deployment that mounts it.

vim ceph-configmap.yaml

apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "48ddd55b-28ce-43f3-92a8-d17d9ad2c0de",
        "monitors": [
          "xxx:6789",
          "xxx:6789",
          "xxx:6789"
        ],
        "cephFS": {
          "subvolumeGroup": "cephfs-k8s-csi"
        }
      }
    ]
metadata:
  name: ceph-csi-config
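
The ConfigMap must live in the namespace where the CephFS CSI driver pods run; judging by the secret namespaces in the StorageClass below, that is assumed to be ceph-csi-cephfs here. Apply and verify with something like:

kubectl apply -f ceph-configmap.yaml -n ceph-csi-cephfs
kubectl get configmap ceph-csi-config -n ceph-csi-cephfs -o yaml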

vim cephfs-csi-sc.yaml

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs-sc
provisioner: cephfs.csi.ceph.com
parameters:
  # Ceph cluster ID; must match an entry in the ceph-csi-config ConfigMap
  clusterID: 48ddd55b-28ce-43f3-92a8-d17d9ad2c0de

  # CephFS filesystem in which subvolumes are created
  fsName: sharefs

  # (optional) Ceph data pool in which the volume data is stored
  pool: sharefs-data0

  # The secrets have to contain user and/or Ceph admin credentials.
  csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi-cephfs
  csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: ceph-csi-cephfs
  csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi-cephfs

  # (optional) The driver can use either ceph-fuse (fuse) or
  # ceph kernelclient (kernel).
  # If omitted, default volume mounter will be used - this is
  # determined by probing for ceph-fuse and mount.ceph
  # mounter: kernel

  # (optional) Prefix to use for naming subvolumes.
  # If omitted, defaults to "csi-vol-".
  # volumeNamePrefix: "foo-bar-"

reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - debug
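
The StorageClass references a csi-cephfs-secret in the ceph-csi-cephfs namespace, which must exist before any PVC can be provisioned. A minimal sketch, assuming ceph-csi's standard key names (the IDs and keys below are placeholders for a Ceph user with access to the sharefs filesystem):

apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
  namespace: ceph-csi-cephfs
stringData:
  # required for statically provisioned volumes
  userID: <ceph-user-id>
  userKey: <ceph-user-key>
  # required for dynamically provisioned volumes
  adminID: <ceph-admin-id>
  adminKey: <ceph-admin-key>

The StorageClass itself is cluster-scoped:

kubectl apply -f cephfs-csi-sc.yaml
kubectl get sc csi-cephfs-sc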

vim pvc.yaml

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-pvc-test-csi
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: csi-cephfs-sc
  resources:
    requests:
      storage: 100Mi
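
After the PVC is created, the external provisioner should bind it within a few seconds:

kubectl apply -f pvc.yaml
kubectl get pvc cephfs-pvc-test-csi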

vim test-pvc-dp.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: test-cephfs
  name: test-cephfs
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-cephfs
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: test-cephfs
    spec:
      containers:
      - command:
        - sh
        - -c
        - sleep 36000
        image: registry.cn-beijing.aliyuncs.com/dotbalo/debug-tools
        name: test-cephfs
        volumeMounts:
        - mountPath: /mnt
          name: cephfs-pvc-test
      volumes:
      - name: cephfs-pvc-test
        persistentVolumeClaim:
          claimName: cephfs-pvc-test-csi
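
Once the pod is running, the CephFS subvolume should be mounted at /mnt inside the container; for example:

kubectl apply -f test-pvc-dp.yaml
kubectl get pods -l app=test-cephfs
kubectl exec deploy/test-cephfs -- df -h /mnt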

8.9.2 Ceph RBD

The manifests below do the same for block storage, provisioning RBD images through the RBD CSI driver (rbd.csi.ceph.com).

ceph-configmap.yaml

apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "48ddd55b-28ce-43f3-92a8-d17d9ad2c0de",
        "monitors": [
          "xxx:6789",
          "xxx:6789",
          "xxx:6789"
        ],
        "cephFS": {
          "subvolumeGroup": "cephrbd-k8s-csi"
        }
      }
    ]
metadata:
  name: ceph-csi-config
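
As above, the ConfigMap belongs in the namespace of the RBD CSI driver, assumed here to be ceph-csi-rbd to match the secret namespaces in the StorageClass below:

kubectl apply -f ceph-configmap.yaml -n ceph-csi-rbd
kubectl get configmap ceph-csi-config -n ceph-csi-rbd -o yaml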

rbd-csi-sc.yaml

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  # Ceph cluster ID; must match an entry in the ceph-csi-config ConfigMap
  clusterID: 48ddd55b-28ce-43f3-92a8-d17d9ad2c0de
  # Ceph pool in which RBD images are created
  pool: rbdfork8s
  # RBD image features; "layering" is the minimal set supported by the kernel client
  imageFeatures: layering

  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi-rbd
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: ceph-csi-rbd
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi-rbd
  # filesystem created on the RBD image when it is first mounted
  csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
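
The referenced csi-rbd-secret must exist in the ceph-csi-rbd namespace before provisioning. A minimal sketch, assuming ceph-csi's standard key names (the ID and key are placeholders for a Ceph user with access to the rbdfork8s pool):

apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: ceph-csi-rbd
stringData:
  userID: <ceph-user-id>
  userKey: <ceph-user-key>

Then apply the StorageClass:

kubectl apply -f rbd-csi-sc.yaml
kubectl get sc csi-rbd-sc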

pvc.yaml

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: rbd-pvc-test-csi
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: csi-rbd-sc
  resources:
    requests:
      storage: 100Mi
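
RBD volumes are block devices, so this PVC uses ReadWriteOnce instead of ReadWriteMany. Create and check it the same way:

kubectl apply -f pvc.yaml
kubectl get pvc rbd-pvc-test-csi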

test-pvc-dp.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: test-rbd
  name: test-rbd
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-rbd
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: test-rbd
    spec:
      containers:
      - command:
        - sh
        - -c
        - sleep 36000
        image: registry.cn-beijing.aliyuncs.com/dotbalo/debug-tools
        name: test-rbd
        volumeMounts:
        - mountPath: /mnt
          name: rbd-pvc-test
      volumes:
      - name: rbd-pvc-test
        persistentVolumeClaim:
          claimName: rbd-pvc-test-csi
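
Once the pod is running, the RBD image should be mapped to the node, formatted as ext4, and mounted at /mnt; for example:

kubectl apply -f test-pvc-dp.yaml
kubectl get pods -l app=test-rbd
kubectl exec deploy/test-rbd -- df -h /mnt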