#################################################################################################################
# Create a filesystem with replication enabled, suitable for a production environment.
# A minimum of 3 OSDs on different nodes is required in this example.
# If one mds daemon per node is too restrictive, see the podAntiAffinity below.
#  kubectl create -f filesystem.yaml
#################################################################################################################
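# Once the operator has reconciled the filesystem, one possible way to verify it (names assume
# the defaults in this file; adjust the namespace and filesystem name if you changed them):
#  kubectl -n rook-ceph get cephfilesystem rook-shared-fs
#  kubectl -n rook-ceph get pod -l app=rook-ceph-mds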

apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: rook-shared-fs
  namespace: rook-ceph # namespace:cluster
spec:
  # The metadata pool spec. Must use replication.
  metadataPool:
    replicated:
      size: 3
      requireSafeReplicaSize: true
    parameters:
      # Inline compression mode for the metadata pool
      # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
      compression_mode: none
      # gives a hint (%) to Ceph about the expected consumption of the total cluster capacity for this pool
      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
      #target_size_ratio: ".5"
  # The list of data pool specs. Can use replication or erasure coding.
  dataPools:
    - name: replicated
      failureDomain: host
      replicated:
        size: 3
        # Disallow setting a pool with replica size 1, as this could lead to data loss without recovery.
        # Make sure you're *ABSOLUTELY CERTAIN* that is what you want.
        requireSafeReplicaSize: true
      parameters:
        # Inline compression mode for the data pool
        # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
        compression_mode: none
        # gives a hint (%) to Ceph about the expected consumption of the total cluster capacity for this pool
        # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
        #target_size_ratio: ".5"
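    ## A second, erasure-coded data pool could be added to this list; the entry below is a
    ## commented-out sketch (the pool name, failure domain, and chunk counts are illustrative only,
    ## and the first/default data pool is typically kept replicated):
    # - name: erasurecoded
    #   failureDomain: osd
    #   erasureCoded:
    #     dataChunks: 2
    #     codingChunks: 1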
  # Whether to preserve filesystem after CephFilesystem CRD deletion
  preserveFilesystemOnDelete: true
  # The metadata service (mds) configuration
  metadataServer:
    # The number of active MDS instances
    activeCount: 1
    # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
    # If false, standbys will be available, but will not have a warm cache.
    activeStandby: true
    # The affinity rules to apply to the mds deployment
    placement:
      # nodeAffinity:
      #   requiredDuringSchedulingIgnoredDuringExecution:
      #     nodeSelectorTerms:
      #       - matchExpressions:
      #           - key: role
      #             operator: In
      #             values:
      #               - mds-node
      # topologySpreadConstraints:
      # tolerations:
      #   - key: mds-node
      #     operator: Exists
      # podAffinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: app
                  operator: In
                  values:
                    - rook-ceph-mds
                ## Add this if you want to allow mds daemons for different filesystems to run on one
                ## node. The value in "values" must match .metadata.name.
                # - key: rook_file_system
                #   operator: In
                #   values:
                #     - rook-shared-fs
            # topologyKey: kubernetes.io/hostname will place MDS across different hosts
            topologyKey: kubernetes.io/hostname
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-mds
              # topologyKey: */zone can be used to spread MDS across different AZ
              topologyKey: topology.kubernetes.io/zone
    # A key/value list of annotations
    # annotations:
    #   key: value
    # A key/value list of labels
    # labels:
    #   key: value
    # resources:
    #   # The requests and limits set here allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
    #   limits:
    #     memory: "1024Mi"
    #   requests:
    #     cpu: "500m"
    #     memory: "1024Mi"
    priorityClassName: system-cluster-critical
    livenessProbe:
      disabled: false
    startupProbe:
      disabled: false
  # Filesystem mirroring settings
  # mirroring:
  #   enabled: true
  #   # list of Kubernetes Secrets containing the peer token
  #   # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers
  #   # Add the secret name if it already exists, else specify an empty list here.
  #   peers:
  #     secretNames:
  #       - secondary-cluster-peer
  #   # specify the schedule(s) on which snapshots should be taken
  #   # see the official syntax here: https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules
  #   snapshotSchedules:
  #     - path: /
  #       interval: 24h # daily snapshots
  #       # The startTime should be specified in the format YYYY-MM-DDTHH:MM:SS
  #       # If startTime is not specified, the start time defaults to midnight UTC.
  #       # see usage here: https://docs.ceph.com/en/latest/cephfs/snap-schedule/#usage
  #       # startTime: 2022-07-15T11:55:00
  #   # manage retention policies
  #   # see the duration syntax here: https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies
  #   snapshotRetention:
  #     - path: /
  #       duration: "h 24"
---
# create the default csi subvolume group
apiVersion: ceph.rook.io/v1
kind: CephFilesystemSubVolumeGroup
metadata:
  name: rook-shared-fs-csi # keep the subvolume group CR name the same as `<filesystem name>-csi` for the default csi subvolume group
  namespace: rook-ceph # namespace:cluster
spec:
  # The name of the subvolume group. If not set, the default is the name of the subvolumeGroup CR.
  name: csi
  # filesystemName is the metadata name of the CephFilesystem CR where the subvolume group will be created
  filesystemName: rook-shared-fs
  # reference: https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-subvolumes-and-subvolume-groups
  # only one of (export, distributed, random) can be set at a time
  # by default, pinning is set with value distributed=1
  # to disable the default, set distributed=0
  pinning:
    distributed: 1 # distributed=<0, 1> (disabled=0)
    # export: # export=<0-256> (disabled=-1)
    # random: # random=[0.0, 1.0] (disabled=0.0)
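# One possible way to confirm the subvolume group from the Rook toolbox pod, using the standard
# Ceph CLI (the filesystem name matches the CephFilesystem CR above):
#  ceph fs subvolumegroup ls rook-shared-fs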