Commit b6e105c

laverya authored and github-actions[bot] committed
Create new Rook version
1 parent 43417c8 commit b6e105c

Some content is hidden: large commits have some content hidden by default.

42 files changed (+20843 −2 lines)

addons/rook/1.17.2/Manifest

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
yum lvm2
yumol lvm2
apt lvm2

image rook-ceph docker.io/rook/ceph:v1.17.2
image ceph-ceph quay.io/ceph/ceph:v19.2.2
image cephcsi-cephcsi quay.io/cephcsi/cephcsi:v3.14.0
image sig-storage-csi-node-driver-registrar registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.13.0
image sig-storage-csi-resizer registry.k8s.io/sig-storage/csi-resizer:v1.13.1
image sig-storage-csi-provisioner registry.k8s.io/sig-storage/csi-provisioner:v5.1.0
image sig-storage-csi-snapshotter registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0
image sig-storage-csi-attacher registry.k8s.io/sig-storage/csi-attacher:v4.8.0
image csiaddons-k8s-sidecar quay.io/csiaddons/k8s-sidecar:v0.12.0
Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
provisioner: rook-ceph.cephfs.csi.ceph.com # csi-provisioner-name
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph # namespace:cluster

  # CephFS filesystem name into which the volume shall be created
  fsName: rook-shared-fs

  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: rook-shared-fs-replicated

  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster

  # (optional) Set it to true to encrypt each volume with encryption keys
  # from a key management system (KMS)
  # encrypted: "true"

  # (optional) Use external key management system (KMS) for encryption key by
  # specifying a unique ID matching a KMS ConfigMap. The ID is only used for
  # correlation to configmap entry.
  # encryptionKMSID: <kms-config-id>

  # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
  # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse
  # or by setting the default mounter explicitly via --volumemounter command-line argument.
  # mounter: kernel
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  # uncomment the following line for debugging
  #- debug
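
Note (illustration only, not part of this commit): a PersistentVolumeClaim that consumes the rook-cephfs StorageClass above could look like the following sketch; the claim name and requested size are hypothetical.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: shared-data # hypothetical claim name
spec:
  accessModes:
    - ReadWriteMany # CephFS volumes can be mounted read-write by multiple pods
  resources:
    requests:
      storage: 10Gi # hypothetical size
  storageClassName: rook-cephfs
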
Lines changed: 155 additions & 0 deletions
@@ -0,0 +1,155 @@
#################################################################################################################
# Create a filesystem with settings with replication enabled for a production environment.
# A minimum of 3 OSDs on different nodes are required in this example.
# If one mds daemon per node is too restrictive, see the podAntiAffinity below.
# kubectl create -f filesystem.yaml
#################################################################################################################

apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: rook-shared-fs
  namespace: rook-ceph # namespace:cluster
spec:
  # The metadata pool spec. Must use replication.
  metadataPool:
    replicated:
      size: 3
      requireSafeReplicaSize: true
    parameters:
      # Inline compression mode for the data pool
      # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
      compression_mode:
        none
        # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
        # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
        #target_size_ratio: ".5"
  # The list of data pool specs. Can use replication or erasure coding.
  dataPools:
    - name: replicated
      failureDomain: host
      replicated:
        size: 3
        # Disallow setting pool with replica 1, this could lead to data loss without recovery.
        # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
        requireSafeReplicaSize: true
      parameters:
        # Inline compression mode for the data pool
        # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
        compression_mode:
          none
          # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
          # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
          #target_size_ratio: ".5"
  # Whether to preserve filesystem after CephFilesystem CRD deletion
  preserveFilesystemOnDelete: true
  # The metadata service (mds) configuration
  metadataServer:
    # The number of active MDS instances
    activeCount: 1
    # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
    # If false, standbys will be available, but will not have a warm cache.
    activeStandby: true
    # The affinity rules to apply to the mds deployment
    placement:
      #  nodeAffinity:
      #    requiredDuringSchedulingIgnoredDuringExecution:
      #      nodeSelectorTerms:
      #      - matchExpressions:
      #        - key: role
      #          operator: In
      #          values:
      #          - mds-node
      #  topologySpreadConstraints:
      #  tolerations:
      #  - key: mds-node
      #    operator: Exists
      #  podAffinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: app
                  operator: In
                  values:
                    - rook-ceph-mds
              ## Add this if you want to allow mds daemons for different filesystems to run on one
              ## node. The value in "values" must match .metadata.name.
              # - key: rook_file_system
              #   operator: In
              #   values:
              #     - rook-shared-fs
            # topologyKey: kubernetes.io/hostname will place MDS across different hosts
            topologyKey: kubernetes.io/hostname
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-mds
              # topologyKey: */zone can be used to spread MDS across different AZ
              topologyKey: topology.kubernetes.io/zone
    # A key/value list of annotations
    # annotations:
    #   key: value
    # A key/value list of labels
    # labels:
    #   key: value
    # resources:
    # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
    #   limits:
    #     memory: "1024Mi"
    #   requests:
    #     cpu: "500m"
    #     memory: "1024Mi"
    priorityClassName: system-cluster-critical
    livenessProbe:
      disabled: false
    startupProbe:
      disabled: false
  # Filesystem mirroring settings
  # mirroring:
  #   enabled: true
  #   # list of Kubernetes Secrets containing the peer token
  #   # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers
  #   # Add the secret name if it already exists else specify the empty list here.
  #   peers:
  #     secretNames:
  #       - secondary-cluster-peer
  #   # specify the schedule(s) on which snapshots should be taken
  #   # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules
  #   snapshotSchedules:
  #     - path: /
  #       interval: 24h # daily snapshots
  #       # The startTime should be mentioned in the format YYYY-MM-DDTHH:MM:SS
  #       # If startTime is not specified, then by default the start time is considered as midnight UTC.
  #       # see usage here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#usage
  #       # startTime: 2022-07-15T11:55:00
  #   # manage retention policies
  #   # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies
  #   snapshotRetention:
  #     - path: /
  #       duration: "h 24"
---
# create default csi subvolume group
apiVersion: ceph.rook.io/v1
kind: CephFilesystemSubVolumeGroup
metadata:
  name: rook-shared-fs-csi # lets keep the svg crd name same as `filesystem name + csi` for the default csi svg
  namespace: rook-ceph # namespace:cluster
spec:
  # The name of the subvolume group. If not set, the default is the name of the subvolumeGroup CR.
  name: csi
  # filesystemName is the metadata name of the CephFilesystem CR where the subvolume group will be created
  filesystemName: rook-shared-fs
  # reference https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-subvolumes-and-subvolume-groups
  # only one out of (export, distributed, random) can be set at a time
  # by default pinning is set with value: distributed=1
  # for disabling default values set (distributed=0)
  pinning:
    distributed: 1 # distributed=<0, 1> (disabled=0)
    # export: # export=<0-256> (disabled=-1)
    # random: # random=[0.0, 1.0](disabled=0.0)
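
Note (illustration only, not part of this commit): a pod could mount a claim backed by this shared filesystem as in the sketch below; the pod name, image, and claim name are hypothetical and reuse the shared-data PVC sketched after the StorageClass above.

apiVersion: v1
kind: Pod
metadata:
  name: cephfs-demo # hypothetical pod name
spec:
  containers:
    - name: app
      image: busybox # placeholder image for illustration
      command: ["sh", "-c", "sleep 3600"]
      volumeMounts:
        - name: shared
          mountPath: /data # the CephFS volume is mounted here
  volumes:
    - name: shared
      persistentVolumeClaim:
        claimName: shared-data # hypothetical PVC from the earlier sketch
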
Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
parameters:
  pool: rook-shared-fs-data0
Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
---
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: rook-shared-fs
  namespace: rook-ceph
spec:
  metadataServer:
    placement:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution: ~
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-mds
              # topologyKey: */zone can be used to spread MDS across different AZ
              # Use <topologyKey: failure-domain.beta.kubernetes.io/zone> in k8s cluster if your cluster is v1.16 or lower
              # Use <topologyKey: topology.kubernetes.io/zone> in k8s cluster is v1.17 or upper
              topologyKey: topology.kubernetes.io/zone
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-mds
              # topologyKey: kubernetes.io/hostname will place MDS across different hosts
              topologyKey: kubernetes.io/hostname
Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
---
- op: replace
  path: /spec/dataPools/0/name
  value: data0
- op: replace
  path: /spec/dataPools/0/replicated/size
  value: ${CEPH_POOL_REPLICAS}
- op: replace
  path: /spec/dataPools/0/replicated/requireSafeReplicaSize
  value: false
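
Note (illustration only, not part of this commit): applied to the CephFilesystem above, these three replace operations rename the first data pool and relax its replication. Assuming CEPH_POOL_REPLICAS resolves to 1 (the actual value depends on the cluster), the patched dataPools entry would look roughly like the sketch below.

dataPools:
  - name: data0 # renamed from "replicated" by the first op
    failureDomain: host
    replicated:
      size: 1 # ${CEPH_POOL_REPLICAS} substituted (assumed value)
      requireSafeReplicaSize: false # safety check relaxed for single-replica pools
    parameters:
      compression_mode:
        none
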
Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
---
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: rook-shared-fs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: ${CEPH_POOL_REPLICAS}
      requireSafeReplicaSize: false
  metadataServer:
    resources:
      limits:
        cpu: "500m"
        memory: "1024Mi"
      requests:
        cpu: "500m"
        memory: "1024Mi"
