#################################################################################################################
# Define the settings for the rook-ceph cluster with common settings for a small test cluster.
# All nodes with available raw devices will be used for the Ceph cluster. One node is sufficient
# in this example.
#
# For example, to create the cluster:
#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
#   kubectl create -f cluster-test.yaml
#################################################################################################################
---
# Ceph config overrides suitable for a single-node test cluster: replica size 1,
# no redundancy warnings, and settings commonly needed when OSDs run on loop/test devices.
kind: ConfigMap
apiVersion: v1
metadata:
  name: rook-config-override
  namespace: rook-ceph # namespace:cluster
data:
  config: |
    [global]
    osd_pool_default_size = 1
    mon_warn_on_pool_no_redundancy = false
    bdev_flock_retry = 20
    bluefs_buffered_io = false
---
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: my-cluster
  namespace: rook-ceph # namespace:cluster
spec:
  dataDirHostPath: /var/lib/rook
  cephVersion:
    image: quay.io/ceph/ceph:v17
    allowUnsupported: true
  # Single mon/mgr with co-location allowed: test-cluster settings, not for production.
  mon:
    count: 1
    allowMultiplePerNode: true
  mgr:
    count: 1
    allowMultiplePerNode: true
  dashboard:
    enabled: true
  crashCollector:
    disable: true
  storage:
    useAllNodes: false
    useAllDevices: false
    # deviceFilter:
    config:
      # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
      # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
      # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
      # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
      osdsPerDevice: "1" # this value can be overridden at the node or device level
      # encryptedDevice: "true" # the default value for this option is "false"
    # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
    nodes:
      - name: "amd-5700g"
        devices: # specific devices to use for storage can be specified for each node
          - name: "sda"
        # NOTE(review): bare (null) per-node deviceFilter preserved from the original —
        # confirm it was meant to be left unset, set to a pattern, or removed entirely.
        deviceFilter:
  healthCheck:
    daemonHealth:
      mon:
        interval: 45s
        timeout: 600s
  priorityClassNames:
    all: system-node-critical
    mgr: system-cluster-critical
  disruptionManagement:
    managePodBudgets: true
---
# The builtin .mgr pool must exist with replica size 1 on a single-OSD test cluster,
# otherwise the mgr modules cannot store their state.
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: builtin-mgr
  namespace: rook-ceph # namespace:cluster
spec:
  name: .mgr
  replicated:
    size: 1
    requireSafeReplicaSize: false