From c35da613d918eb7a28b57bdb71efcaf073209fc8 Mon Sep 17 00:00:00 2001
From: Marco van Zijl
Date: Sat, 3 May 2025 15:41:51 +0200
Subject: [PATCH] Add Rook Ceph cluster application configuration YAML

---
 applications/templates/rook-ceph-cluster.yaml | 148 ++++++++++++++++++
 1 file changed, 148 insertions(+)
 create mode 100644 applications/templates/rook-ceph-cluster.yaml

diff --git a/applications/templates/rook-ceph-cluster.yaml b/applications/templates/rook-ceph-cluster.yaml
new file mode 100644
--- /dev/null
+++ b/applications/templates/rook-ceph-cluster.yaml
@@ -0,0 +1,148 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: rook-ceph-cluster
+  namespace: argocd
+spec:
+  project: default
+  destination:
+    server: "{{ .Values.spec.destination.server }}"
+    namespace: ceph
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+  source:
+    chart: rook-ceph-cluster
+    repoURL: https://charts.rook.io/release
+    targetRevision: 1.17.1
+    helm:
+      valuesObject:
+        operatorNamespace: ceph
+        toolbox:
+          enabled: false
+        monitoring:
+          enabled: false
+        cephClusterSpec:
+          mgr:
+            modules:
+              - name: pg_autoscaler
+                enabled: true
+              - name: devicehealth
+                enabled: true
+              - name: diskprediction_local
+                enabled: true
+              - name: rook
+                enabled: true
+          dashboard:
+            ssl: false
+          network:
+            provider: host
+        cephBlockPools:
+          - name: rbd-slow
+            spec:
+              replicated:
+                size: 2
+              failureDomain: host
+              deviceClass: hdd
+              parameters:
+                min_size: "1"
+            storageClass:
+              name: rbd-slow
+              enabled: true
+              isDefault: false
+              reclaimPolicy: Retain
+              allowVolumeExpansion: true
+              volumeBindingMode: "Immediate"
+              parameters:
+                imageFeatures: "layering,exclusive-lock,object-map,fast-diff"
+                csi.storage.k8s.io/fstype: ext4
+                csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+                # NOTE(review): this manifest is itself rendered by the parent Helm chart, so the Release.Namespace references below expand to the parent release's namespace — confirm that matches the rook operator namespace (ceph), or escape them so the literal string reaches the rook chart.
+                csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
+                csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+                csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
+                csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+                csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
+        cephFileSystems:
+          - name: veda-fs
+            spec:
+              metadataPool:
+                failureDomain: host
+                replicated:
+                  size: 2
+                deviceClass: hdd
+                parameters:
+                  min_size: "1"
+              dataPools:
+                - failureDomain: host
+                  replicated:
+                    size: 2
+                  deviceClass: hdd
+                  parameters:
+                    min_size: "1"
+                  name: data0
+              preserveFilesystemOnDelete: true
+              metadataServer:
+                activeCount: 1
+                activeStandby: false
+                resources:
+                  requests:
+                    cpu: "250m"
+                    memory: "1Gi"
+                priorityClassName: system-cluster-critical
+            storageClass:
+              name: veda-fs
+              enabled: true
+              isDefault: false
+              # NOTE(review): chart convention points this at a data pool of the filesystem (data0 above); confirm a pool named "bulk" exists for veda-fs.
+              pool: bulk
+              reclaimPolicy: Retain
+              allowVolumeExpansion: true
+              volumeBindingMode: "Immediate"
+              parameters:
+                csi.storage.k8s.io/fstype: ext4
+                csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+                csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
+                csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+                csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
+                csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+                csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
+        cephObjectStores:
+          - name: rgw-bulk
+            spec:
+              metadataPool:
+                failureDomain: host
+                replicated:
+                  size: 2
+                deviceClass: hdd
+                parameters:
+                  min_size: "1"
+              dataPool:
+                failureDomain: host
+                replicated:
+                  size: 2
+                deviceClass: hdd
+                parameters:
+                  min_size: "1"
+              preservePoolsOnDelete: true
+              gateway:
+                port: 80
+                instances: 2
+                hostNetwork: false
+                resources:
+                  requests:
+                    cpu: "500m"
+                    memory: "512Mi"
+            storageClass:
+              name: rgw-bulk
+              enabled: true
+              reclaimPolicy: Retain
+              volumeBindingMode: "Immediate"
+            ingress:
+              enabled: true
+              annotations:
+                traefik.ingress.kubernetes.io/router.entrypoints: web
+              host:
+                name: s3.noxxos.nl
+                path: /