---
# Argo CD Application deploying the rook-ceph-cluster Helm chart into the
# "ceph" namespace. This file is itself a Helm template (app-of-apps pattern):
# the destination server is injected from the parent chart's values.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: rook-ceph-cluster
  namespace: argocd
spec:
  project: default
  destination:
    # Quoted: an empty expansion would otherwise render as YAML null, and a
    # leading "{{" can trip YAML-aware tooling that sees the raw template.
    server: "{{ .Values.spec.destination.server }}"
    namespace: ceph
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
  source:
    chart: rook-ceph-cluster
    repoURL: https://charts.rook.io/release
    # Quoted: version strings are number-lookalikes; keep them strings.
    targetRevision: "1.17.1"
    helm:
      valuesObject:
        # Namespace where the rook-ceph operator itself runs.
        operatorNamespace: ceph
        toolbox:
          enabled: true
        monitoring:
          enabled: true
        ingress:
          dashboard:
            annotations:
              traefik.ingress.kubernetes.io/router.entrypoints: websecure
            host:
              name: ceph.noxxos.nl
              path: /
        cephClusterSpec:
          crashCollector:
            disable: true
          mgr:
            modules:
              - name: pg_autoscaler
                enabled: true
              - name: devicehealth
                enabled: true
              - name: diskprediction_local
                enabled: true
              - name: rook
                enabled: true
          dashboard:
            ssl: false
          # Per-daemon resource requests/limits. Quantities are quoted so the
          # YAML parser never retypes them (e.g. "1Gi" stays a string).
          resources:
            mgr:
              limits:
                memory: "1Gi"
              requests:
                cpu: "100m"
                memory: "256Mi"
            mon:
              limits:
                memory: "2Gi"
              requests:
                cpu: "200m"
                memory: "1Gi"
            osd:
              limits:
                memory: "4Gi"
              requests:
                cpu: "100m"
                memory: "2Gi"
            prepareosd:
              requests:
                cpu: "100m"
                memory: "50Mi"
            mgr-sidecar:
              limits:
                memory: "100Mi"
              requests:
                cpu: "100m"
                memory: "40Mi"
            crashcollector:
              limits:
                memory: "60Mi"
              requests:
                cpu: "100m"
                memory: "60Mi"
            logcollector:
              limits:
                memory: "1Gi"
              requests:
                cpu: "100m"
                memory: "100Mi"
            cleanup:
              limits:
                memory: "1Gi"
              requests:
                cpu: "100m"
                memory: "100Mi"
        # RBD block pool + StorageClass (2x replication on HDDs).
        cephBlockPools:
          - name: rbd-slow
            spec:
              replicated:
                size: 2
              failureDomain: host
              deviceClass: hdd
              parameters:
                min_size: "1"
            storageClass:
              name: rbd-slow
              enabled: true
              isDefault: false
              reclaimPolicy: Retain
              allowVolumeExpansion: true
              volumeBindingMode: "Immediate"
              parameters:
                imageFeatures: "layering,exclusive-lock,object-map,fast-diff"
                csi.storage.k8s.io/fstype: ext4
                csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
                csi.storage.k8s.io/provisioner-secret-namespace: ceph
                csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
                csi.storage.k8s.io/controller-expand-secret-namespace: ceph
                csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
                csi.storage.k8s.io/node-stage-secret-namespace: ceph
        # CephFS filesystem + StorageClass.
        cephFileSystems:
          - name: veda-fs
            spec:
              metadataPool:
                failureDomain: host
                replicated:
                  size: 2
                deviceClass: hdd
                parameters:
                  min_size: "1"
              dataPools:
                - failureDomain: host
                  replicated:
                    size: 2
                  deviceClass: hdd
                  parameters:
                    min_size: "1"
                  name: data0
              preserveFilesystemOnDelete: true
              metadataServer:
                activeCount: 1
                activeStandby: false
                resources:
                  requests:
                    cpu: "250m"
                    memory: "1Gi"
                priorityClassName: system-cluster-critical
            storageClass:
              name: veda-fs
              enabled: true
              isDefault: false
              # NOTE(review): the only data pool declared above is "data0",
              # but this references pool "bulk" — confirm "bulk" exists or
              # that "data0" was intended.
              pool: bulk
              reclaimPolicy: Retain
              allowVolumeExpansion: true
              volumeBindingMode: "Immediate"
              parameters:
                csi.storage.k8s.io/fstype: ext4
                csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
                csi.storage.k8s.io/provisioner-secret-namespace: ceph
                csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
                csi.storage.k8s.io/controller-expand-secret-namespace: ceph
                csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
                csi.storage.k8s.io/node-stage-secret-namespace: ceph
        # RGW object store (S3) + bucket StorageClass, exposed via Traefik.
        cephObjectStores:
          - name: rgw-bulk
            spec:
              metadataPool:
                failureDomain: host
                replicated:
                  size: 2
                deviceClass: hdd
                parameters:
                  min_size: "1"
              dataPool:
                failureDomain: host
                replicated:
                  size: 2
                deviceClass: hdd
                parameters:
                  min_size: "1"
              preservePoolsOnDelete: true
              gateway:
                port: 80
                instances: 1
                resources:
                  requests:
                    cpu: "200m"
                    memory: "512Mi"
            storageClass:
              name: rgw-bulk
              enabled: true
              reclaimPolicy: Retain
              volumeBindingMode: "Immediate"
            ingress:
              enabled: true
              annotations:
                traefik.ingress.kubernetes.io/router.entrypoints: websecure
              host:
                name: s3.noxxos.nl
                path: /