# Argo CD Application that deploys the rook-ceph-cluster Helm chart into the "ceph" namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: rook-ceph-cluster
  namespace: argocd
spec:
  project: default
  destination:
    server: {{ .Values.spec.destination.server }}
    namespace: ceph
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
  source:
    chart: rook-ceph-cluster
    repoURL: https://charts.rook.io/release
    targetRevision: 1.17.1
    helm:
      valuesObject:
        operatorNamespace: ceph
        toolbox:
          enabled: false
        monitoring:
          enabled: false
        cephClusterSpec:
          mgr:
            modules:
              - name: pg_autoscaler
                enabled: true
              - name: devicehealth
                enabled: true
              - name: diskprediction_local
                enabled: true
              - name: rook
                enabled: true
          dashboard:
            ssl: false
          network:
            provider: host
        cephBlockPools:
          - name: rbd-slow
            spec:
              replicated:
                size: 2
              failureDomain: host
              deviceClass: hdd
              parameters:
                min_size: "1"
            storageClass:
              name: rbd-slow
              enabled: true
              isDefault: false
              reclaimPolicy: Retain
              allowVolumeExpansion: true
              volumeBindingMode: "Immediate"
              parameters:
                imageFeatures: "layering,exclusive-lock,object-map,fast-diff"
                csi.storage.k8s.io/fstype: ext4
                csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
                csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
                csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
                csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
                csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
                csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        cephFileSystems:
          - name: veda-fs
            spec:
              metadataPool:
                failureDomain: host
                replicated:
                  size: 2
                deviceClass: hdd
                parameters:
                  min_size: "1"
              dataPools:
                - failureDomain: host
                  replicated:
                    size: 2
                  deviceClass: hdd
                  parameters:
                    min_size: "1"
                  name: data0
              preserveFilesystemOnDelete: true
              metadataServer:
                activeCount: 1
                activeStandby: false
                resources:
                  requests:
                    cpu: "250m"
                    memory: "1Gi"
                priorityClassName: system-cluster-critical
            storageClass:
              name: veda-fs
              enabled: true
              isDefault: false
              # Must match the name of a dataPools entry above.
              pool: data0
              reclaimPolicy: Retain
              allowVolumeExpansion: true
              volumeBindingMode: "Immediate"
              parameters:
                csi.storage.k8s.io/fstype: ext4
                csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
                csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
                csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
                csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
                csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
                csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        cephObjectStores:
          - name: rgw-bulk
            spec:
              metadataPool:
                failureDomain: host
                replicated:
                  size: 2
                deviceClass: hdd
                parameters:
                  min_size: "1"
              dataPool:
                failureDomain: host
                replicated:
                  size: 2
                deviceClass: hdd
                parameters:
                  min_size: "1"
              preservePoolsOnDelete: true
              gateway:
                port: 80
                instances: 2
                hostNetwork: false
                resources:
                  requests:
                    cpu: "500m"
                    memory: "512Mi"
            storageClass:
              name: rgw-bulk
              enabled: true
              reclaimPolicy: Retain
              volumeBindingMode: "Immediate"
            ingress:
              enabled: true
              annotations:
                traefik.ingress.kubernetes.io/router.entrypoints: web
              host:
                name: s3.noxxos.nl
                path: /