Added new Rook-Ceph Application

This commit is contained in:
Marco van Zijl 2025-11-08 20:04:46 +01:00
parent dfea081ee3
commit bf5fa15ce7
6 changed files with 343 additions and 0 deletions

View File

@@ -0,0 +1,7 @@
---
# Umbrella chart wrapping the upstream rook-ceph-cluster chart so its
# values can be pinned and overridden from this repository.
apiVersion: v2
name: rook-ceph-cluster
version: 1.0.0
dependencies:
  # Upstream Rook cluster chart; keep this version in lockstep with the
  # operator chart's rook-ceph dependency.
  - name: rook-ceph-cluster
    version: v1.18.6
    repository: https://charts.rook.io/release

View File

@@ -0,0 +1,29 @@
---
# Argo CD Application for the Rook-Ceph cluster chart.
# Sync wave 0 so it lands after the operator (wave -1).
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: rook-ceph-cluster
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "0"
  finalizers:
    # Cascade-delete managed resources when this Application is removed.
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://git.mvzijl.nl/marco/veda.git
    targetRevision: applicationset-rewrite
    path: apps/ceph/cluster
    helm:
      releaseName: rook-ceph-cluster
      valueFiles:
        - values.yaml
  destination:
    server: https://kubernetes.default.svc
    namespace: rook-ceph
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      # Server-side apply avoids last-applied annotation bloat on the
      # large CephCluster resources.
      - ServerSideApply=true

View File

@@ -0,0 +1,247 @@
---
# Helm values for the rook-ceph-cluster umbrella chart.
# Everything nests under the dependency alias "rook-ceph-cluster".
rook-ceph-cluster:
  operatorNamespace: rook-ceph

  # Toolbox pod for ad-hoc `ceph` CLI debugging.
  toolbox:
    enabled: true
    resources:
      requests:
        cpu: 100m
        memory: 128Mi

  monitoring:
    enabled: true

  ingress:
    dashboard:
      enabled: false # We'll use Gateway API instead

  cephClusterSpec:
    crashCollector:
      disable: true
    mgr:
      modules:
        - name: pg_autoscaler
          enabled: true
        - name: devicehealth
          enabled: true
        - name: diskprediction_local
          enabled: true
        - name: rook
          enabled: true
    dashboard:
      enabled: true
      # Plain HTTP; TLS is expected to terminate at the gateway.
      ssl: false
    # Per-daemon resource requests/limits.
    resources:
      mgr:
        limits:
          memory: "1Gi"
        requests:
          cpu: "100m"
          memory: "256Mi"
      mon:
        limits:
          memory: "2Gi"
        requests:
          cpu: "200m"
          memory: "1Gi"
      osd:
        limits:
          memory: "4Gi"
        requests:
          cpu: "100m"
          memory: "2Gi"
      prepareosd:
        requests:
          cpu: "100m"
          memory: "50Mi"
      mgr-sidecar:
        limits:
          memory: "100Mi"
        requests:
          cpu: "100m"
          memory: "40Mi"
      crashcollector:
        limits:
          memory: "60Mi"
        requests:
          cpu: "100m"
          memory: "60Mi"
      logcollector:
        limits:
          memory: "1Gi"
        requests:
          cpu: "100m"
          memory: "100Mi"
      cleanup:
        limits:
          memory: "1Gi"
        requests:
          cpu: "100m"
          memory: "100Mi"

  # RBD (block) pool + StorageClass.
  cephBlockPools:
    - name: ceph-blockpool
      spec:
        replicated:
          size: 2
        failureDomain: host
        deviceClass: hdd
        parameters:
          # NOTE(review): size 2 with min_size 1 keeps I/O available with
          # one replica lost, but leaves a window with no redundancy.
          min_size: "1"
      storageClass:
        name: ceph-block
        enabled: true
        isDefault: true
        reclaimPolicy: Delete
        allowVolumeExpansion: true
        volumeBindingMode: "Immediate"
        parameters:
          imageFeatures: "layering,exclusive-lock,object-map,fast-diff"
          csi.storage.k8s.io/fstype: ext4
          csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
          csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
          csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
          csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
          csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
          csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph

  # CephFS filesystem + StorageClass.
  cephFileSystems:
    - name: ceph-filesystem
      spec:
        metadataPool:
          failureDomain: host
          replicated:
            size: 2
          deviceClass: hdd
          parameters:
            min_size: "1"
        dataPools:
          - failureDomain: host
            replicated:
              size: 2
            deviceClass: hdd
            parameters:
              min_size: "1"
            name: data0
        preserveFilesystemOnDelete: true
        metadataServer:
          activeCount: 1
          activeStandby: true
          resources:
            requests:
              cpu: "250m"
              memory: "1Gi"
          priorityClassName: system-cluster-critical
      storageClass:
        name: ceph-filesystem
        enabled: true
        isDefault: false
        pool: data0
        reclaimPolicy: Retain
        allowVolumeExpansion: true
        volumeBindingMode: "Immediate"
        parameters:
          csi.storage.k8s.io/fstype: ext4
          csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
          csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
          csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
          csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
          csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
          csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph

  # RGW object store + bucket StorageClass.
  cephObjectStores:
    - name: ceph-objectstore
      spec:
        metadataPool:
          failureDomain: host
          replicated:
            size: 2
          deviceClass: hdd
          parameters:
            min_size: "1"
        dataPool:
          failureDomain: host
          replicated:
            size: 2
          deviceClass: hdd
          parameters:
            min_size: "1"
        preservePoolsOnDelete: true
        gateway:
          port: 80
          instances: 1
          resources:
            requests:
              cpu: "200m"
              memory: "512Mi"
      storageClass:
        name: ceph-bucket
        enabled: true
        reclaimPolicy: Delete
        volumeBindingMode: "Immediate"
      ingress:
        enabled: false # We'll use Gateway API instead

  # Gateway API HTTPRoutes for Ceph Dashboard and S3.
  # NOTE(review): confirm this chart version actually renders
  # `extraObjects`; otherwise these need their own manifests.
  extraObjects:
    # ReferenceGrant for Gateway traffic into the rook-ceph namespace.
    # NOTE(review): `from.namespace: rook-ceph` grants same-namespace
    # access, which is always allowed anyway — verify the intended
    # source namespace (e.g. traefik) before relying on this.
    - apiVersion: gateway.networking.k8s.io/v1beta1
      kind: ReferenceGrant
      metadata:
        name: traefik-gateway-access
        namespace: rook-ceph
      spec:
        from:
          - group: gateway.networking.k8s.io
            kind: HTTPRoute
            namespace: rook-ceph
        to:
          - group: ""
            kind: Service
    # Ceph Dashboard HTTPRoute.
    - apiVersion: gateway.networking.k8s.io/v1
      kind: HTTPRoute
      metadata:
        name: ceph-dashboard
        namespace: rook-ceph
      spec:
        parentRefs:
          - name: traefik-gateway
            namespace: traefik
            sectionName: websecure
        hostnames:
          - "ceph.noxxos.nl"
        rules:
          - matches:
              - path:
                  type: PathPrefix
                  value: /
            backendRefs:
              - name: rook-ceph-mgr-dashboard
                port: 7000
    # S3 object storage HTTPRoute.
    - apiVersion: gateway.networking.k8s.io/v1
      kind: HTTPRoute
      metadata:
        name: ceph-s3
        namespace: rook-ceph
      spec:
        parentRefs:
          - name: traefik-gateway
            namespace: traefik
            sectionName: websecure
        hostnames:
          - "s3.noxxos.nl"
        rules:
          - matches:
              - path:
                  type: PathPrefix
                  value: /
            backendRefs:
              - name: rook-ceph-rgw-ceph-objectstore
                port: 80

View File

@@ -0,0 +1,7 @@
---
# Umbrella chart wrapping the upstream rook-ceph operator chart so its
# values can be pinned and overridden from this repository.
apiVersion: v2
name: rook-ceph-operator
version: 1.0.0
dependencies:
  # Upstream Rook operator chart; keep this version in lockstep with the
  # cluster chart's rook-ceph-cluster dependency.
  - name: rook-ceph
    version: v1.18.6
    repository: https://charts.rook.io/release

View File

@@ -0,0 +1,29 @@
---
# Argo CD Application for the Rook-Ceph operator chart.
# Sync wave -1 so the operator and its CRDs exist before the cluster
# Application (wave 0) syncs.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: rook-ceph-operator
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "-1"
  finalizers:
    # Cascade-delete managed resources when this Application is removed.
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://git.mvzijl.nl/marco/veda.git
    targetRevision: applicationset-rewrite
    path: apps/ceph/operator
    helm:
      releaseName: rook-ceph-operator
      valueFiles:
        - values.yaml
  destination:
    server: https://kubernetes.default.svc
    namespace: rook-ceph
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      # Server-side apply is required for the very large Ceph CRDs,
      # which exceed the last-applied annotation size limit.
      - ServerSideApply=true

View File

@@ -0,0 +1,24 @@
---
# Helm values for the rook-ceph operator umbrella chart.
# Everything nests under the dependency alias "rook-ceph".
rook-ceph:
  crds:
    enabled: true
  monitoring:
    enabled: true
  priorityClassName: system-cluster-critical
  csi:
    enableMetadata: true
    serviceMonitor:
      enabled: true
  # Periodically scan nodes for new devices to use as OSDs.
  enableDiscoveryDaemon: true
  discoveryDaemonInterval: 6h
  # Operator pod resources.
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 500m
      memory: 512Mi