CephDeploymentHealth
The CephDeploymentHealth object aggregates all statuses of the Ceph cluster. It is updated automatically by the Pelagia health-controller.
Status fields include:
1. lastHealthCheck: Time of the last health check.
2. lastHealthUpdate: Time of the last status update.
3. state: Current state of the health check (Ok means no issues were detected).
4. messages: Descriptions of any detected issues.
5. miraCephGeneration: Generation of the MiraCeph object used for status checks.
6. healthReport: Overall cluster status, including Ceph daemon statuses, cluster details and usage, OSD analysis, and the statuses of Rook-managed objects (block storage, object storage, shared filesystems).
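To inspect the aggregated status on a live cluster, fetch the object with kubectl. The command below is a minimal sketch: it assumes the CRD is served under the resource name cephdeploymenthealth and that the object is named pelagia-ceph in the pelagia namespace, matching the example that follows.

# Assumes the resource name, object name, and namespace shown in the example below
kubectl -n pelagia get cephdeploymenthealth pelagia-ceph -o yaml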
Example:
apiVersion: v1
items:
- apiVersion: lcm.mirantis.com/v1alpha1
  kind: CephDeploymentHealth
  metadata:
    name: pelagia-ceph
    namespace: pelagia
  status:
    healthReport:
      cephDaemons:
        cephCSIPluginDaemons:
          csi-cephfsplugin:
            info:
            - 3/3 ready
            status: ok
          csi-rbdplugin:
            info:
            - 3/3 ready
            status: ok
        cephDaemons:
          mds:
            info:
            - 'mds active: 1/1 (cephfs ''cephfs-store'')'
            status: ok
          mgr:
            info:
            - 'a is active mgr, standbys: [b]'
            status: ok
          mon:
            info:
            - 3 mons, quorum [a b c]
            status: ok
          osd:
            info:
            - 3 osds, 3 up, 3 in
            status: ok
          rgw:
            info:
            - '2 rgws running, daemons: [21273 38213]'
            status: ok
      clusterDetails:
        cephEvents:
          PgAutoscalerDetails:
            state: Idle
          rebalanceDetails:
            state: Idle
        rgwInfo:
          publicEndpoint: https://192.10.1.101:443
        usageDetails:
          deviceClasses:
            hdd:
              availableBytes: "159676964864"
              totalBytes: "161048690688"
              usedBytes: "1371725824"
          pools:
            .mgr:
              availableBytes: "75660169216"
              totalBytes: "75661557760"
              usedBytes: "1388544"
              usedBytesPercentage: "0.001"
            .rgw.root:
              availableBytes: "75661426688"
              totalBytes: "75661557760"
              usedBytes: "131072"
              usedBytesPercentage: "0.000"
            cephfs-store-cephfs-pool-1:
              availableBytes: "75661557760"
              totalBytes: "75661557760"
              usedBytes: "0"
              usedBytesPercentage: "0.000"
            cephfs-store-metadata:
              availableBytes: "75660517376"
              totalBytes: "75661557760"
              usedBytes: "1040384"
              usedBytesPercentage: "0.001"
            kubernetes-hdd:
              availableBytes: "75661549568"
              totalBytes: "75661557760"
              usedBytes: "8192"
              usedBytesPercentage: "0.000"
            rgw-store.rgw.buckets.data:
              availableBytes: "75661557760"
              totalBytes: "75661557760"
              usedBytes: "0"
              usedBytesPercentage: "0.000"
            rgw-store.rgw.buckets.index:
              availableBytes: "75661557760"
              totalBytes: "75661557760"
              usedBytes: "0"
              usedBytesPercentage: "0.000"
            rgw-store.rgw.buckets.non-ec:
              availableBytes: "75661557760"
              totalBytes: "75661557760"
              usedBytes: "0"
              usedBytesPercentage: "0.000"
            rgw-store.rgw.control:
              availableBytes: "75661557760"
              totalBytes: "75661557760"
              usedBytes: "0"
              usedBytesPercentage: "0.000"
            rgw-store.rgw.log:
              availableBytes: "75660230656"
              totalBytes: "75661557760"
              usedBytes: "1327104"
              usedBytesPercentage: "0.001"
            rgw-store.rgw.meta:
              availableBytes: "75661557760"
              totalBytes: "75661557760"
              usedBytes: "0"
              usedBytesPercentage: "0.000"
            rgw-store.rgw.otp:
              availableBytes: "75661557760"
              totalBytes: "75661557760"
              usedBytes: "0"
              usedBytesPercentage: "0.000"
      osdAnalysis:
        cephClusterSpecGeneration: 1
        diskDaemon:
          info:
          - 3/3 ready
          status: ok
        specAnalysis:
          cluster-storage-worker-0:
            status: ok
          cluster-storage-worker-1:
            status: ok
          cluster-storage-worker-2:
            status: ok
      rookCephObjects:
        blockStorage:
          cephBlockPools:
            builtin-mgr:
              info:
                failureDomain: host
                type: Replicated
              observedGeneration: 1
              phase: Ready
              poolID: 11
            builtin-rgw-root:
              info:
                failureDomain: host
                type: Replicated
              observedGeneration: 1
              phase: Ready
              poolID: 1
            kubernetes-hdd:
              info:
                failureDomain: host
                type: Replicated
              observedGeneration: 1
              phase: Ready
              poolID: 10
        cephCluster:
          ceph:
            capacity:
              bytesAvailable: 159676964864
              bytesTotal: 161048690688
              bytesUsed: 1371725824
              lastUpdated: "2025-08-15T12:10:39Z"
            fsid: 92d56f80-b7a8-4a35-80ef-eb6a877c2a73
            health: HEALTH_OK
            lastChanged: "2025-08-14T14:07:43Z"
            lastChecked: "2025-08-15T12:10:39Z"
            previousHealth: HEALTH_WARN
            versions:
              mds:
                ceph version 19.2.3 (c92aebb279828e9c3c1f5d24613efca272649e62) squid (stable): 2
              mgr:
                ceph version 19.2.3 (c92aebb279828e9c3c1f5d24613efca272649e62) squid (stable): 2
              mon:
                ceph version 19.2.3 (c92aebb279828e9c3c1f5d24613efca272649e62) squid (stable): 3
              osd:
                ceph version 19.2.3 (c92aebb279828e9c3c1f5d24613efca272649e62) squid (stable): 3
              overall:
                ceph version 19.2.3 (c92aebb279828e9c3c1f5d24613efca272649e62) squid (stable): 12
              rgw:
                ceph version 19.2.3 (c92aebb279828e9c3c1f5d24613efca272649e62) squid (stable): 2
          conditions:
          - lastHeartbeatTime: "2025-08-15T12:10:40Z"
            lastTransitionTime: "2025-08-12T09:35:27Z"
            message: Cluster created successfully
            reason: ClusterCreated
            status: "True"
            type: Ready
          message: Cluster created successfully
          observedGeneration: 1
          phase: Ready
          state: Created
          storage:
            deviceClasses:
            - name: hdd
            osd:
              migrationStatus: {}
              storeType:
                bluestore: 3
          version:
            image: 127.0.0.1/ceph/ceph:v19.2.3
            version: 19.2.3-0
        objectStorage:
          cephObjectStore:
            rgw-store:
              endpoints:
                insecure:
                - http://rook-ceph-rgw-rgw-store.rook-ceph.svc:8081
                secure:
                - https://rook-ceph-rgw-rgw-store.rook-ceph.svc:8443
              info:
                endpoint: http://rook-ceph-rgw-rgw-store.rook-ceph.svc:8081
                secureEndpoint: https://rook-ceph-rgw-rgw-store.rook-ceph.svc:8443
              observedGeneration: 1
              phase: Ready
        sharedFilesystem:
          cephFilesystems:
            cephfs-store:
              observedGeneration: 1
              phase: Ready
      rookOperator:
        status: ok
    lastHealthCheck: "2025-08-15T12:11:00Z"
    lastHealthUpdate: "2025-08-15T12:11:00Z"
    state: Ok
kind: List
metadata:
  resourceVersion: ""
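For quick checks, a single field can be extracted with a JSONPath query instead of reading the whole report. The paths below are taken from the example above; the resource and object names are the same assumptions as in the earlier command.

# Overall state of the last health check (Ok when no issues are detected)
kubectl -n pelagia get cephdeploymenthealth pelagia-ceph -o jsonpath='{.status.state}'

# Ceph health reported for the Rook-managed CephCluster (for example, HEALTH_OK)
kubectl -n pelagia get cephdeploymenthealth pelagia-ceph -o jsonpath='{.status.healthReport.rookCephObjects.cephCluster.ceph.health}'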