use crate::ffi;

#[derive(Debug, Clone)]
pub struct NameParseError;
impl core::fmt::Display for NameParseError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "unrecognized name")
    }
}

impl std::error::Error for NameParseError {}

// Helper macro to generate iterable enums whose variants map to the static
// string identifiers used on the C++ side.
macro_rules! iterable_named_enum {
    (
    $(#[$m:meta])*
    $type_vis:vis enum $typename:ident {
        $(
            $(#[$variant_meta:meta])*
            $variant:ident($variant_str:literal) $(= $value:expr)?,
        )+
    }
    ) => {
        // Main Type
        #[allow(clippy::all)]
        $(#[$m])*
        $type_vis enum $typename {
            $(
            $(#[$variant_meta])*
            $variant$( = $value)?,
            )+
        }

        #[automatically_derived]
        impl $typename {
            #[doc = "The corresponding rocksdb string identifier for this variant"]
            pub const fn name(&self) -> &'static str {
                match self {
                    $(
                        $typename::$variant => $variant_str,
                    )+
                }
            }
            pub fn iter() -> ::core::slice::Iter<'static, $typename> {
                static VARIANTS: &'static [$typename] = &[
                    $(
                        $typename::$variant,
                    )+
                ];
                VARIANTS.iter()
            }
        }


        #[automatically_derived]
        impl ::core::str::FromStr for $typename {
            type Err = NameParseError;
            fn from_str(s: &str) -> Result<Self, Self::Err> {
                match s {
                    $(
                        $variant_str => Ok($typename::$variant),
                    )+
                    _ => Err(NameParseError),
                }
            }
        }

        #[automatically_derived]
        impl ::core::fmt::Display for $typename {
            fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
                self.name().fmt(f)
            }
        }
    };
}
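
// A minimal sketch of the API the macro generates, using a hypothetical
// two-variant enum; the real invocations below (`Ticker`, `Histogram`) behave
// the same way:
//
//     iterable_named_enum! {
//         #[derive(Debug, Copy, Clone, PartialEq, Eq)]
//         #[repr(u32)]
//         pub enum Example {
//             Foo("rocksdb.foo") = 0,
//             Bar("rocksdb.bar"),
//         }
//     }
//
//     assert_eq!(Example::Foo.name(), "rocksdb.foo");                      // variant -> name
//     assert_eq!("rocksdb.bar".parse::<Example>().unwrap(), Example::Bar); // name -> variant
//     assert_eq!(Example::iter().count(), 2);                              // all variants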

/// StatsLevel can be used to reduce statistics overhead by skipping certain
/// types of stats in the stats collection process.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(u8)]
pub enum StatsLevel {
    /// Disable all metrics
    DisableAll = 0,
    /// Disable timer stats, and skip histogram stats
    ExceptHistogramOrTimers = 2,
    /// Skip timer stats
    ExceptTimers,
    /// Collect all stats except time inside mutex lock AND time spent on
    /// compression.
    ExceptDetailedTimers,
    /// Collect all stats except the counters requiring to get time inside the
    /// mutex lock.
    ExceptTimeForMutex,
    /// Collect all stats, including measuring the duration of mutex operations.
    /// If getting the time is expensive on the platform, this can reduce
    /// scalability with more threads, especially for writes.
    All,
}
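
// A hedged usage sketch: turning statistics on and picking a level. This
// assumes the crate's `Options::enable_statistics` and a corresponding
// `Options::set_statistics_level` setter; check the `Options` API for the
// exact names.
//
//     use rocksdb::{Options, statistics::StatsLevel};
//
//     let mut opts = Options::default();
//     opts.create_if_missing(true);
//     opts.enable_statistics();
//     // Skip mutex and compression timing to keep collection overhead low:
//     opts.set_statistics_level(StatsLevel::ExceptDetailedTimers);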

// Keep in-sync with rocksdb/include/rocksdb/statistics.h
iterable_named_enum! {
    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    #[repr(u32)]
    pub enum Ticker {
        /// total block cache misses
        /// REQUIRES: BlockCacheMiss == BlockCacheIndexMiss +
        ///                               BlockCacheFilterMiss +
        ///                               BlockCacheDataMiss;
        BlockCacheMiss("rocksdb.block.cache.miss") = 0,
        /// total block cache hit
        /// REQUIRES: BlockCacheHit == BlockCacheIndexHit +
        ///                              BlockCacheFilterHit +
        ///                              BlockCacheDataHit;
        BlockCacheHit("rocksdb.block.cache.hit"),
        /// # of blocks added to block cache.
        BlockCacheAdd("rocksdb.block.cache.add"),
        /// # of failures when adding blocks to block cache.
        BlockCacheAddFailures("rocksdb.block.cache.add.failures"),
        /// # of times cache miss when accessing index block from block cache.
        BlockCacheIndexMiss("rocksdb.block.cache.index.miss"),
        /// # of times cache hit when accessing index block from block cache.
        BlockCacheIndexHit("rocksdb.block.cache.index.hit"),
        /// # of index blocks added to block cache.
        BlockCacheIndexAdd("rocksdb.block.cache.index.add"),
        /// # of bytes of index blocks inserted into cache
        BlockCacheIndexBytesInsert("rocksdb.block.cache.index.bytes.insert"),
        /// # of times cache miss when accessing filter block from block cache.
        BlockCacheFilterMiss("rocksdb.block.cache.filter.miss"),
        /// # of times cache hit when accessing filter block from block cache.
        BlockCacheFilterHit("rocksdb.block.cache.filter.hit"),
        /// # of filter blocks added to block cache.
        BlockCacheFilterAdd("rocksdb.block.cache.filter.add"),
        /// # of bytes of bloom filter blocks inserted into cache
        BlockCacheFilterBytesInsert("rocksdb.block.cache.filter.bytes.insert"),
        /// # of times cache miss when accessing data block from block cache.
        BlockCacheDataMiss("rocksdb.block.cache.data.miss"),
        /// # of times cache hit when accessing data block from block cache.
        BlockCacheDataHit("rocksdb.block.cache.data.hit"),
        /// # of data blocks added to block cache.
        BlockCacheDataAdd("rocksdb.block.cache.data.add"),
        /// # of bytes of data blocks inserted into cache
        BlockCacheDataBytesInsert("rocksdb.block.cache.data.bytes.insert"),
        /// # of bytes read from cache.
        BlockCacheBytesRead("rocksdb.block.cache.bytes.read"),
        /// # of bytes written into cache.
        BlockCacheBytesWrite("rocksdb.block.cache.bytes.write"),

        /// # of times bloom filter has avoided file reads, i.e., negatives.
        BloomFilterUseful("rocksdb.bloom.filter.useful"),
        /// # of times bloom FullFilter has not avoided the reads.
        BloomFilterFullPositive("rocksdb.bloom.filter.full.positive"),
        /// # of times bloom FullFilter has not avoided the reads and data actually
        /// exist.
        BloomFilterFullTruePositive("rocksdb.bloom.filter.full.true.positive"),

        /// # persistent cache hit
        PersistentCacheHit("rocksdb.persistent.cache.hit"),
        /// # persistent cache miss
        PersistentCacheMiss("rocksdb.persistent.cache.miss"),

        /// # total simulation block cache hits
        SimBlockCacheHit("rocksdb.sim.block.cache.hit"),
        /// # total simulation block cache misses
        SimBlockCacheMiss("rocksdb.sim.block.cache.miss"),

        /// # of memtable hits.
        MemtableHit("rocksdb.memtable.hit"),
        /// # of memtable misses.
        MemtableMiss("rocksdb.memtable.miss"),

        /// # of Get() queries served by L0
        GetHitL0("rocksdb.l0.hit"),
        /// # of Get() queries served by L1
        GetHitL1("rocksdb.l1.hit"),
        /// # of Get() queries served by L2 and up
        GetHitL2AndUp("rocksdb.l2andup.hit"),

        /// Compaction_KeyDrop* count the reasons for key drop during compaction.
        /// There are 4 reasons currently.
        ///
        /// Key was written with a newer value.
        /// Also includes keys dropped for range del.
        CompactionKeyDropNewerEntry("rocksdb.compaction.key.drop.new"),
        /// The key is obsolete.
        CompactionKeyDropObsolete("rocksdb.compaction.key.drop.obsolete"),
        /// Key was covered by a range tombstone.
        CompactionKeyDropRangeDel("rocksdb.compaction.key.drop.range_del"),
        /// User compaction function has dropped the key.
        CompactionKeyDropUser("rocksdb.compaction.key.drop.user"),
        /// All keys in range were deleted.
        CompactionRangeDelDropObsolete("rocksdb.compaction.range_del.drop.obsolete"),
        /// Deletions obsoleted before bottom level due to file gap optimization.
        CompactionOptimizedDelDropObsolete("rocksdb.compaction.optimized.del.drop.obsolete"),
        /// If a compaction was canceled by the SST file manager to prevent ENOSPC.
        CompactionCancelled("rocksdb.compaction.cancelled"),

        /// Number of keys written to the database via the Put and Write calls.
        NumberKeysWritten("rocksdb.number.keys.written"),
        /// Number of keys read.
        NumberKeysRead("rocksdb.number.keys.read"),
        /// Number of keys updated, if inplace update is enabled.
        NumberKeysUpdated("rocksdb.number.keys.updated"),
        /// The number of uncompressed bytes issued by DB::Put(), DB::Delete(),
        /// DB::Merge(), and DB::Write().
        BytesWritten("rocksdb.bytes.written"),
        /// The number of uncompressed bytes read from DB::Get().  It could be
        /// either from memtables, cache, or table files.
        /// For the number of logical bytes read from DB::MultiGet(),
        /// please use NumberMultigetBytesRead.
        BytesRead("rocksdb.bytes.read"),
        /// The number of calls to seek/next/prev
        NumberDbSeek("rocksdb.number.db.seek"),
        NumberDbNext("rocksdb.number.db.next"),
        NumberDbPrev("rocksdb.number.db.prev"),
        /// The number of calls to seek/next/prev that returned data
        NumberDbSeekFound("rocksdb.number.db.seek.found"),
        NumberDbNextFound("rocksdb.number.db.next.found"),
        NumberDbPrevFound("rocksdb.number.db.prev.found"),
        /// The number of uncompressed bytes read from an iterator.
        /// Includes size of key and value.
        IterBytesRead("rocksdb.db.iter.bytes.read"),
        NoFileOpens("rocksdb.no.file.opens"),
        NoFileErrors("rocksdb.no.file.errors"),
        /// Writer has to wait for compaction or flush to finish.
        StallMicros("rocksdb.stall.micros"),
        /// The wait time for db mutex.
        /// Disabled by default. To enable it, set the stats level to
        /// `StatsLevel::All`.
        DbMutexWaitMicros("rocksdb.db.mutex.wait.micros"),

        /// Number of MultiGet calls, keys read, and bytes read
        NumberMultigetCalls("rocksdb.number.multiget.get"),
        NumberMultigetKeysRead("rocksdb.number.multiget.keys.read"),
        NumberMultigetBytesRead("rocksdb.number.multiget.bytes.read"),

        NumberMergeFailures("rocksdb.number.merge.failures"),

        /// Prefix filter stats when used for point lookups (Get / MultiGet).
        /// (For prefix filter stats on iterators, see the `LastLevelSeek*` and
        /// `NonLastLevelSeek*` tickers.)
        /// Checked: filter was queried
        BloomFilterPrefixChecked("rocksdb.bloom.filter.prefix.checked"),
        /// Useful: filter returned false so prevented accessing data+index blocks
        BloomFilterPrefixUseful("rocksdb.bloom.filter.prefix.useful"),
        /// True positive: found a key matching the point query. When another key
        /// with the same prefix matches, it is considered a false positive by
        /// these statistics even though the filter returned a true positive.
        BloomFilterPrefixTruePositive("rocksdb.bloom.filter.prefix.true.positive"),

        /// Number of times we had to reseek inside an iteration to skip
        /// over a large number of keys with the same userkey.
        NumberOfReseeksInIteration("rocksdb.number.reseeks.iteration"),

        /// Record the number of calls to GetUpdatesSince. Useful to keep track of
        /// transaction log iterator refreshes
        GetUpdatesSinceCalls("rocksdb.getupdatessince.calls"),
        /// Number of times WAL sync is done
        WalFileSynced("rocksdb.wal.synced"),
        /// Number of bytes written to WAL
        WalFileBytes("rocksdb.wal.bytes"),

        /// Writes can be processed by requesting thread or by the thread at the
        /// head of the writers queue.
        WriteDoneBySelf("rocksdb.write.self"),
        /// Equivalent to writes done for others.
        WriteDoneByOther("rocksdb.write.other"),
        /// Number of Write calls that request WAL.
        WriteWithWal("rocksdb.write.wal"),
        /// Bytes read during compaction.
        CompactReadBytes("rocksdb.compact.read.bytes"),
        /// Bytes written during compaction.
        CompactWriteBytes("rocksdb.compact.write.bytes"),
        /// Bytes written during flush.
        FlushWriteBytes("rocksdb.flush.write.bytes"),

        /// Compaction read and write statistics broken down by CompactionReason
        CompactReadBytesMarked("rocksdb.compact.read.marked.bytes"),
        CompactReadBytesPeriodic("rocksdb.compact.read.periodic.bytes"),
        CompactReadBytesTtl("rocksdb.compact.read.ttl.bytes"),
        CompactWriteBytesMarked("rocksdb.compact.write.marked.bytes"),
        CompactWriteBytesPeriodic("rocksdb.compact.write.periodic.bytes"),
        CompactWriteBytesTtl("rocksdb.compact.write.ttl.bytes"),

        /// Number of tables whose properties were loaded directly from file,
        /// without creating a table reader object.
        NumberDirectLoadTableProperties("rocksdb.number.direct.load.table.properties"),
        NumberSuperversionAcquires("rocksdb.number.superversion_acquires"),
        NumberSuperversionReleases("rocksdb.number.superversion_releases"),
        NumberSuperversionCleanups("rocksdb.number.superversion_cleanups"),

        /// # of compressions/decompressions executed
        NumberBlockCompressed("rocksdb.number.block.compressed"),
        NumberBlockDecompressed("rocksdb.number.block.decompressed"),

        /// DEPRECATED / unused (see NumberBlockCompression_*)
        NumberBlockNotCompressed("rocksdb.number.block.not_compressed"),

        /// Tickers that record cumulative time.
        MergeOperationTotalTime("rocksdb.merge.operation.time.nanos"),
        FilterOperationTotalTime("rocksdb.filter.operation.time.nanos"),
        CompactionCpuTotalTime("rocksdb.compaction.total.time.cpu_micros"),

        /// Row cache.
        RowCacheHit("rocksdb.row.cache.hit"),
        RowCacheMiss("rocksdb.row.cache.miss"),

        /// Read amplification statistics.
        /// Read amplification can be calculated using this formula
        /// (ReadAmpTotalReadBytes / ReadAmpEstimateUsefulBytes)
        ///
        /// REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
        ///
        /// Estimate of total bytes actually used.
        ReadAmpEstimateUsefulBytes("rocksdb.read.amp.estimate.useful.bytes"),
        /// Total size of loaded data blocks.
        ReadAmpTotalReadBytes("rocksdb.read.amp.total.read.bytes"),

        /// Number of refill intervals where rate limiter's bytes are fully consumed.
        NumberRateLimiterDrains("rocksdb.number.rate_limiter.drains"),

        /// Number of internal keys skipped by Iterator
        NumberIterSkip("rocksdb.number.iter.skip"),

        /// BlobDB specific stats
        /// # of Put/PutTtl/PutUntil to BlobDB. Only applicable to legacy BlobDB.
        BlobDbNumPut("rocksdb.blobdb.num.put"),
        /// # of Write to BlobDB. Only applicable to legacy BlobDB.
        BlobDbNumWrite("rocksdb.blobdb.num.write"),
        /// # of Get to BlobDB. Only applicable to legacy BlobDB.
        BlobDbNumGet("rocksdb.blobdb.num.get"),
        /// # of MultiGet to BlobDB. Only applicable to legacy BlobDB.
        BlobDbNumMultiget("rocksdb.blobdb.num.multiget"),
        /// # of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator. Only
        /// applicable to legacy BlobDB.
        BlobDbNumSeek("rocksdb.blobdb.num.seek"),
        /// # of Next to BlobDB iterator. Only applicable to legacy BlobDB.
        BlobDbNumNext("rocksdb.blobdb.num.next"),
        /// # of Prev to BlobDB iterator. Only applicable to legacy BlobDB.
        BlobDbNumPrev("rocksdb.blobdb.num.prev"),
        /// # of keys written to BlobDB. Only applicable to legacy BlobDB.
        BlobDbNumKeysWritten("rocksdb.blobdb.num.keys.written"),
        /// # of keys read from BlobDB. Only applicable to legacy BlobDB.
        BlobDbNumKeysRead("rocksdb.blobdb.num.keys.read"),
        /// # of bytes (key + value) written to BlobDB. Only applicable to legacy
        /// BlobDB.
        BlobDbBytesWritten("rocksdb.blobdb.bytes.written"),
        /// # of bytes (keys + value) read from BlobDB. Only applicable to legacy
        /// BlobDB.
        BlobDbBytesRead("rocksdb.blobdb.bytes.read"),
        /// # of keys written by BlobDB as non-Ttl inlined value. Only applicable to
        /// legacy BlobDB.
        BlobDbWriteInlined("rocksdb.blobdb.write.inlined"),
        /// # of keys written by BlobDB as Ttl inlined value. Only applicable to legacy
        /// BlobDB.
        BlobDbWriteInlinedTtl("rocksdb.blobdb.write.inlined.ttl"),
        /// # of keys written by BlobDB as non-Ttl blob value. Only applicable to
        /// legacy BlobDB.
        BlobDbWriteBlob("rocksdb.blobdb.write.blob"),
        /// # of keys written by BlobDB as Ttl blob value. Only applicable to legacy
        /// BlobDB.
        BlobDbWriteBlobTtl("rocksdb.blobdb.write.blob.ttl"),
        /// # of bytes written to blob file.
        BlobDbBlobFileBytesWritten("rocksdb.blobdb.blob.file.bytes.written"),
        /// # of bytes read from blob file.
        BlobDbBlobFileBytesRead("rocksdb.blobdb.blob.file.bytes.read"),
        /// # of times a blob file is synced.
        BlobDbBlobFileSynced("rocksdb.blobdb.blob.file.synced"),
        /// # of blob index evicted from base DB by BlobDB compaction filter because
        /// of expiration. Only applicable to legacy BlobDB.
        BlobDbBlobIndexExpiredCount("rocksdb.blobdb.blob.index.expired.count"),
        /// size of blob index evicted from base DB by BlobDB compaction filter
        /// because of expiration. Only applicable to legacy BlobDB.
        BlobDbBlobIndexExpiredSize("rocksdb.blobdb.blob.index.expired.size"),
        /// # of blob index evicted from base DB by BlobDB compaction filter because
        /// of corresponding file deleted. Only applicable to legacy BlobDB.
        BlobDbBlobIndexEvictedCount("rocksdb.blobdb.blob.index.evicted.count"),
        /// size of blob index evicted from base DB by BlobDB compaction filter
        /// because of corresponding file deleted. Only applicable to legacy BlobDB.
        BlobDbBlobIndexEvictedSize("rocksdb.blobdb.blob.index.evicted.size"),
        /// # of blob files that were obsoleted by garbage collection. Only applicable
        /// to legacy BlobDB.
        BlobDbGcNumFiles("rocksdb.blobdb.gc.num.files"),
        /// # of blob files generated by garbage collection. Only applicable to legacy
        /// BlobDB.
        BlobDbGcNumNewFiles("rocksdb.blobdb.gc.num.new.files"),
        /// # of BlobDB garbage collection failures. Only applicable to legacy BlobDB.
        BlobDbGcFailures("rocksdb.blobdb.gc.failures"),
        /// # of keys relocated to new blob file by garbage collection.
        BlobDbGcNumKeysRelocated("rocksdb.blobdb.gc.num.keys.relocated"),
        /// # of bytes relocated to new blob file by garbage collection.
        BlobDbGcBytesRelocated("rocksdb.blobdb.gc.bytes.relocated"),
        /// # of blob files evicted because BlobDB is full. Only applicable to
        /// legacy BlobDB.
        BlobDbFifoNumFilesEvicted("rocksdb.blobdb.fifo.num.files.evicted"),
        /// # of keys in the blob files evicted because BlobDB is full. Only
        /// applicable to legacy BlobDB.
        BlobDbFifoNumKeysEvicted("rocksdb.blobdb.fifo.num.keys.evicted"),
        /// # of bytes in the blob files evicted because BlobDB is full. Only
        /// applicable to legacy BlobDB.
        BlobDbFifoBytesEvicted("rocksdb.blobdb.fifo.bytes.evicted"),

        /// These counters indicate a performance issue in WritePrepared transactions.
        /// We should not see them ticking much.
        /// # of times prepare_mutex_ is acquired in the fast path.
        TxnPrepareMutexOverhead("rocksdb.txn.overhead.mutex.prepare"),
        /// # of times old_commit_map_mutex_ is acquired in the fast path.
        TxnOldCommitMapMutexOverhead("rocksdb.txn.overhead.mutex.old.commit.map"),
        /// # of times we checked a batch for duplicate keys.
        TxnDuplicateKeyOverhead("rocksdb.txn.overhead.duplicate.key"),
        /// # of times snapshot_mutex_ is acquired in the fast path.
        TxnSnapshotMutexOverhead("rocksdb.txn.overhead.mutex.snapshot"),
        /// # of times ::Get returned TryAgain due to expired snapshot seq
        TxnGetTryAgain("rocksdb.txn.get.tryagain"),

        /// Number of keys actually found in MultiGet calls (vs number requested by
        /// the caller).
        /// NumberMultigetKeysRead gives the number requested by the caller.
        NumberMultigetKeysFound("rocksdb.number.multiget.keys.found"),

        /// Number of iterators created.
        NoIteratorCreated("rocksdb.num.iterator.created"),
        /// Number of iterators deleted.
        NoIteratorDeleted("rocksdb.num.iterator.deleted"),

        BlockCacheCompressionDictMiss("rocksdb.block.cache.compression.dict.miss"),
        BlockCacheCompressionDictHit("rocksdb.block.cache.compression.dict.hit"),
        BlockCacheCompressionDictAdd("rocksdb.block.cache.compression.dict.add"),
        BlockCacheCompressionDictBytesInsert("rocksdb.block.cache.compression.dict.bytes.insert"),

        /// # of blocks redundantly inserted into block cache.
        /// REQUIRES: BlockCacheAddRedundant <= BlockCacheAdd
        BlockCacheAddRedundant("rocksdb.block.cache.add.redundant"),
        /// # of index blocks redundantly inserted into block cache.
        /// REQUIRES: BlockCacheIndexAddRedundant <= BlockCacheIndexAdd
        BlockCacheIndexAddRedundant("rocksdb.block.cache.index.add.redundant"),
        /// # of filter blocks redundantly inserted into block cache.
        /// REQUIRES: BlockCacheFilterAddRedundant <= BlockCacheFilterAdd
        BlockCacheFilterAddRedundant("rocksdb.block.cache.filter.add.redundant"),
        /// # of data blocks redundantly inserted into block cache.
        /// REQUIRES: BlockCacheDataAddRedundant <= BlockCacheDataAdd
        BlockCacheDataAddRedundant("rocksdb.block.cache.data.add.redundant"),
        /// # of dict blocks redundantly inserted into block cache.
        /// REQUIRES: BlockCacheCompressionDictAddRedundant
        ///           <= BlockCacheCompressionDictAdd
        BlockCacheCompressionDictAddRedundant("rocksdb.block.cache.compression.dict.add.redundant"),

        /// # of files marked as trash by the SST file manager, to be deleted
        /// later by the background thread.
        FilesMarkedTrash("rocksdb.files.marked.trash"),
        /// # of trash files deleted by the background thread from the trash queue.
        FilesDeletedFromTrashQueue("rocksdb.files.marked.trash.deleted"),
        /// # of files deleted immediately by sst file manager through delete
        /// scheduler.
        FilesDeletedImmediately("rocksdb.files.deleted.immediately"),

        /// The counters for error handler. Note that bg_io_error is a subset of
        /// bg_error, and bg_retryable_io_error is a subset of bg_io_error.
        /// The misspelled versions are deprecated and only kept for compatibility.
        /// TODO: remove the misspelled tickers in the next major release.
        ErrorHandlerBgErrorCount("rocksdb.error.handler.bg.error.count"),
        ErrorHandlerBgErrorCountMisspelled("rocksdb.error.handler.bg.errro.count"),
        ErrorHandlerBgIoErrorCount("rocksdb.error.handler.bg.io.error.count"),
        ErrorHandlerBgIoErrorCountMisspelled("rocksdb.error.handler.bg.io.errro.count"),
        ErrorHandlerBgRetryableIoErrorCount("rocksdb.error.handler.bg.retryable.io.error.count"),
        ErrorHandlerBgRetryableIoErrorCountMisspelled("rocksdb.error.handler.bg.retryable.io.errro.count"),
        ErrorHandlerAutoResumeCount("rocksdb.error.handler.autoresume.count"),
        ErrorHandlerAutoResumeRetryTotalCount("rocksdb.error.handler.autoresume.retry.total.count"),
        ErrorHandlerAutoResumeSuccessCount("rocksdb.error.handler.autoresume.success.count"),

        /// Statistics for memtable garbage collection:
        /// Raw bytes of data (payload) present on memtable at flush time.
        MemtablePayloadBytesAtFlush("rocksdb.memtable.payload.bytes.at.flush"),
        /// Outdated bytes of data present on memtable at flush time.
        MemtableGarbageBytesAtFlush("rocksdb.memtable.garbage.bytes.at.flush"),

        /// Secondary cache statistics
        SecondaryCacheHits("rocksdb.secondary.cache.hits"),

        /// Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs.
        VerifyChecksumReadBytes("rocksdb.verify_checksum.read.bytes"),

        /// Bytes read/written while creating backups
        BackupReadBytes("rocksdb.backup.read.bytes"),
        BackupWriteBytes("rocksdb.backup.write.bytes"),

        /// Remote compaction read/write statistics
        RemoteCompactReadBytes("rocksdb.remote.compact.read.bytes"),
        RemoteCompactWriteBytes("rocksdb.remote.compact.write.bytes"),

        /// Tiered storage related statistics
        HotFileReadBytes("rocksdb.hot.file.read.bytes"),
        WarmFileReadBytes("rocksdb.warm.file.read.bytes"),
        ColdFileReadBytes("rocksdb.cold.file.read.bytes"),
        HotFileReadCount("rocksdb.hot.file.read.count"),
        WarmFileReadCount("rocksdb.warm.file.read.count"),
        ColdFileReadCount("rocksdb.cold.file.read.count"),

        /// Last level and non-last level read statistics
        LastLevelReadBytes("rocksdb.last.level.read.bytes"),
        LastLevelReadCount("rocksdb.last.level.read.count"),
        NonLastLevelReadBytes("rocksdb.non.last.level.read.bytes"),
        NonLastLevelReadCount("rocksdb.non.last.level.read.count"),

        /// Statistics on iterator Seek() (and variants) for each sorted run. I.e. a
        /// single user Seek() can result in many sorted run Seek()s.
        /// The stats are split between last level and non-last level.
        /// Filtered: a filter such as a prefix Bloom filter indicated the Seek() would
        /// not find anything relevant, so it avoided a likely access to data+index
        /// blocks.
        LastLevelSeekFiltered("rocksdb.last.level.seek.filtered"),
        /// Filter match: a filter such as prefix Bloom filter was queried but did
        /// not filter out the seek.
        LastLevelSeekFilterMatch("rocksdb.last.level.seek.filter.match"),
        /// At least one data block was accessed for a Seek() (or variant) on a
        /// sorted run.
        LastLevelSeekData("rocksdb.last.level.seek.data"),
        /// At least one value() was accessed for the seek (suggesting it was useful),
        /// and no filter such as prefix Bloom was queried.
        LastLevelSeekDataUsefulNoFilter("rocksdb.last.level.seek.data.useful.no.filter"),
        /// At least one value() was accessed for the seek (suggesting it was useful),
        /// after querying a filter such as prefix Bloom.
        LastLevelSeekDataUsefulFilterMatch("rocksdb.last.level.seek.data.useful.filter.match"),
        /// The same set of stats, but for non-last level seeks.
        NonLastLevelSeekFiltered("rocksdb.non.last.level.seek.filtered"),
        NonLastLevelSeekFilterMatch("rocksdb.non.last.level.seek.filter.match"),
        NonLastLevelSeekData("rocksdb.non.last.level.seek.data"),
        NonLastLevelSeekDataUsefulNoFilter("rocksdb.non.last.level.seek.data.useful.no.filter"),
        NonLastLevelSeekDataUsefulFilterMatch("rocksdb.non.last.level.seek.data.useful.filter.match"),

        /// Number of block checksum verifications
        BlockChecksumComputeCount("rocksdb.block.checksum.compute.count"),
        /// Number of times RocksDB detected a corruption while verifying a block
        /// checksum. RocksDB does not remember corruptions that happened during user
        /// reads so the same block corruption may be detected multiple times.
        BlockChecksumMismatchCount("rocksdb.block.checksum.mismatch.count"),

        MultigetCoroutineCount("rocksdb.multiget.coroutine.count"),

        /// Integrated BlobDB specific stats
        /// # of times cache miss when accessing blob from blob cache.
        BlobDbCacheMiss("rocksdb.blobdb.cache.miss"),
        /// # of times cache hit when accessing blob from blob cache.
        BlobDbCacheHit("rocksdb.blobdb.cache.hit"),
        /// # of data blocks added to blob cache.
        BlobDbCacheAdd("rocksdb.blobdb.cache.add"),
        /// # of failures when adding blobs to blob cache.
        BlobDbCacheAddFailures("rocksdb.blobdb.cache.add.failures"),
        /// # of bytes read from blob cache.
        BlobDbCacheBytesRead("rocksdb.blobdb.cache.bytes.read"),
        /// # of bytes written into blob cache.
        BlobDbCacheBytesWrite("rocksdb.blobdb.cache.bytes.write"),

        /// Time spent in the ReadAsync file system call
        ReadAsyncMicros("rocksdb.read.async.micros"),
        /// Number of errors returned to the async read callback
        AsyncReadErrorCount("rocksdb.async.read.error.count"),

        /// Fine grained secondary cache stats
        SecondaryCacheFilterHits("rocksdb.secondary.cache.filter.hits"),
        SecondaryCacheIndexHits("rocksdb.secondary.cache.index.hits"),
        SecondaryCacheDataHits("rocksdb.secondary.cache.data.hits"),

        /// Number of lookups into the prefetched tail (see
        /// `TableOpenPrefetchTailReadBytes`) that can't find their data for
        /// table open.
        TableOpenPrefetchTailMiss("rocksdb.table.open.prefetch.tail.miss"),
        /// Number of lookups into the prefetched tail (see
        /// `TableOpenPrefetchTailReadBytes`) that find their data for table open.
        TableOpenPrefetchTailHit("rocksdb.table.open.prefetch.tail.hit"),

        /// Statistics on the filtering by user-defined timestamps
        /// # of times timestamps are checked on accessing the table
        TimestampFilterTableChecked("rocksdb.timestamp.filter.table.checked"),
        /// # of times timestamps can successfully help skip the table access
        TimestampFilterTableFiltered("rocksdb.timestamp.filter.table.filtered"),

        /// Number of input bytes (uncompressed) to compression for SST blocks that
        /// are stored compressed.
        BytesCompressedFrom("rocksdb.bytes.compressed.from"),
        /// Number of output bytes (compressed) from compression for SST blocks that
        /// are stored compressed.
        BytesCompressedTo("rocksdb.bytes.compressed.to"),
        /// Number of uncompressed bytes for SST blocks that are stored uncompressed
        /// because compression type is kNoCompression, or some error case caused
        /// compression not to run or produce an output. Index blocks are only counted
        /// if enable_index_compression is true.
        BytesCompressionBypassed("rocksdb.bytes.compression_bypassed"),
        /// Number of input bytes (uncompressed) to compression for SST blocks that
        /// are stored uncompressed because the compression result was rejected,
        /// either because the ratio was not acceptable (see
        /// CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the
        /// `verify_compression` option.
        BytesCompressionRejected("rocksdb.bytes.compression.rejected"),

        /// Like BytesCompressionBypassed but counting number of blocks
        NumberBlockCompressionBypassed("rocksdb.number.block_compression_bypassed"),
        /// Like BytesCompressionRejected but counting number of blocks
        NumberBlockCompressionRejected("rocksdb.number.block_compression_rejected"),

        /// Number of input bytes (compressed) to decompression in reading compressed
        /// SST blocks from storage.
        BytesDecompressedFrom("rocksdb.bytes.decompressed.from"),
        /// Number of output bytes (uncompressed) from decompression in reading
        /// compressed SST blocks from storage.
        BytesDecompressedTo("rocksdb.bytes.decompressed.to"),

        /// Number of times readahead is trimmed during scans when
        /// ReadOptions.auto_readahead_size is set.
        ReadAheadTrimmed("rocksdb.readahead.trimmed"),

        /// Number of FIFO compactions that drop files, broken down by reason.
        FifoMaxSizeCompactions("rocksdb.fifo.max.size.compactions"),
        FifoTtlCompactions("rocksdb.fifo.ttl.compactions"),

        /// Number of bytes prefetched during user initiated scan
        PrefetchBytes("rocksdb.prefetch.bytes"),

        /// Number of prefetched bytes that were actually useful
        PrefetchBytesUseful("rocksdb.prefetch.bytes.useful"),

        /// Number of FS reads avoided due to scan prefetching
        PrefetchHits("rocksdb.prefetch.hits"),

        /// Compressed secondary cache related stats
        CompressedSecondaryCacheDummyHits("rocksdb.compressed.secondary.cache.dummy.hits"),
        CompressedSecondaryCacheHits("rocksdb.compressed.secondary.cache.hits"),
        CompressedSecondaryCachePromotions("rocksdb.compressed.secondary.cache.promotions"),
        CompressedSecondaryCachePromotionSkips("rocksdb.compressed.secondary.cache.promotion.skips"),
    }
}
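
// A hedged sketch of reading a ticker after running some workload; it assumes
// the crate exposes `Options::get_ticker_count` over the C API's
// `rocksdb_options_statistics_get_ticker_count`:
//
//     let hits = opts.get_ticker_count(Ticker::BlockCacheHit);
//     let misses = opts.get_ticker_count(Ticker::BlockCacheMiss);
//     let total = (hits + misses).max(1);
//     println!("block cache hit rate: {:.1}%", 100.0 * hits as f64 / total as f64);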

iterable_named_enum! {
    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    #[repr(u32)]
    pub enum Histogram {
        DbGet("rocksdb.db.get.micros") = 0,
        DbWrite("rocksdb.db.write.micros"),
        CompactionTime("rocksdb.compaction.times.micros"),
        CompactionCpuTime("rocksdb.compaction.times.cpu_micros"),
        SubcompactionSetupTime("rocksdb.subcompaction.setup.times.micros"),
        TableSyncMicros("rocksdb.table.sync.micros"),
        CompactionOutfileSyncMicros("rocksdb.compaction.outfile.sync.micros"),
        WalFileSyncMicros("rocksdb.wal.file.sync.micros"),
        ManifestFileSyncMicros("rocksdb.manifest.file.sync.micros"),
        /// Time spent in IO during table open
        TableOpenIoMicros("rocksdb.table.open.io.micros"),
        DbMultiget("rocksdb.db.multiget.micros"),
        ReadBlockCompactionMicros("rocksdb.read.block.compaction.micros"),
        ReadBlockGetMicros("rocksdb.read.block.get.micros"),
        WriteRawBlockMicros("rocksdb.write.raw.block.micros"),
        NumFilesInSingleCompaction("rocksdb.numfiles.in.singlecompaction"),
        DbSeek("rocksdb.db.seek.micros"),
        WriteStall("rocksdb.db.write.stall"),
        /// Time spent in reading block-based or plain SST table
        SstReadMicros("rocksdb.sst.read.micros"),
        /// Time spent in reading SST table (currently only block-based table) or blob
        /// file corresponding to `Env::IOActivity`
        FileReadFlushMicros("rocksdb.file.read.flush.micros"),
        FileReadCompactionMicros("rocksdb.file.read.compaction.micros"),
        FileReadDbOpenMicros("rocksdb.file.read.db.open.micros"),
        /// The following `FileRead*` histograms require a stats level greater than
        /// `StatsLevel::ExceptDetailedTimers`.
        FileReadGetMicros("rocksdb.file.read.get.micros"),
        FileReadMultigetMicros("rocksdb.file.read.multiget.micros"),
        FileReadDbIteratorMicros("rocksdb.file.read.db.iterator.micros"),
        FileReadVerifyDbChecksumMicros("rocksdb.file.read.verify.db.checksum.micros"),
        FileReadVerifyFileChecksumsMicros("rocksdb.file.read.verify.file.checksums.micros"),
        /// The number of subcompactions actually scheduled during a compaction
        NumSubcompactionsScheduled("rocksdb.num.subcompactions.scheduled"),
        /// Value size distribution in each operation
        BytesPerRead("rocksdb.bytes.per.read"),
        BytesPerWrite("rocksdb.bytes.per.write"),
        BytesPerMultiget("rocksdb.bytes.per.multiget"),
        /// DEPRECATED / unused (see BytesCompressed{From,To})
        BytesCompressed("rocksdb.bytes.compressed"),
        /// DEPRECATED / unused (see BytesDecompressed{From,To})
        BytesDecompressed("rocksdb.bytes.decompressed"),
        CompressionTimesNanos("rocksdb.compression.times.nanos"),
        DecompressionTimesNanos("rocksdb.decompression.times.nanos"),
        /// Number of merge operands passed to the merge operator in user read
        /// requests.
        ReadNumMergeOperands("rocksdb.read.num.merge_operands"),
        /// BlobDB specific stats
        /// Size of keys written to BlobDB. Only applicable to legacy BlobDB.
        BlobDbKeySize("rocksdb.blobdb.key.size"),
        /// Size of values written to BlobDB. Only applicable to legacy BlobDB.
        BlobDbValueSize("rocksdb.blobdb.value.size"),
        /// BlobDB Put/PutWithTTL/PutUntil/Write latency. Only applicable to legacy
        /// BlobDB.
        BlobDbWriteMicros("rocksdb.blobdb.write.micros"),
        /// BlobDB Get latency. Only applicable to legacy BlobDB.
        BlobDbGetMicros("rocksdb.blobdb.get.micros"),
        /// BlobDB MultiGet latency. Only applicable to legacy BlobDB.
        BlobDbMultigetMicros("rocksdb.blobdb.multiget.micros"),
        /// BlobDB Seek/SeekToFirst/SeekToLast/SeekForPrev latency. Only applicable to
        /// legacy BlobDB.
        BlobDbSeekMicros("rocksdb.blobdb.seek.micros"),
        /// BlobDB Next latency. Only applicable to legacy BlobDB.
        BlobDbNextMicros("rocksdb.blobdb.next.micros"),
        /// BlobDB Prev latency. Only applicable to legacy BlobDB.
        BlobDbPrevMicros("rocksdb.blobdb.prev.micros"),
        /// Blob file write latency.
        BlobDbBlobFileWriteMicros("rocksdb.blobdb.blob.file.write.micros"),
        /// Blob file read latency.
        BlobDbBlobFileReadMicros("rocksdb.blobdb.blob.file.read.micros"),
        /// Blob file sync latency.
        BlobDbBlobFileSyncMicros("rocksdb.blobdb.blob.file.sync.micros"),
        /// BlobDB compression time.
        BlobDbCompressionMicros("rocksdb.blobdb.compression.micros"),
        /// BlobDB decompression time.
        BlobDbDecompressionMicros("rocksdb.blobdb.decompression.micros"),
        /// Time spent flushing memtable to disk
        FlushTime("rocksdb.db.flush.micros"),
        SstBatchSize("rocksdb.sst.batch.size"),
        /// MultiGet stats logged per level
        /// Num of index and filter blocks read from file system per level.
        NumIndexAndFilterBlocksReadPerLevel("rocksdb.num.index.and.filter.blocks.read.per.level"),
        /// Num of sst files read from file system per level.
        NumSstReadPerLevel("rocksdb.num.sst.read.per.level"),
        /// Error handler statistics
        ErrorHandlerAutoresumeRetryCount("rocksdb.error.handler.autoresume.retry.count"),
        /// Stats related to asynchronous read requests.
        AsyncReadBytes("rocksdb.async.read.bytes"),
        PollWaitMicros("rocksdb.poll.wait.micros"),
        /// Number of prefetched bytes discarded by RocksDB.
        PrefetchedBytesDiscarded("rocksdb.prefetched.bytes.discarded"),
        /// Number of IOs issued in parallel in a MultiGet batch
        MultigetIoBatchSize("rocksdb.multiget.io.batch.size"),
        /// Number of levels requiring IO for MultiGet
        NumLevelReadPerMultiget("rocksdb.num.level.read.per.multiget"),
        /// Wait time for aborting async read in FilePrefetchBuffer destructor
        AsyncPrefetchAbortMicros("rocksdb.async.prefetch.abort.micros"),
        /// Number of bytes read for RocksDB's prefetching contents (as opposed to file
        /// system's prefetch) from the end of SST table during block based table open
        TableOpenPrefetchTailReadBytes("rocksdb.table.open.prefetch.tail.read.bytes"),
    }
}
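
// A hedged sketch of reading a histogram; it assumes the crate exposes
// `Options::get_histogram_data` over the C API's
// `rocksdb_options_statistics_get_histogram_data`. The `HistogramData`
// accessors used here are defined below:
//
//     let get_latency = opts.get_histogram_data(Histogram::DbGet);
//     println!(
//         "Get(): p95={} p99={} max={} (micros, n={})",
//         get_latency.p95(),
//         get_latency.p99(),
//         get_latency.max(),
//         get_latency.count(),
//     );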

/// Aggregated data (median, percentiles, average, standard deviation, etc.)
/// for a single RocksDB histogram.
pub struct HistogramData {
    pub(crate) inner: *mut ffi::rocksdb_statistics_histogram_data_t,
}

impl HistogramData {
    pub fn new() -> HistogramData {
        HistogramData::default()
    }
    pub fn median(&self) -> f64 {
        unsafe { ffi::rocksdb_statistics_histogram_data_get_median(self.inner) }
    }
    pub fn average(&self) -> f64 {
        unsafe { ffi::rocksdb_statistics_histogram_data_get_average(self.inner) }
    }
    pub fn p95(&self) -> f64 {
        unsafe { ffi::rocksdb_statistics_histogram_data_get_p95(self.inner) }
    }
    pub fn p99(&self) -> f64 {
        unsafe { ffi::rocksdb_statistics_histogram_data_get_p99(self.inner) }
    }
    pub fn max(&self) -> f64 {
        unsafe { ffi::rocksdb_statistics_histogram_data_get_max(self.inner) }
    }
    pub fn min(&self) -> f64 {
        unsafe { ffi::rocksdb_statistics_histogram_data_get_min(self.inner) }
    }
    pub fn sum(&self) -> u64 {
        unsafe { ffi::rocksdb_statistics_histogram_data_get_sum(self.inner) }
    }
    pub fn count(&self) -> u64 {
        unsafe { ffi::rocksdb_statistics_histogram_data_get_count(self.inner) }
    }
    pub fn std_dev(&self) -> f64 {
        unsafe { ffi::rocksdb_statistics_histogram_data_get_std_dev(self.inner) }
    }
}

impl Default for HistogramData {
    fn default() -> Self {
        let histogram_data_inner = unsafe { ffi::rocksdb_statistics_histogram_data_create() };
        assert!(
            !histogram_data_inner.is_null(),
            "Could not create RocksDB histogram data"
        );

        Self {
            inner: histogram_data_inner,
        }
    }
}

impl Drop for HistogramData {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_statistics_histogram_data_destroy(self.inner);
        }
    }
}

#[test]
fn sanity_checks() {
    let want = "rocksdb.async.read.bytes";
    assert_eq!(want, Histogram::AsyncReadBytes.name());

    let want = "rocksdb.block.cache.index.miss";
    assert_eq!(want, Ticker::BlockCacheIndexMiss.to_string());

    // assert enum lengths
    assert_eq!(Ticker::iter().count(), 215 /* TICKER_ENUM_MAX */);
    assert_eq!(Histogram::iter().count(), 60 /* HISTOGRAM_ENUM_MAX */);
}
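
// Round-trip check for the macro-generated string table: every `name()`
// should parse back to its variant via `FromStr`, since both are generated
// from the same variant-to-string list.
#[test]
fn name_roundtrip() {
    for ticker in Ticker::iter() {
        assert_eq!(ticker.name().parse::<Ticker>().unwrap(), *ticker);
    }
    for histogram in Histogram::iter() {
        assert_eq!(histogram.name().parse::<Histogram>().unwrap(), *histogram);
    }
}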