rocksdb/
db_options.rs

// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::ffi::CStr;
use std::path::Path;
use std::ptr::{null_mut, NonNull};
use std::slice;
use std::sync::Arc;

use libc::{self, c_char, c_double, c_int, c_uchar, c_uint, c_void, size_t};

use crate::column_family::ColumnFamilyTtl;
use crate::statistics::{Histogram, HistogramData, StatsLevel};
use crate::{
    compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn},
    compaction_filter_factory::{self, CompactionFilterFactory},
    comparator::{
        ComparatorCallback, ComparatorWithTsCallback, CompareFn, CompareTsFn, CompareWithoutTsFn,
    },
    db::DBAccess,
    env::Env,
    ffi,
    ffi_util::{from_cstr, to_cpath, CStrLike},
    merge_operator::{
        self, full_merge_callback, partial_merge_callback, MergeFn, MergeOperatorCallback,
    },
    slice_transform::SliceTransform,
    statistics::Ticker,
    ColumnFamilyDescriptor, Error, SnapshotWithThreadMode,
};

pub(crate) struct WriteBufferManagerWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_write_buffer_manager_t>,
}

impl Drop for WriteBufferManagerWrapper {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_write_buffer_manager_destroy(self.inner.as_ptr());
        }
    }
}

#[derive(Clone)]
pub struct WriteBufferManager(pub(crate) Arc<WriteBufferManagerWrapper>);

impl WriteBufferManager {
    /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
    /// Write buffer manager helps users control the total memory used by memtables across multiple column families and/or DB instances.
    /// Users can enable this control in two ways:
    ///
    /// 1- Limit the total memtable usage across multiple column families and DBs under a threshold.
    /// 2- Cost the memtable memory usage to the block cache so that memory of RocksDB can be capped by a single limit.
    ///
    /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager.
    /// Users can create one write buffer manager object and pass it to all the options of column families or DBs whose memtable size they want to be controlled by this object.
    ///
    /// A memory limit is given when creating the write buffer manager object. RocksDB will try to limit the total memory to under this limit.
    ///
    /// If the mutable memtable size exceeds about 90% of the limit, a flush
    /// will be triggered on one column family of the DB you are inserting to.
    /// If the total memory is over the limit, a more aggressive flush may also be triggered, but only if the mutable memtable size also exceeds 50% of the limit.
    /// Both checks are needed because if more than half of the memory is already being flushed, triggering more flushes may not help.
    ///
    /// The total memory is counted as total memory allocated in the arena, even if some of that may not yet be used by a memtable.
    ///
    /// buffer_size: the memory limit in bytes.
    /// allow_stall: if true, stalls all writers when memory usage exceeds buffer_size (soft limit),
    ///              waiting for flushes to complete and memory usage to drop.
    pub fn new_write_buffer_manager(buffer_size: size_t, allow_stall: bool) -> Self {
        let inner = NonNull::new(unsafe {
            ffi::rocksdb_write_buffer_manager_create(buffer_size, allow_stall)
        })
        .unwrap();
        WriteBufferManager(Arc::new(WriteBufferManagerWrapper { inner }))
    }

    /// Users can set up RocksDB to cost memory used by memtables to the block cache.
    /// This can be done regardless of whether the memtable memory limit itself is enabled.
    /// This option is added to manage memory (memtables + block cache) under a single limit.
    ///
    /// buffer_size: the memory limit in bytes.
    /// allow_stall: if true, stalls all writers when memory usage exceeds buffer_size (soft limit),
    ///              waiting for flushes to complete and memory usage to drop.
    /// cache: the block cache instance
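    ///
    /// # Examples
    ///
    /// A minimal sketch charging memtable memory to an LRU block cache (sizes are illustrative):
    ///
    /// ```
    /// use rocksdb::{Cache, WriteBufferManager};
    ///
    /// let cache = Cache::new_lru_cache(128 << 20);
    /// let wbm = WriteBufferManager::new_write_buffer_manager_with_cache(64 << 20, false, cache);
    /// assert!(wbm.enabled());
    /// ```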
    pub fn new_write_buffer_manager_with_cache(
        buffer_size: size_t,
        allow_stall: bool,
        cache: Cache,
    ) -> Self {
        let inner = NonNull::new(unsafe {
            ffi::rocksdb_write_buffer_manager_create_with_cache(
                buffer_size,
                cache.0.inner.as_ptr(),
                allow_stall,
            )
        })
        .unwrap();
        WriteBufferManager(Arc::new(WriteBufferManagerWrapper { inner }))
    }

    /// Returns the WriteBufferManager memory usage in bytes.
    pub fn get_usage(&self) -> usize {
        unsafe { ffi::rocksdb_write_buffer_manager_memory_usage(self.0.inner.as_ptr()) }
    }

    /// Returns the current buffer size in bytes.
    pub fn get_buffer_size(&self) -> usize {
        unsafe { ffi::rocksdb_write_buffer_manager_buffer_size(self.0.inner.as_ptr()) }
    }

    /// Sets the buffer size in bytes.
    pub fn set_buffer_size(&self, new_size: usize) {
        unsafe {
            ffi::rocksdb_write_buffer_manager_set_buffer_size(self.0.inner.as_ptr(), new_size);
        }
    }

    /// Returns whether the WriteBufferManager is enabled.
    pub fn enabled(&self) -> bool {
        unsafe { ffi::rocksdb_write_buffer_manager_enabled(self.0.inner.as_ptr()) }
    }

    /// Sets the allow_stall flag.
    pub fn set_allow_stall(&self, allow_stall: bool) {
        unsafe {
            ffi::rocksdb_write_buffer_manager_set_allow_stall(self.0.inner.as_ptr(), allow_stall);
        }
    }
}

pub(crate) struct CacheWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_cache_t>,
}

impl Drop for CacheWrapper {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cache_destroy(self.inner.as_ptr());
        }
    }
}

#[derive(Clone)]
pub struct Cache(pub(crate) Arc<CacheWrapper>);

impl Cache {
    /// Creates an LRU cache with capacity in bytes.
    pub fn new_lru_cache(capacity: size_t) -> Cache {
        let inner = NonNull::new(unsafe { ffi::rocksdb_cache_create_lru(capacity) }).unwrap();
        Cache(Arc::new(CacheWrapper { inner }))
    }

    /// Creates an LRU cache with custom options.
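    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes the `LruCacheOptions::set_capacity` and
    /// `set_num_shard_bits` setters provided elsewhere by this crate:
    ///
    /// ```
    /// use rocksdb::{Cache, LruCacheOptions};
    ///
    /// let mut cache_opts = LruCacheOptions::default();
    /// cache_opts.set_capacity(128 << 20); // 128 MiB
    /// cache_opts.set_num_shard_bits(6);   // 64 shards
    /// let cache = Cache::new_lru_cache_opts(&cache_opts);
    /// ```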
    pub fn new_lru_cache_opts(opts: &LruCacheOptions) -> Cache {
        let inner =
            NonNull::new(unsafe { ffi::rocksdb_cache_create_lru_opts(opts.inner) }).unwrap();
        Cache(Arc::new(CacheWrapper { inner }))
    }

    /// Creates a HyperClockCache with capacity in bytes.
    ///
    /// `estimated_entry_charge` is an important tuning parameter. The optimal
    /// choice at any given time is
    /// `(cache.get_usage() - 64 * cache.get_table_address_count()) /
    /// cache.get_occupancy_count()`, or approximately `cache.get_usage() /
    /// cache.get_occupancy_count()`.
    ///
    /// However, the value cannot be changed dynamically, so as the cache
    /// composition changes at runtime, the following tradeoffs apply:
    ///
    /// * If the estimate is substantially too high (e.g., 25% higher),
    ///   the cache may have to evict entries to prevent load factors that
    ///   would dramatically affect lookup times.
    /// * If the estimate is substantially too low (e.g., less than half),
    ///   then metadata space overhead is substantially higher.
    ///
    /// The latter is generally preferable, and picking the larger of
    /// block size and metadata block size is a reasonable choice that
    /// errs towards this side.
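    ///
    /// # Examples
    ///
    /// A minimal sketch; the capacity and entry charge below are illustrative
    /// (the entry charge follows the block-size guidance above, using a typical
    /// 4 KiB block size):
    ///
    /// ```
    /// use rocksdb::Cache;
    ///
    /// let cache = Cache::new_hyper_clock_cache(1 << 30, 4 * 1024);
    /// ```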
    pub fn new_hyper_clock_cache(capacity: size_t, estimated_entry_charge: size_t) -> Cache {
        Cache(Arc::new(CacheWrapper {
            inner: NonNull::new(unsafe {
                ffi::rocksdb_cache_create_hyper_clock(capacity, estimated_entry_charge)
            })
            .unwrap(),
        }))
    }

    /// Returns the cache memory usage in bytes.
    pub fn get_usage(&self) -> usize {
        unsafe { ffi::rocksdb_cache_get_usage(self.0.inner.as_ptr()) }
    }

    /// Returns the pinned memory usage in bytes.
    pub fn get_pinned_usage(&self) -> usize {
        unsafe { ffi::rocksdb_cache_get_pinned_usage(self.0.inner.as_ptr()) }
    }

    /// Sets cache capacity in bytes.
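    ///
    /// # Examples
    ///
    /// A minimal sketch of resizing a cache at runtime (sizes are illustrative):
    ///
    /// ```
    /// use rocksdb::Cache;
    ///
    /// let mut cache = Cache::new_lru_cache(64 << 20);
    /// cache.set_capacity(32 << 20); // shrink to 32 MiB
    /// ```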
    pub fn set_capacity(&mut self, capacity: size_t) {
        unsafe {
            ffi::rocksdb_cache_set_capacity(self.0.inner.as_ptr(), capacity);
        }
    }
}

#[derive(Default)]
pub(crate) struct OptionsMustOutliveDB {
    env: Option<Env>,
    row_cache: Option<Cache>,
    blob_cache: Option<Cache>,
    block_based: Option<BlockBasedOptionsMustOutliveDB>,
    write_buffer_manager: Option<WriteBufferManager>,
}

impl OptionsMustOutliveDB {
    pub(crate) fn clone(&self) -> Self {
        Self {
            env: self.env.clone(),
            row_cache: self.row_cache.clone(),
            blob_cache: self.blob_cache.clone(),
            block_based: self
                .block_based
                .as_ref()
                .map(BlockBasedOptionsMustOutliveDB::clone),
            write_buffer_manager: self.write_buffer_manager.clone(),
        }
    }
}

#[derive(Default)]
struct BlockBasedOptionsMustOutliveDB {
    block_cache: Option<Cache>,
}

impl BlockBasedOptionsMustOutliveDB {
    fn clone(&self) -> Self {
        Self {
            block_cache: self.block_cache.clone(),
        }
    }
}

/// Database-wide options around performance and behavior.
///
/// Please read the official tuning [guide](https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide)
/// and most importantly, measure performance under realistic workloads with realistic hardware.
///
/// # Examples
///
/// ```
/// use rocksdb::{Options, DB};
/// use rocksdb::DBCompactionStyle;
///
/// fn badly_tuned_for_somebody_elses_disk() -> DB {
///    let path = "path/for/rocksdb/storageX";
///    let mut opts = Options::default();
///    opts.create_if_missing(true);
///    opts.set_max_open_files(10000);
///    opts.set_use_fsync(false);
///    opts.set_bytes_per_sync(8388608);
///    opts.optimize_for_point_lookup(1024);
///    opts.set_table_cache_num_shard_bits(6);
///    opts.set_max_write_buffer_number(32);
///    opts.set_write_buffer_size(536870912);
///    opts.set_target_file_size_base(1073741824);
///    opts.set_min_write_buffer_number_to_merge(4);
///    opts.set_level_zero_stop_writes_trigger(2000);
///    opts.set_level_zero_slowdown_writes_trigger(0);
///    opts.set_compaction_style(DBCompactionStyle::Universal);
///    opts.set_disable_auto_compactions(true);
///
///    DB::open(&opts, path).unwrap()
/// }
/// ```
pub struct Options {
    pub(crate) inner: *mut ffi::rocksdb_options_t,
    pub(crate) outlive: OptionsMustOutliveDB,
}

/// Optionally disable WAL or sync for this write.
///
/// # Examples
///
/// Making an unsafe write of a batch:
///
/// ```
/// use rocksdb::{DB, Options, WriteBatch, WriteOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY1")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY1");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut batch = WriteBatch::default();
///     batch.put(b"my key", b"my value");
///     batch.put(b"key2", b"value2");
///     batch.put(b"key3", b"value3");
///
///     let mut write_options = WriteOptions::default();
///     write_options.set_sync(false);
///     write_options.disable_wal(true);
///
///     db.write_opt(batch, &write_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct WriteOptions {
    pub(crate) inner: *mut ffi::rocksdb_writeoptions_t,
}

pub struct LruCacheOptions {
    pub(crate) inner: *mut ffi::rocksdb_lru_cache_options_t,
}

/// Optionally wait for the memtable flush to be performed.
///
/// # Examples
///
/// Manually flushing the memtable:
///
/// ```
/// use rocksdb::{DB, Options, FlushOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY2")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY2");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///
///     let mut flush_options = FlushOptions::default();
///     flush_options.set_wait(true);
///
///     db.flush_opt(&flush_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct FlushOptions {
    pub(crate) inner: *mut ffi::rocksdb_flushoptions_t,
}

/// For configuring block-based file storage.
pub struct BlockBasedOptions {
    pub(crate) inner: *mut ffi::rocksdb_block_based_table_options_t,
    outlive: BlockBasedOptionsMustOutliveDB,
}

pub struct ReadOptions {
    pub(crate) inner: *mut ffi::rocksdb_readoptions_t,
    // The `ReadOptions` owns a copy of the timestamp and iteration bounds.
    // This is necessary to ensure the pointers we pass over the FFI live as
    // long as the `ReadOptions`. This way, when performing the read operation,
    // the pointers are guaranteed to be valid.
    timestamp: Option<Vec<u8>>,
    iter_start_ts: Option<Vec<u8>>,
    iterate_upper_bound: Option<Vec<u8>>,
    iterate_lower_bound: Option<Vec<u8>>,
}

/// Configuration of cuckoo-based storage.
pub struct CuckooTableOptions {
    pub(crate) inner: *mut ffi::rocksdb_cuckoo_table_options_t,
}

/// For configuring external files ingestion.
///
/// # Examples
///
/// Move files instead of copying them:
///
/// ```
/// use rocksdb::{DB, IngestExternalFileOptions, SstFileWriter, Options};
///
/// let writer_opts = Options::default();
/// let mut writer = SstFileWriter::create(&writer_opts);
/// let tempdir = tempfile::Builder::new()
///     .tempdir()
///     .expect("Failed to create temporary folder for the _path_for_sst_file");
/// let path1 = tempdir.path().join("_path_for_sst_file");
/// writer.open(path1.clone()).unwrap();
/// writer.put(b"k1", b"v1").unwrap();
/// writer.finish().unwrap();
///
/// let tempdir2 = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY3")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY3");
/// let path2 = tempdir2.path();
/// {
///   let db = DB::open_default(&path2).unwrap();
///   let mut ingest_opts = IngestExternalFileOptions::default();
///   ingest_opts.set_move_files(true);
///   db.ingest_external_file_opts(&ingest_opts, vec![path1]).unwrap();
/// }
/// let _ = DB::destroy(&Options::default(), path2);
/// ```
pub struct IngestExternalFileOptions {
    pub(crate) inner: *mut ffi::rocksdb_ingestexternalfileoptions_t,
}

// Safety note: auto-implementing Send on most db-related types is prevented by the inner FFI
// pointer. In most cases, however, this pointer is Send-safe because it is never aliased and
// rocksdb internally does not rely on thread-local information for its user-exposed types.
unsafe impl Send for Options {}
unsafe impl Send for WriteOptions {}
unsafe impl Send for LruCacheOptions {}
unsafe impl Send for FlushOptions {}
unsafe impl Send for BlockBasedOptions {}
unsafe impl Send for CuckooTableOptions {}
unsafe impl Send for ReadOptions {}
unsafe impl Send for IngestExternalFileOptions {}
unsafe impl Send for CacheWrapper {}
unsafe impl Send for CompactOptions {}
unsafe impl Send for WriteBufferManagerWrapper {}

// Sync is similarly safe for many types because they do not expose interior mutability, and their
// use within the rocksdb library is generally behind a const reference.
unsafe impl Sync for Options {}
unsafe impl Sync for WriteOptions {}
unsafe impl Sync for LruCacheOptions {}
unsafe impl Sync for FlushOptions {}
unsafe impl Sync for BlockBasedOptions {}
unsafe impl Sync for CuckooTableOptions {}
unsafe impl Sync for ReadOptions {}
unsafe impl Sync for IngestExternalFileOptions {}
unsafe impl Sync for CacheWrapper {}
unsafe impl Sync for CompactOptions {}
unsafe impl Sync for WriteBufferManagerWrapper {}

impl Drop for Options {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_options_destroy(self.inner);
        }
    }
}

impl Clone for Options {
    fn clone(&self) -> Self {
        let inner = unsafe { ffi::rocksdb_options_create_copy(self.inner) };
        assert!(!inner.is_null(), "Could not copy RocksDB options");

        Self {
            inner,
            outlive: self.outlive.clone(),
        }
    }
}

impl Drop for BlockBasedOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_destroy(self.inner);
        }
    }
}

impl Drop for CuckooTableOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cuckoo_options_destroy(self.inner);
        }
    }
}

impl Drop for FlushOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_flushoptions_destroy(self.inner);
        }
    }
}

impl Drop for WriteOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_writeoptions_destroy(self.inner);
        }
    }
}

impl Drop for LruCacheOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_lru_cache_options_destroy(self.inner);
        }
    }
}

impl Drop for ReadOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_readoptions_destroy(self.inner);
        }
    }
}

impl Drop for IngestExternalFileOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_destroy(self.inner);
        }
    }
}

impl BlockBasedOptions {
    /// Approximate size of user data packed per block. Note that the
    /// block size specified here corresponds to uncompressed data. The
    /// actual size of the unit read from disk may be smaller if
    /// compression is enabled. This parameter can be changed dynamically.
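    ///
    /// # Examples
    ///
    /// A minimal sketch; 16 KiB is illustrative, not a recommendation:
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_block_size(16 * 1024); // 16 KiB of uncompressed user data per block
    /// ```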
    pub fn set_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
        }
    }

    /// Block size for partitioned metadata. Currently applied to indexes when
    /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
    /// Note: Since in the current implementation the filters and index partitions
    /// are aligned, an index/filter block is created when either index or filter
    /// block size reaches the specified limit.
    ///
    /// Note: this limit is currently applied to only index blocks; a filter
    /// partition is cut right after an index block is cut.
    pub fn set_metadata_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
        }
    }

    /// Use partitioned full filters for each SST file. This option is
    /// incompatible with block-based filters.
    ///
    /// Note: currently this option requires kTwoLevelIndexSearch to be set as
    /// well.
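    ///
    /// # Examples
    ///
    /// A minimal sketch combining this option with the two-level index it requires;
    /// `BlockBasedIndexType::TwoLevelIndexSearch` is assumed to be this crate's
    /// counterpart of kTwoLevelIndexSearch, and the 4 KiB metadata block size is
    /// illustrative:
    ///
    /// ```
    /// use rocksdb::{BlockBasedIndexType, BlockBasedOptions};
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch);
    /// block_opts.set_partition_filters(true);
    /// block_opts.set_metadata_block_size(4 * 1024);
    /// ```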
    pub fn set_partition_filters(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_filters(
                self.inner,
                c_uchar::from(enabled),
            );
        }
    }

    /// Sets global cache for blocks (user data is stored in a set of blocks, and
    /// a block is the unit of reading from disk).
    ///
    /// If set, use the specified cache for blocks.
    /// By default, rocksdb will automatically create and use an 8MB internal cache.
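    ///
    /// # Examples
    ///
    /// A minimal sketch sharing one LRU cache (size illustrative) with these table options:
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, Cache};
    ///
    /// let cache = Cache::new_lru_cache(512 << 20); // 512 MiB shared block cache
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_block_cache(&cache);
    /// ```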
    pub fn set_block_cache(&mut self, cache: &Cache) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache.0.inner.as_ptr());
        }
        self.outlive.block_cache = Some(cache.clone());
    }

    /// Disable block cache
    pub fn disable_cache(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, c_uchar::from(true));
        }
    }

    /// Sets a [Bloom filter](https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter)
    /// policy to reduce disk reads.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// ```
    pub fn set_bloom_filter(&mut self, bits_per_key: c_double, block_based: bool) {
        unsafe {
            let bloom = if block_based {
                ffi::rocksdb_filterpolicy_create_bloom(bits_per_key as _)
            } else {
                ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key as _)
            };

            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
        }
    }

    /// Sets a [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Ribbon filters use less memory in exchange for slightly more CPU usage
    /// compared to an equivalent bloom filter.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_ribbon_filter(10.0);
    /// ```
    pub fn set_ribbon_filter(&mut self, bloom_equivalent_bits_per_key: c_double) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon(bloom_equivalent_bits_per_key);
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// Sets a hybrid [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Uses Bloom filters before the given level, and Ribbon filters for all
    /// other levels. This combines the memory savings from Ribbon filters
    /// with the lower CPU usage of Bloom filters.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_hybrid_ribbon_filter(10.0, 2);
    /// ```
    pub fn set_hybrid_ribbon_filter(
        &mut self,
        bloom_equivalent_bits_per_key: c_double,
        bloom_before_level: c_int,
    ) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon_hybrid(
                bloom_equivalent_bits_per_key,
                bloom_before_level,
            );
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// If cache_index_and_filter_blocks is enabled, index and filter blocks are
    /// cached with high priority. If set to true, depending on the implementation
    /// of the block cache, index and filter blocks may be less likely to be
    /// evicted than data blocks.
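    ///
    /// # Examples
    ///
    /// A minimal sketch; pairing this with
    /// `set_pin_l0_filter_and_index_blocks_in_cache` (below) is a common companion setting:
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_cache_index_and_filter_blocks(true);
    /// block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true);
    /// ```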
    pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Defines the index type to be used for SS-table lookups.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, BlockBasedIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::HashSearch);
    /// ```
    pub fn set_index_type(&mut self, index_type: BlockBasedIndexType) {
        let index = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_index_type(self.inner, index);
        }
    }

    /// If cache_index_and_filter_blocks is true and this is true, then
    /// filter and index blocks are stored in the cache, but a reference is
    /// held in the "table reader" object so the blocks are pinned and only
    /// evicted from cache when the table reader is freed.
    ///
    /// Default: false.
    pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If cache_index_and_filter_blocks is true and this is true, then
    /// the top-level index of partitioned filter and index blocks is stored in
    /// the cache, but a reference is held in the "table reader" object so the
    /// blocks are pinned and only evicted from cache when the table reader is
    /// freed. This is not limited to l0 in the LSM tree.
    ///
    /// Default: false.
    pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Format version, reserved for backward compatibility.
    ///
    /// See full [list](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/table.h#L493-L521)
    /// of the supported versions.
    ///
    /// Default: 5.
    pub fn set_format_version(&mut self, version: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_format_version(self.inner, version);
        }
    }

    /// Number of keys between restart points for delta encoding of keys.
    /// This parameter can be changed dynamically. Most clients should
    /// leave this parameter alone. The minimum value allowed is 1. Any smaller
    /// value will be silently overwritten with 1.
    ///
    /// Default: 16.
    pub fn set_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_restart_interval(self.inner, interval);
        }
    }

    /// Same as block_restart_interval but used for the index block.
    /// If you don't plan to run RocksDB before version 5.16 and you are
    /// using `index_block_restart_interval` > 1, you should
    /// probably set the `format_version` to >= 4 as it would reduce the index size.
    ///
    /// Default: 1.
    pub fn set_index_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_index_block_restart_interval(self.inner, interval);
        }
    }

    /// Set the data block index type for point lookups:
    ///  `DataBlockIndexType::BinarySearch` to use binary search within the data block.
    ///  `DataBlockIndexType::BinaryAndHash` to use the data block hash index in combination with
    ///  the normal binary search.
    ///
    /// The hash table utilization ratio is adjustable using [`set_data_block_hash_ratio`](#method.set_data_block_hash_ratio), which is
    /// valid only when using `DataBlockIndexType::BinaryAndHash`.
    ///
    /// Default: `BinarySearch`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
    /// block_opts.set_data_block_hash_ratio(0.85);
    /// ```
    pub fn set_data_block_index_type(&mut self, index_type: DataBlockIndexType) {
        let index_t = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_index_type(self.inner, index_t);
        }
    }

    /// Set the data block hash index utilization ratio.
    ///
    /// The smaller the utilization ratio, the fewer hash collisions occur, reducing the risk
    /// of a point lookup falling back to binary search due to collisions. A small ratio means
    /// faster lookups at the price of more space overhead.
    ///
    /// Default: 0.75
    pub fn set_data_block_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_hash_ratio(self.inner, ratio);
        }
    }

    /// If false, place only prefixes in the filter, not whole keys.
    ///
    /// Defaults to true.
    pub fn set_whole_key_filtering(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_whole_key_filtering(self.inner, c_uchar::from(v));
        }
    }

    /// Use the specified checksum type.
    /// Newly created table files will be protected with this checksum type.
    /// Old table files will still be readable, even though they have a different checksum type.
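    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes the `ChecksumType::XXH3` variant exported by this crate:
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, ChecksumType};
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_checksum_type(ChecksumType::XXH3);
    /// ```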
    pub fn set_checksum_type(&mut self, checksum_type: ChecksumType) {
        unsafe {
            ffi::rocksdb_block_based_options_set_checksum(self.inner, checksum_type as c_char);
        }
    }

    /// If true, generate Bloom/Ribbon filters that minimize memory internal
    /// fragmentation.
    /// See official [wiki](
    /// https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter#reducing-internal-fragmentation)
    /// for more information.
    ///
    /// Defaults to false.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// opts.set_optimize_filters_for_memory(true);
    /// ```
    pub fn set_optimize_filters_for_memory(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_optimize_filters_for_memory(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Set the top-level index pinning tier.
    ///
    /// Controls when top-level index blocks are pinned in block cache memory.
    /// This affects memory usage and lookup performance for large databases with
    /// multiple levels.
    ///
    /// Default: `BlockBasedTablePinningTier::Fallback`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, BlockBasedTablePinningTier};
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_top_level_index_pinning_tier(BlockBasedTablePinningTier::FlushAndSimilar);
    /// ```
    pub fn set_top_level_index_pinning_tier(&mut self, pinning_tier: BlockBasedTablePinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_top_level_index_pinning_tier(
                self.inner,
                pinning_tier as c_int,
            );
        }
    }

    /// Set the partition pinning tier.
    ///
    /// Controls when partition blocks (used in partitioned indexes and filters)
    /// are pinned in block cache memory. This affects performance for databases
    /// using partitioned metadata.
    ///
    /// Default: `BlockBasedTablePinningTier::Fallback`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, BlockBasedTablePinningTier};
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_partition_pinning_tier(BlockBasedTablePinningTier::All);
    /// ```
    pub fn set_partition_pinning_tier(&mut self, pinning_tier: BlockBasedTablePinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_pinning_tier(
                self.inner,
                pinning_tier as c_int,
            );
        }
    }

    /// Set the unpartitioned pinning tier.
    ///
    /// Controls when unpartitioned metadata blocks (index and filter blocks that
    /// are not partitioned) are pinned in block cache memory.
    ///
    /// Default: `BlockBasedTablePinningTier::Fallback`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, BlockBasedTablePinningTier};
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_unpartitioned_pinning_tier(BlockBasedTablePinningTier::None);
    /// ```
    pub fn set_unpartitioned_pinning_tier(&mut self, pinning_tier: BlockBasedTablePinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_unpartitioned_pinning_tier(
                self.inner,
                pinning_tier as c_int,
            );
        }
    }
}

impl Default for BlockBasedOptions {
    fn default() -> Self {
        let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
        assert!(
            !block_opts.is_null(),
            "Could not create RocksDB block based options"
        );

        Self {
            inner: block_opts,
            outlive: BlockBasedOptionsMustOutliveDB::default(),
        }
    }
}

impl CuckooTableOptions {
    /// Determines the utilization of hash tables. Smaller values
    /// result in larger hash tables with fewer collisions.
    /// Default: 0.9
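    ///
    /// # Examples
    ///
    /// A minimal sketch (the ratio is illustrative, not tuned):
    ///
    /// ```
    /// use rocksdb::CuckooTableOptions;
    ///
    /// let mut opts = CuckooTableOptions::default();
    /// opts.set_hash_ratio(0.75); // larger, sparser hash tables with fewer collisions
    /// ```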
    pub fn set_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_hash_ratio(self.inner, ratio);
        }
    }

    /// A property used by the builder to determine how deep to search
    /// for a path to displace elements in case of collision.
    /// See the Builder.MakeSpaceForKey method. Higher values result
    /// in more efficient hash tables with fewer lookups but take more
    /// time to build.
    /// Default: 100
    pub fn set_max_search_depth(&mut self, depth: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_max_search_depth(self.inner, depth);
        }
    }

    /// In case of collision while inserting, the builder
    /// attempts to insert in the next cuckoo_block_size
    /// locations before skipping over to the next Cuckoo hash
    /// function. This makes lookups more cache friendly in case
    /// of collisions.
    /// Default: 5
    pub fn set_cuckoo_block_size(&mut self, size: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_cuckoo_block_size(self.inner, size);
        }
    }

    /// If this option is enabled, the user key is treated as a uint64_t and its value
    /// is used as the hash value directly. This option changes the builder's behavior.
    /// Readers ignore this option and behave according to what is specified in the
    /// table property.
    /// Default: false
    pub fn set_identity_as_first_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_identity_as_first_hash(self.inner, c_uchar::from(flag));
        }
    }

    /// If this option is set to true, modulo is used during hash calculation.
    /// This often yields better space efficiency at the cost of performance.
    /// If this option is set to false, the number of entries in the table is
    /// constrained to be a power of two, and a bitwise AND is used to calculate
    /// the hash, which is generally faster.
    /// Default: true
    pub fn set_use_module_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_use_module_hash(self.inner, c_uchar::from(flag));
        }
    }
}

impl Default for CuckooTableOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_cuckoo_options_create() };
        assert!(!opts.is_null(), "Could not create RocksDB cuckoo options");

        Self { inner: opts }
    }
}

/// Verbosity of the LOG.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum LogLevel {
    Debug = 0,
    Info,
    Warn,
    Error,
    Fatal,
    Header,
}

impl Options {
    /// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
    /// latest RocksDB options file stored in the specified rocksdb database.
    ///
    /// *IMPORTANT*:
    /// ROCKSDB DOES NOT STORE the cf ttl in the options file. If you have set it via
    /// [`ColumnFamilyDescriptor::new_with_ttl`] then you need to set it again after loading the options file.
    /// The ttl will be set to [`ColumnFamilyTtl::Disabled`] for all column families for your safety.
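    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes the crate's `Env::new()` constructor and a
    /// database that has previously been opened (and hence has an options file)
    /// at the given path:
    ///
    /// ```no_run
    /// use rocksdb::{Cache, Env, Options};
    ///
    /// let cache = Cache::new_lru_cache(1 << 20);
    /// let (db_opts, cf_descriptors) =
    ///     Options::load_latest("path/for/rocksdb/storageX", Env::new().unwrap(), true, cache)
    ///         .unwrap();
    /// ```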
    pub fn load_latest<P: AsRef<Path>>(
        path: P,
        env: Env,
        ignore_unknown_options: bool,
        cache: Cache,
    ) -> Result<(Options, Vec<ColumnFamilyDescriptor>), Error> {
        let path = to_cpath(path)?;
        let mut db_options: *mut ffi::rocksdb_options_t = null_mut();
        let mut num_column_families: usize = 0;
        let mut column_family_names: *mut *mut c_char = null_mut();
        let mut column_family_options: *mut *mut ffi::rocksdb_options_t = null_mut();
        unsafe {
            ffi_try!(ffi::rocksdb_load_latest_options(
                path.as_ptr(),
                env.0.inner,
                ignore_unknown_options,
                cache.0.inner.as_ptr(),
                &mut db_options,
                &mut num_column_families,
                &mut column_family_names,
                &mut column_family_options,
            ));
        }
        let options = Options {
            inner: db_options,
            outlive: OptionsMustOutliveDB::default(),
        };
        let column_families = unsafe {
            Options::read_column_descriptors(
                num_column_families,
                column_family_names,
                column_family_options,
            )
        };
        Ok((options, column_families))
    }

    /// Reads column descriptors from C pointers.
    #[inline]
    unsafe fn read_column_descriptors(
        num_column_families: usize,
        column_family_names: *mut *mut c_char,
        column_family_options: *mut *mut ffi::rocksdb_options_t,
    ) -> Vec<ColumnFamilyDescriptor> {
        let column_family_names_iter = unsafe {
            slice::from_raw_parts(column_family_names, num_column_families)
                .iter()
                .map(|ptr| from_cstr(*ptr))
        };
        let column_family_options_iter = unsafe {
            slice::from_raw_parts(column_family_options, num_column_families)
                .iter()
                .map(|ptr| Options {
                    inner: *ptr,
                    outlive: OptionsMustOutliveDB::default(),
                })
        };
        let column_descriptors = column_family_names_iter
            .zip(column_family_options_iter)
            .map(|(name, options)| ColumnFamilyDescriptor {
                name,
                options,
                ttl: ColumnFamilyTtl::Disabled,
            })
            .collect::<Vec<_>>();

        // Free the C-allocated name strings and pointer arrays now that their
        // contents have been copied into Rust-owned values.
        unsafe {
            slice::from_raw_parts(column_family_names, num_column_families)
                .iter()
                .for_each(|ptr| ffi::rocksdb_free(*ptr as *mut c_void));
            ffi::rocksdb_free(column_family_names as *mut c_void);
            ffi::rocksdb_free(column_family_options as *mut c_void);
        };

        column_descriptors
    }

    /// By default, RocksDB uses only one background thread for flush and
    /// compaction. Calling this function will set it up such that a total of
    /// `total_threads` is used. A good value for `total_threads` is the number of
    /// cores. You almost definitely want to call this function if your system is
    /// bottlenecked by RocksDB.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.increase_parallelism(3);
    /// ```
    pub fn increase_parallelism(&mut self, parallelism: i32) {
        unsafe {
            ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
        }
    }

    /// Optimize level style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, and `max_bytes_for_level_base`, so it may override
    /// those parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption would be constrained by
    /// `memtable_memory_budget`.
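    ///
    /// # Examples
    ///
    /// A minimal sketch; the 512 MiB budget is illustrative, not a recommendation:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.optimize_level_style_compaction(512 * 1024 * 1024);
    /// ```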
    pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_level_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// Optimize universal style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, and `max_bytes_for_level_base`, so it may override
    /// those parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption would be constrained by
    /// `memtable_memory_budget`.
    pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_universal_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// If true, the database will be created if it is missing.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_if_missing(true);
    /// ```
    pub fn create_if_missing(&mut self, create_if_missing: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_if_missing(
                self.inner,
                c_uchar::from(create_if_missing),
            );
        }
    }

    /// If true, any column families that didn't exist when opening the database
    /// will be created.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_missing_column_families(true);
    /// ```
    pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_missing_column_families(
                self.inner,
                c_uchar::from(create_missing_cfs),
            );
        }
    }

    /// Specifies whether an error should be raised if the database already exists.
    ///
    /// Default: false
    pub fn set_error_if_exists(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_error_if_exists(self.inner, c_uchar::from(enabled));
        }
    }

    /// Enable/disable paranoid checks.
    ///
    /// If true, the implementation will do aggressive checking of the
    /// data it is processing and will stop early if it detects any
    /// errors. This may have unforeseen ramifications: for example, a
    /// corruption of one DB entry may cause a large number of entries to
    /// become unreadable or for the entire DB to become unopenable.
    /// If any of the writes to the database fails (Put, Delete, Merge, Write),
    /// the database will switch to read-only mode and fail all other
    /// Write operations.
    ///
    /// Default: false
    pub fn set_paranoid_checks(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_paranoid_checks(self.inner, c_uchar::from(enabled));
        }
    }

    /// A list of paths where SST files can be put into, with its target size.
    /// Newer data is placed into paths specified earlier in the vector while
    /// older data gradually moves to paths specified later in the vector.
    ///
    /// For example, if you have a flash device with 10GB allocated for the DB,
    /// as well as a hard drive of 2TB, you should configure it to be:
    ///   [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
    ///
    /// The system will try to guarantee data under each path is close to but
    /// not larger than the target size. But current and future file sizes used
    /// in determining where to place a file are based on best-effort estimation,
    /// which means there is a chance that the actual size under the directory
    /// is slightly more than the target size under some workloads. Users should
    /// give some buffer room for those cases.
    ///
    /// If none of the paths has sufficient room to place a file, the file will
    /// be placed in the last path anyway, regardless of the target size.
    ///
    /// Placing newer data to earlier paths is also best-effort. Users should
    /// expect user files to be placed in higher levels in some extreme cases.
    ///
    /// If left empty, only one path will be used, which is the `path` passed when
    /// opening the DB.
    ///
    /// Default: empty
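    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes the crate's `DBPath::new` constructor, and uses
    /// temporary directories in place of real flash/HDD mount points:
    ///
    /// ```
    /// use rocksdb::{DBPath, Options};
    ///
    /// let fast = tempfile::tempdir().unwrap();
    /// let slow = tempfile::tempdir().unwrap();
    /// let paths = vec![
    ///     DBPath::new(fast.path(), 10u64 << 30).unwrap(), // "flash": 10 GiB target
    ///     DBPath::new(slow.path(), 2u64 << 40).unwrap(),  // "hard drive": 2 TiB target
    /// ];
    /// let mut opts = Options::default();
    /// opts.set_db_paths(&paths);
    /// ```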
    pub fn set_db_paths(&mut self, paths: &[DBPath]) {
        let mut paths: Vec<_> = paths.iter().map(|path| path.inner.cast_const()).collect();
        let num_paths = paths.len();
        unsafe {
            ffi::rocksdb_options_set_db_paths(self.inner, paths.as_mut_ptr(), num_paths);
        }
    }

    /// Use the specified object to interact with the environment,
    /// e.g. to read/write files, schedule background work, etc. In the near
    /// future, support for doing storage operations such as read/write files
    /// through env will be deprecated in favor of file_system.
    ///
    /// Default: Env::default()
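    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes the crate's `Env::new()` constructor:
    ///
    /// ```
    /// use rocksdb::{Env, Options};
    ///
    /// let env = Env::new().unwrap();
    /// let mut opts = Options::default();
    /// opts.set_env(&env);
    /// ```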
    pub fn set_env(&mut self, env: &Env) {
        unsafe {
            ffi::rocksdb_options_set_env(self.inner, env.0.inner);
        }
        self.outlive.env = Some(env.clone());
    }

    /// Sets the compression algorithm that will be used for compressing blocks.
    ///
    /// Default: `DBCompressionType::Snappy` (`DBCompressionType::None` if
    /// snappy feature is not enabled).
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Snappy);
    /// ```
    pub fn set_compression_type(&mut self, t: DBCompressionType) {
        unsafe {
            ffi::rocksdb_options_set_compression(self.inner, t as c_int);
        }
    }

    /// Number of threads for parallel compression.
    /// Parallel compression is enabled only if threads > 1.
    /// THE FEATURE IS STILL EXPERIMENTAL
    ///
    /// See [code](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/advanced_options.h#L116-L127)
    /// for more information.
    ///
    /// Default: 1
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Zstd);
    /// opts.set_compression_options_parallel_threads(3);
    /// ```
    pub fn set_compression_options_parallel_threads(&mut self, num: i32) {
        unsafe {
            ffi::rocksdb_options_set_compression_options_parallel_threads(self.inner, num);
        }
    }

    /// Sets the compression algorithm that will be used for compressing WAL.
    ///
    /// At present, only ZSTD compression is supported!
    ///
    /// Default: `DBCompressionType::None`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_wal_compression_type(DBCompressionType::Zstd);
    /// // Or None to disable it
    /// opts.set_wal_compression_type(DBCompressionType::None);
    /// ```
    pub fn set_wal_compression_type(&mut self, t: DBCompressionType) {
        match t {
            DBCompressionType::None | DBCompressionType::Zstd => unsafe {
                ffi::rocksdb_options_set_wal_compression(self.inner, t as c_int);
            },
            other => unimplemented!("{:?} is not supported for WAL compression", other),
        }
    }

    /// Sets the bottom-most compression algorithm that will be used for
    /// compressing blocks at the bottom-most level.
    ///
    /// Note that to actually enable bottom-most compression configuration after
    /// setting the compression type, it needs to be enabled by calling
    /// [`set_bottommost_compression_options`](#method.set_bottommost_compression_options) or
    /// [`set_bottommost_zstd_max_train_bytes`](#method.set_bottommost_zstd_max_train_bytes) method with `enabled` argument
    /// set to `true`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_zstd_max_train_bytes(0, true);
    /// ```
    pub fn set_bottommost_compression_type(&mut self, t: DBCompressionType) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression(self.inner, t as c_int);
        }
    }

    /// Different levels can have different compression policies. There
    /// are cases where most lower levels would like to use quick compression
    /// algorithms while the higher levels (which have more data) use
    /// compression algorithms that have better compression but could
    /// be slower. This array, if non-empty, should have an entry for
    /// each level of the database; these override the value specified in
    /// the previous field 'compression'.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_per_level(&[
    ///     DBCompressionType::None,
    ///     DBCompressionType::None,
    ///     DBCompressionType::Snappy,
    ///     DBCompressionType::Snappy,
    ///     DBCompressionType::Snappy
    /// ]);
    /// ```
    pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
        unsafe {
            let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
            ffi::rocksdb_options_set_compression_per_level(
                self.inner,
                level_types.as_mut_ptr(),
                level_types.len() as size_t,
            );
        }
    }

    /// Maximum size of dictionaries used to prime the compression library.
    /// Enabling dictionary can improve compression ratios when there are
    /// repetitions across data blocks.
    ///
    /// The dictionary is created by sampling the SST file data. If
    /// `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's
    /// dictionary generator. Otherwise, the random samples are used directly as
    /// the dictionary.
    ///
    /// When compression dictionary is disabled, we compress and write each block
    /// before buffering data for the next one. When compression dictionary is
    /// enabled, we buffer all SST file data in-memory so we can sample it, as data
    /// can only be compressed and written after the dictionary has been finalized.
    /// So users of this feature may see increased memory usage.
    ///
    /// Default: `0`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_options(4, 5, 6, 7);
    /// ```
    pub fn set_compression_options(
        &mut self,
        w_bits: c_int,
        level: c_int,
        strategy: c_int,
        max_dict_bytes: c_int,
    ) {
        unsafe {
            ffi::rocksdb_options_set_compression_options(
                self.inner,
                w_bits,
                level,
                strategy,
                max_dict_bytes,
            );
        }
    }

    /// Sets compression options for blocks at the bottom-most level. The meaning
    /// of all settings is the same as in the [`set_compression_options`](#method.set_compression_options) method, but
    /// they affect only the bottom-most compression, which is set using the
    /// [`set_bottommost_compression_type`](#method.set_bottommost_compression_type) method.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_compression_options(4, 5, 6, 7, true);
    /// ```
    pub fn set_bottommost_compression_options(
        &mut self,
        w_bits: c_int,
        level: c_int,
        strategy: c_int,
        max_dict_bytes: c_int,
        enabled: bool,
    ) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression_options(
                self.inner,
                w_bits,
                level,
                strategy,
                max_dict_bytes,
                c_uchar::from(enabled),
            );
        }
    }

1471    /// Sets maximum size of training data passed to zstd's dictionary trainer. Using zstd's
1472    /// dictionary trainer can achieve even better compression ratio improvements than using
1473    /// `max_dict_bytes` alone.
1474    ///
    /// The training data will be used to generate a dictionary of at most `max_dict_bytes` bytes.
1476    ///
1477    /// Default: 0.
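    ///
    /// # Examples
    ///
    /// A minimal sketch pairing dictionary training with zstd compression; the
    /// sizes (and the window-bits/level/strategy values) are illustrative, not
    /// tuned recommendations.
    ///
    /// ```
    /// use rocksdb::{DBCompressionType, Options};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Zstd);
    /// // A dictionary is only built when `max_dict_bytes` is nonzero.
    /// opts.set_compression_options(4, 5, 6, 16 * 1024);
    /// // Train on roughly 100x the dictionary size of sampled data.
    /// opts.set_zstd_max_train_bytes(100 * 16 * 1024);
    /// ```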
1478    pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
1479        unsafe {
1480            ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
1481        }
1482    }
1483
1484    /// Sets maximum size of training data passed to zstd's dictionary trainer
1485    /// when compressing the bottom-most level. Using zstd's dictionary trainer
1486    /// can achieve even better compression ratio improvements than using
1487    /// `max_dict_bytes` alone.
1488    ///
    /// The training data will be used to generate a dictionary of at most
    /// `max_dict_bytes` bytes.
1491    ///
1492    /// Default: 0.
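    ///
    /// # Examples
    ///
    /// A minimal sketch mirroring [`set_zstd_max_train_bytes`](#method.set_zstd_max_train_bytes),
    /// but for the bottom-most level; the values are illustrative.
    ///
    /// ```
    /// use rocksdb::{DBCompressionType, Options};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_compression_options(4, 5, 6, 16 * 1024, true);
    /// opts.set_bottommost_zstd_max_train_bytes(100 * 16 * 1024, true);
    /// ```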
1493    pub fn set_bottommost_zstd_max_train_bytes(&mut self, value: c_int, enabled: bool) {
1494        unsafe {
1495            ffi::rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
1496                self.inner,
1497                value,
1498                c_uchar::from(enabled),
1499            );
1500        }
1501    }
1502
1503    /// If non-zero, we perform bigger reads when doing compaction. If you're
1504    /// running RocksDB on spinning disks, you should set this to at least 2MB.
1505    /// That way RocksDB's compaction is doing sequential instead of random reads.
1506    ///
1507    /// Default: 2 * 1024 * 1024 (2 MB)
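    ///
    /// # Examples
    ///
    /// A sketch using a larger readahead for spinning disks:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_readahead_size(4 * 1024 * 1024);
    /// ```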
1508    pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) {
1509        unsafe {
1510            ffi::rocksdb_options_compaction_readahead_size(self.inner, compaction_readahead_size);
1511        }
1512    }
1513
    /// Allow RocksDB to pick a dynamic base of bytes for levels.
    /// With this feature turned on, RocksDB will automatically adjust the max bytes for each level.
    /// The goal of this feature is to have a lower bound on size amplification.
1517    ///
1518    /// Default: false.
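    ///
    /// # Examples
    ///
    /// A sketch enabling the feature:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_level_compaction_dynamic_level_bytes(true);
    /// ```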
1519    pub fn set_level_compaction_dynamic_level_bytes(&mut self, v: bool) {
1520        unsafe {
1521            ffi::rocksdb_options_set_level_compaction_dynamic_level_bytes(
1522                self.inner,
1523                c_uchar::from(v),
1524            );
1525        }
1526    }
1527
1528    /// This option has different meanings for different compaction styles:
1529    ///
1530    /// Leveled: files older than `periodic_compaction_seconds` will be picked up
1531    /// for compaction and will be re-written to the same level as they were
1532    /// before.
1533    ///
1534    /// FIFO: not supported. Setting this option has no effect for FIFO compaction.
1535    ///
    /// Universal: when there are files older than `periodic_compaction_seconds`,
    /// RocksDB will try to do as large a compaction as possible, including the
    /// last level. Such a compaction is only skipped if only the last level is to
    /// be compacted and no file in the last level is older than
    /// `periodic_compaction_seconds`. See more in
    /// UniversalCompactionBuilder::PickPeriodicCompaction().
    ///
    /// For backward compatibility, the effective value of this option takes
    /// into account the value of the `ttl` option. The logic is as follows:
    ///
    /// - both options are set to 30 days if they have the default value.
    /// - if both options are zero, zero is picked. Otherwise, we take the
    ///   minimum of the non-zero option values (i.e. the stricter limit).
1548    ///
1549    /// One main use of the feature is to make sure a file goes through compaction
1550    /// filters periodically. Users can also use the feature to clear up SST
    /// files that use an old format.
1552    ///
1553    /// A file's age is computed by looking at file_creation_time or creation_time
1554    /// table properties in order, if they have valid non-zero values; if not, the
1555    /// age is based on the file's last modified time (given by the underlying
1556    /// Env).
1557    ///
1558    /// This option only supports block based table format for any compaction
1559    /// style.
1560    ///
1561    /// unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60
1562    ///
    /// Values:
    /// - 0: turn off periodic compactions.
    /// - UINT64_MAX - 1 (0xfffffffffffffffe): a special flag that lets RocksDB
    ///   pick the default.
1567    ///
1568    /// Default: 30 days if using block based table format + compaction filter +
1569    /// leveled compaction or block based table format + universal compaction.
1570    /// 0 (disabled) otherwise.
1571    ///
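    /// # Examples
    ///
    /// A sketch using the 7-day figure from the note above:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_periodic_compaction_seconds(7 * 24 * 60 * 60);
    /// ```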
1572    pub fn set_periodic_compaction_seconds(&mut self, secs: u64) {
1573        unsafe {
1574            ffi::rocksdb_options_set_periodic_compaction_seconds(self.inner, secs);
1575        }
1576    }
1577
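    /// Sets a merge operator that uses `full_merge_fn` for both full and
    /// partial merges, which is appropriate when the merge function is
    /// associative.
    ///
    /// # Examples
    ///
    /// A minimal sketch: an operator that concatenates operands. The operator
    /// name and the concatenation rule are illustrative.
    ///
    /// ```
    /// use rocksdb::{MergeOperands, Options};
    ///
    /// fn concat(
    ///     _key: &[u8],
    ///     existing: Option<&[u8]>,
    ///     operands: &MergeOperands,
    /// ) -> Option<Vec<u8>> {
    ///     // Start from the existing value, if any, and append each operand.
    ///     let mut result = existing.map(|v| v.to_vec()).unwrap_or_default();
    ///     for op in operands {
    ///         result.extend_from_slice(op);
    ///     }
    ///     Some(result)
    /// }
    ///
    /// let mut opts = Options::default();
    /// opts.set_merge_operator_associative("concat", concat);
    /// ```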
1578    pub fn set_merge_operator_associative<F: MergeFn + Clone>(
1579        &mut self,
1580        name: impl CStrLike,
1581        full_merge_fn: F,
1582    ) {
1583        let cb = Box::new(MergeOperatorCallback {
1584            name: name.into_c_string().unwrap(),
1585            full_merge_fn: full_merge_fn.clone(),
1586            partial_merge_fn: full_merge_fn,
1587        });
1588
1589        unsafe {
1590            let mo = ffi::rocksdb_mergeoperator_create(
1591                Box::into_raw(cb).cast::<c_void>(),
1592                Some(merge_operator::destructor_callback::<F, F>),
1593                Some(full_merge_callback::<F, F>),
1594                Some(partial_merge_callback::<F, F>),
1595                Some(merge_operator::delete_callback),
1596                Some(merge_operator::name_callback::<F, F>),
1597            );
1598            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
1599        }
1600    }
1601
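    /// Sets a merge operator with separate full-merge and partial-merge
    /// functions. Compared to
    /// [`set_merge_operator_associative`](#method.set_merge_operator_associative),
    /// this allows partial merges to combine operands without access to the
    /// existing value.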
1602    pub fn set_merge_operator<F: MergeFn, PF: MergeFn>(
1603        &mut self,
1604        name: impl CStrLike,
1605        full_merge_fn: F,
1606        partial_merge_fn: PF,
1607    ) {
1608        let cb = Box::new(MergeOperatorCallback {
1609            name: name.into_c_string().unwrap(),
1610            full_merge_fn,
1611            partial_merge_fn,
1612        });
1613
1614        unsafe {
1615            let mo = ffi::rocksdb_mergeoperator_create(
1616                Box::into_raw(cb).cast::<c_void>(),
1617                Some(merge_operator::destructor_callback::<F, PF>),
1618                Some(full_merge_callback::<F, PF>),
1619                Some(partial_merge_callback::<F, PF>),
1620                Some(merge_operator::delete_callback),
1621                Some(merge_operator::name_callback::<F, PF>),
1622            );
1623            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
1624        }
1625    }
1626
1627    #[deprecated(
1628        since = "0.5.0",
1629        note = "add_merge_operator has been renamed to set_merge_operator"
1630    )]
1631    pub fn add_merge_operator<F: MergeFn + Clone>(&mut self, name: &str, merge_fn: F) {
1632        self.set_merge_operator_associative(name, merge_fn);
1633    }
1634
1635    /// Sets a compaction filter used to determine if entries should be kept, changed,
1636    /// or removed during compaction.
1637    ///
1638    /// An example use case is to remove entries with an expired TTL.
1639    ///
1640    /// If you take a snapshot of the database, only values written since the last
1641    /// snapshot will be passed through the compaction filter.
1642    ///
1643    /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
1644    /// simultaneously.
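    ///
    /// # Examples
    ///
    /// A minimal sketch that drops every entry whose key starts with `_`; the
    /// filter name and the prefix rule are illustrative.
    ///
    /// ```
    /// use rocksdb::compaction_filter::Decision;
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_filter("strip_underscored", |_level: u32, key: &[u8], _value: &[u8]| {
    ///     if key.starts_with(b"_") {
    ///         Decision::Remove
    ///     } else {
    ///         Decision::Keep
    ///     }
    /// });
    /// ```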
1645    pub fn set_compaction_filter<F>(&mut self, name: impl CStrLike, filter_fn: F)
1646    where
1647        F: CompactionFilterFn + Send + 'static,
1648    {
1649        let cb = Box::new(CompactionFilterCallback {
1650            name: name.into_c_string().unwrap(),
1651            filter_fn,
1652        });
1653
1654        unsafe {
1655            let cf = ffi::rocksdb_compactionfilter_create(
1656                Box::into_raw(cb).cast::<c_void>(),
1657                Some(compaction_filter::destructor_callback::<CompactionFilterCallback<F>>),
1658                Some(compaction_filter::filter_callback::<CompactionFilterCallback<F>>),
1659                Some(compaction_filter::name_callback::<CompactionFilterCallback<F>>),
1660            );
1661            ffi::rocksdb_options_set_compaction_filter(self.inner, cf);
1662        }
1663    }
1664
1665    /// This is a factory that provides compaction filter objects which allow
1666    /// an application to modify/delete a key-value during background compaction.
1667    ///
1668    /// A new filter will be created on each compaction run.  If multithreaded
1669    /// compaction is being used, each created CompactionFilter will only be used
1670    /// from a single thread and so does not need to be thread-safe.
1671    ///
1672    /// Default: nullptr
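    ///
    /// # Examples
    ///
    /// A sketch of a no-op factory, assuming the `CompactionFilter` and
    /// `CompactionFilterFactory` traits as defined in this crate; all names
    /// are illustrative.
    ///
    /// ```
    /// use std::ffi::CStr;
    /// use rocksdb::compaction_filter::{CompactionFilter, Decision};
    /// use rocksdb::compaction_filter_factory::{CompactionFilterContext, CompactionFilterFactory};
    /// use rocksdb::Options;
    ///
    /// struct KeepAllFilter;
    ///
    /// impl CompactionFilter for KeepAllFilter {
    ///     fn filter(&mut self, _level: u32, _key: &[u8], _value: &[u8]) -> Decision {
    ///         // Keep every entry; a real filter would inspect key/value here.
    ///         Decision::Keep
    ///     }
    ///     fn name(&self) -> &CStr {
    ///         CStr::from_bytes_with_nul(b"keep_all\0").unwrap()
    ///     }
    /// }
    ///
    /// struct KeepAllFactory;
    ///
    /// impl CompactionFilterFactory for KeepAllFactory {
    ///     type Filter = KeepAllFilter;
    ///     fn create(&mut self, _context: CompactionFilterContext) -> Self::Filter {
    ///         KeepAllFilter
    ///     }
    ///     fn name(&self) -> &CStr {
    ///         CStr::from_bytes_with_nul(b"keep_all_factory\0").unwrap()
    ///     }
    /// }
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_filter_factory(KeepAllFactory);
    /// ```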
1673    pub fn set_compaction_filter_factory<F>(&mut self, factory: F)
1674    where
1675        F: CompactionFilterFactory + 'static,
1676    {
1677        let factory = Box::new(factory);
1678
1679        unsafe {
1680            let cff = ffi::rocksdb_compactionfilterfactory_create(
1681                Box::into_raw(factory).cast::<c_void>(),
1682                Some(compaction_filter_factory::destructor_callback::<F>),
1683                Some(compaction_filter_factory::create_compaction_filter_callback::<F>),
1684                Some(compaction_filter_factory::name_callback::<F>),
1685            );
1686
1687            ffi::rocksdb_options_set_compaction_filter_factory(self.inner, cff);
1688        }
1689    }
1690
1691    /// Sets the comparator used to define the order of keys in the table.
1692    /// Default: a comparator that uses lexicographic byte-wise ordering
1693    ///
1694    /// The client must ensure that the comparator supplied here has the same
1695    /// name and orders keys *exactly* the same as the comparator provided to
1696    /// previous open calls on the same DB.
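    ///
    /// # Examples
    ///
    /// A sketch of a comparator that reverses the default byte-wise order; the
    /// name is illustrative.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_comparator("reverse_bytewise", Box::new(|a: &[u8], b: &[u8]| b.cmp(a)));
    /// ```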
1697    pub fn set_comparator(&mut self, name: impl CStrLike, compare_fn: Box<CompareFn>) {
1698        let cb = Box::new(ComparatorCallback {
1699            name: name.into_c_string().unwrap(),
1700            compare_fn,
1701        });
1702
1703        unsafe {
1704            let cmp = ffi::rocksdb_comparator_create(
1705                Box::into_raw(cb).cast::<c_void>(),
1706                Some(ComparatorCallback::destructor_callback),
1707                Some(ComparatorCallback::compare_callback),
1708                Some(ComparatorCallback::name_callback),
1709            );
1710            ffi::rocksdb_options_set_comparator(self.inner, cmp);
1711        }
1712    }
1713
    /// Sets a timestamp-aware comparator, used to define the order of keys in the table
    /// while taking the timestamp into consideration.
    /// Find more information on timestamp-aware comparators [here](https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp).
1717    ///
1718    /// The client must ensure that the comparator supplied here has the same
1719    /// name and orders keys *exactly* the same as the comparator provided to
1720    /// previous open calls on the same DB.
1721    pub fn set_comparator_with_ts(
1722        &mut self,
1723        name: impl CStrLike,
1724        timestamp_size: usize,
1725        compare_fn: Box<CompareFn>,
1726        compare_ts_fn: Box<CompareTsFn>,
1727        compare_without_ts_fn: Box<CompareWithoutTsFn>,
1728    ) {
1729        let cb = Box::new(ComparatorWithTsCallback {
1730            name: name.into_c_string().unwrap(),
1731            compare_fn,
1732            compare_ts_fn,
1733            compare_without_ts_fn,
1734        });
1735
1736        unsafe {
1737            let cmp = ffi::rocksdb_comparator_with_ts_create(
1738                Box::into_raw(cb).cast::<c_void>(),
1739                Some(ComparatorWithTsCallback::destructor_callback),
1740                Some(ComparatorWithTsCallback::compare_callback),
1741                Some(ComparatorWithTsCallback::compare_ts_callback),
1742                Some(ComparatorWithTsCallback::compare_without_ts_callback),
1743                Some(ComparatorWithTsCallback::name_callback),
1744                timestamp_size,
1745            );
1746            ffi::rocksdb_options_set_comparator(self.inner, cmp);
1747        }
1748    }
1749
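    /// Sets the prefix extractor used for prefix seek and prefix bloom filters.
    ///
    /// # Examples
    ///
    /// A sketch using a fixed-length prefix; the length of 3 is illustrative.
    ///
    /// ```
    /// use rocksdb::{Options, SliceTransform};
    ///
    /// let mut opts = Options::default();
    /// opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(3));
    /// ```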
1750    pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) {
1751        unsafe {
1752            ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner);
1753        }
1754    }
1755
    /// Use this if you don't need to keep the data sorted, i.e. you'll never use
    /// an iterator, only Put() and Get() API calls.
1759    pub fn optimize_for_point_lookup(&mut self, block_cache_size_mb: u64) {
1760        unsafe {
1761            ffi::rocksdb_options_optimize_for_point_lookup(self.inner, block_cache_size_mb);
1762        }
1763    }
1764
    /// Sets the optimize_filters_for_hits flag. When enabled, bloom filters are
    /// not built for the last (largest) level, which substantially reduces filter
    /// memory when most lookups find their key, at the cost of extra reads for
    /// keys that do not exist.
1766    ///
1767    /// Default: `false`
1768    ///
1769    /// # Examples
1770    ///
1771    /// ```
1772    /// use rocksdb::Options;
1773    ///
1774    /// let mut opts = Options::default();
1775    /// opts.set_optimize_filters_for_hits(true);
1776    /// ```
1777    pub fn set_optimize_filters_for_hits(&mut self, optimize_for_hits: bool) {
1778        unsafe {
1779            ffi::rocksdb_options_set_optimize_filters_for_hits(
1780                self.inner,
1781                c_int::from(optimize_for_hits),
1782            );
1783        }
1784    }
1785
1786    /// Sets the periodicity when obsolete files get deleted.
1787    ///
    /// Files that go out of scope during the compaction
    /// process will still be deleted automatically on every compaction,
    /// regardless of this setting.
1791    ///
1792    /// Default: 6 hours
1793    pub fn set_delete_obsolete_files_period_micros(&mut self, micros: u64) {
1794        unsafe {
1795            ffi::rocksdb_options_set_delete_obsolete_files_period_micros(self.inner, micros);
1796        }
1797    }
1798
1799    /// Prepare the DB for bulk loading.
1800    ///
1801    /// All data will be in level 0 without any automatic compaction.
1802    /// It's recommended to manually call CompactRange(NULL, NULL) before reading
1803    /// from the database, because otherwise the read can be very slow.
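    ///
    /// # Examples
    ///
    /// A sketch of a bulk-load flow (the path is illustrative): load with
    /// auto-compaction disabled, then compact the whole range before reading.
    ///
    /// ```no_run
    /// use rocksdb::{Options, DB};
    ///
    /// let mut opts = Options::default();
    /// opts.create_if_missing(true);
    /// opts.prepare_for_bulk_load();
    ///
    /// let db = DB::open(&opts, "/tmp/bulk_load_example").unwrap();
    /// // ... bulk-load data with db.put(...) ...
    /// db.compact_range(None::<&[u8]>, None::<&[u8]>);
    /// ```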
1804    pub fn prepare_for_bulk_load(&mut self) {
1805        unsafe {
1806            ffi::rocksdb_options_prepare_for_bulk_load(self.inner);
1807        }
1808    }
1809
1810    /// Sets the number of open files that can be used by the DB. You may need to
1811    /// increase this if your database has a large working set. Value `-1` means
    /// files opened are always kept open. You can estimate the number of files based
1813    /// on target_file_size_base and target_file_size_multiplier for level-based
1814    /// compaction. For universal-style compaction, you can usually set it to `-1`.
1815    ///
1816    /// Default: `-1`
1817    ///
1818    /// # Examples
1819    ///
1820    /// ```
1821    /// use rocksdb::Options;
1822    ///
1823    /// let mut opts = Options::default();
1824    /// opts.set_max_open_files(10);
1825    /// ```
1826    pub fn set_max_open_files(&mut self, nfiles: c_int) {
1827        unsafe {
1828            ffi::rocksdb_options_set_max_open_files(self.inner, nfiles);
1829        }
1830    }
1831
1832    /// If max_open_files is -1, DB will open all files on DB::Open(). You can
1833    /// use this option to increase the number of threads used to open the files.
1834    /// Default: 16
1835    pub fn set_max_file_opening_threads(&mut self, nthreads: c_int) {
1836        unsafe {
1837            ffi::rocksdb_options_set_max_file_opening_threads(self.inner, nthreads);
1838        }
1839    }
1840
1841    /// By default, writes to stable storage use fdatasync (on platforms
1842    /// where this function is available). If this option is true,
1843    /// fsync is used instead.
1844    ///
1845    /// fsync and fdatasync are equally safe for our purposes and fdatasync is
1846    /// faster, so it is rarely necessary to set this option. It is provided
1847    /// as a workaround for kernel/filesystem bugs, such as one that affected
1848    /// fdatasync with ext4 in kernel versions prior to 3.7.
1849    ///
1850    /// Default: `false`
1851    ///
1852    /// # Examples
1853    ///
1854    /// ```
1855    /// use rocksdb::Options;
1856    ///
1857    /// let mut opts = Options::default();
1858    /// opts.set_use_fsync(true);
1859    /// ```
1860    pub fn set_use_fsync(&mut self, useit: bool) {
1861        unsafe {
1862            ffi::rocksdb_options_set_use_fsync(self.inner, c_int::from(useit));
1863        }
1864    }
1865
1866    /// Returns the value of the `use_fsync` option.
1867    pub fn get_use_fsync(&self) -> bool {
1868        let val = unsafe { ffi::rocksdb_options_get_use_fsync(self.inner) };
1869        val != 0
1870    }
1871
1872    /// Specifies the absolute info LOG dir.
1873    ///
1874    /// If it is empty, the log files will be in the same dir as data.
    /// If it is non-empty, the log files will be in the specified dir,
1876    /// and the db data dir's absolute path will be used as the log file
1877    /// name's prefix.
1878    ///
1879    /// Default: empty
1880    pub fn set_db_log_dir<P: AsRef<Path>>(&mut self, path: P) {
1881        let p = to_cpath(path).unwrap();
1882        unsafe {
1883            ffi::rocksdb_options_set_db_log_dir(self.inner, p.as_ptr());
1884        }
1885    }
1886
1887    /// Specifies the log level.
1888    /// Consider the `LogLevel` enum for a list of possible levels.
1889    ///
1890    /// Default: Info
1891    ///
1892    /// # Examples
1893    ///
1894    /// ```
1895    /// use rocksdb::{Options, LogLevel};
1896    ///
1897    /// let mut opts = Options::default();
1898    /// opts.set_log_level(LogLevel::Warn);
1899    /// ```
1900    pub fn set_log_level(&mut self, level: LogLevel) {
1901        unsafe {
1902            ffi::rocksdb_options_set_info_log_level(self.inner, level as c_int);
1903        }
1904    }
1905
    /// Allows the OS to incrementally sync files to disk while they are being
    /// written, asynchronously, in the background. This operation can be used
    /// to smooth out write I/Os over time. Users shouldn't rely on it for
    /// durability guarantees.
    /// One sync request is issued for every `bytes_per_sync` bytes written. `0` turns it off.
    ///
    /// Default: `0`
    ///
    /// You may consider using a rate_limiter to regulate the write rate to the device.
    /// When a rate limiter is enabled, it automatically sets bytes_per_sync
    /// to 1MB.
1917    ///
1918    /// This option applies to table files
1919    ///
1920    /// # Examples
1921    ///
1922    /// ```
1923    /// use rocksdb::Options;
1924    ///
1925    /// let mut opts = Options::default();
1926    /// opts.set_bytes_per_sync(1024 * 1024);
1927    /// ```
1928    pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
1929        unsafe {
1930            ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
1931        }
1932    }
1933
1934    /// Same as bytes_per_sync, but applies to WAL files.
1935    ///
1936    /// Default: 0, turned off
1937    ///
1938    /// Dynamically changeable through SetDBOptions() API.
1939    pub fn set_wal_bytes_per_sync(&mut self, nbytes: u64) {
1940        unsafe {
1941            ffi::rocksdb_options_set_wal_bytes_per_sync(self.inner, nbytes);
1942        }
1943    }
1944
1945    /// Sets the maximum buffer size that is used by WritableFileWriter.
1946    ///
1947    /// On Windows, we need to maintain an aligned buffer for writes.
    /// We allow the buffer to grow until its size hits the limit in buffered
    /// IO, and fix the buffer size when using direct IO to ensure alignment of
    /// write requests if the logical sector size is unusual.
1951    ///
1952    /// Default: 1024 * 1024 (1 MB)
1953    ///
1954    /// Dynamically changeable through SetDBOptions() API.
1955    pub fn set_writable_file_max_buffer_size(&mut self, nbytes: u64) {
1956        unsafe {
1957            ffi::rocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
1958        }
1959    }
1960
1961    /// If true, allow multi-writers to update mem tables in parallel.
    /// Only some memtable factories support concurrent writes; currently it
1963    /// is implemented only for SkipListFactory.  Concurrent memtable writes
1964    /// are not compatible with inplace_update_support or filter_deletes.
1965    /// It is strongly recommended to set enable_write_thread_adaptive_yield
1966    /// if you are going to use this feature.
1967    ///
1968    /// Default: true
1969    ///
1970    /// # Examples
1971    ///
1972    /// ```
1973    /// use rocksdb::Options;
1974    ///
1975    /// let mut opts = Options::default();
1976    /// opts.set_allow_concurrent_memtable_write(false);
1977    /// ```
1978    pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
1979        unsafe {
1980            ffi::rocksdb_options_set_allow_concurrent_memtable_write(
1981                self.inner,
1982                c_uchar::from(allow),
1983            );
1984        }
1985    }
1986
1987    /// If true, threads synchronizing with the write batch group leader will wait for up to
1988    /// write_thread_max_yield_usec before blocking on a mutex. This can substantially improve
1989    /// throughput for concurrent workloads, regardless of whether allow_concurrent_memtable_write
1990    /// is enabled.
1991    ///
1992    /// Default: true
1993    pub fn set_enable_write_thread_adaptive_yield(&mut self, enabled: bool) {
1994        unsafe {
1995            ffi::rocksdb_options_set_enable_write_thread_adaptive_yield(
1996                self.inner,
1997                c_uchar::from(enabled),
1998            );
1999        }
2000    }
2001
    /// An iterator's Next() sequentially skips over keys with the same user key
    /// unless this limit is reached.
    ///
    /// This number specifies the number of keys (with the same user key)
    /// that will be sequentially skipped before a reseek is issued.
2006    ///
2007    /// Default: 8
2008    pub fn set_max_sequential_skip_in_iterations(&mut self, num: u64) {
2009        unsafe {
2010            ffi::rocksdb_options_set_max_sequential_skip_in_iterations(self.inner, num);
2011        }
2012    }
2013
    /// Enable direct I/O mode for reads. This may or may not improve
    /// performance depending on the use case.
2016    ///
2017    /// Files will be opened in "direct I/O" mode
2018    /// which means that data read from the disk will not be cached or
2019    /// buffered. The hardware buffer of the devices may however still
2020    /// be used. Memory mapped files are not impacted by these parameters.
2021    ///
2022    /// Default: false
2023    ///
2024    /// # Examples
2025    ///
2026    /// ```
2027    /// use rocksdb::Options;
2028    ///
2029    /// let mut opts = Options::default();
2030    /// opts.set_use_direct_reads(true);
2031    /// ```
2032    pub fn set_use_direct_reads(&mut self, enabled: bool) {
2033        unsafe {
2034            ffi::rocksdb_options_set_use_direct_reads(self.inner, c_uchar::from(enabled));
2035        }
2036    }
2037
    /// Enable direct I/O mode for flush and compaction. This may or may not
    /// improve performance depending on the use case.
    ///
    /// Files will be opened in "direct I/O" mode,
    /// which means that data written to the disk will not be cached or
    /// buffered. The hardware buffer of the devices may however still
    /// be used. Memory mapped files are not impacted by these parameters.
2045    ///
2046    /// Default: false
2047    ///
2048    /// # Examples
2049    ///
2050    /// ```
2051    /// use rocksdb::Options;
2052    ///
2053    /// let mut opts = Options::default();
2054    /// opts.set_use_direct_io_for_flush_and_compaction(true);
2055    /// ```
2056    pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
2057        unsafe {
2058            ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
2059                self.inner,
2060                c_uchar::from(enabled),
2061            );
2062        }
2063    }
2064
    /// If true, open files are marked close-on-exec, so child processes do not inherit them.
2066    ///
2067    /// Default: true
2068    pub fn set_is_fd_close_on_exec(&mut self, enabled: bool) {
2069        unsafe {
2070            ffi::rocksdb_options_set_is_fd_close_on_exec(self.inner, c_uchar::from(enabled));
2071        }
2072    }
2073
2074    /// Hints to the OS that it should not buffer disk I/O. Enabling this
2075    /// parameter may improve performance but increases pressure on the
2076    /// system cache.
2077    ///
2078    /// The exact behavior of this parameter is platform dependent.
2079    ///
2080    /// On POSIX systems, after RocksDB reads data from disk it will
2081    /// mark the pages as "unneeded". The operating system may or may not
2082    /// evict these pages from memory, reducing pressure on the system
2083    /// cache. If the disk block is requested again this can result in
2084    /// additional disk I/O.
2085    ///
2086    /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
2087    /// which means that data read from the disk will not be cached or
    /// buffered. The hardware buffer of the devices may however still
2089    /// be used. Memory mapped files are not impacted by this parameter.
2090    ///
2091    /// Default: true
2092    ///
2093    /// # Examples
2094    ///
2095    /// ```
2096    /// use rocksdb::Options;
2097    ///
2098    /// let mut opts = Options::default();
2099    /// #[allow(deprecated)]
2100    /// opts.set_allow_os_buffer(false);
2101    /// ```
2102    #[deprecated(
2103        since = "0.7.0",
2104        note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods"
2105    )]
2106    pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
2107        self.set_use_direct_reads(!is_allow);
2108        self.set_use_direct_io_for_flush_and_compaction(!is_allow);
2109    }
2110
2111    /// Sets the number of shards used for table cache.
2112    ///
2113    /// Default: `6`
2114    ///
2115    /// # Examples
2116    ///
2117    /// ```
2118    /// use rocksdb::Options;
2119    ///
2120    /// let mut opts = Options::default();
2121    /// opts.set_table_cache_num_shard_bits(4);
2122    /// ```
2123    pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
2124        unsafe {
2125            ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
2126        }
2127    }
2128
    /// By default, target_file_size_multiplier is 1, which means
    /// files in different levels will have similar sizes.
2131    ///
2132    /// Dynamically changeable through SetOptions() API
2133    pub fn set_target_file_size_multiplier(&mut self, multiplier: i32) {
2134        unsafe {
2135            ffi::rocksdb_options_set_target_file_size_multiplier(self.inner, multiplier as c_int);
2136        }
2137    }
2138
2139    /// Sets the minimum number of write buffers that will be merged
2140    /// before writing to storage.  If set to `1`, then
2141    /// all write buffers are flushed to L0 as individual files and this increases
2142    /// read amplification because a get request has to check in all of these
    /// files. Also, an in-memory merge may result in writing less
2144    /// data to storage if there are duplicate records in each of these
2145    /// individual write buffers.
2146    ///
2147    /// Default: `1`
2148    ///
2149    /// # Examples
2150    ///
2151    /// ```
2152    /// use rocksdb::Options;
2153    ///
2154    /// let mut opts = Options::default();
2155    /// opts.set_min_write_buffer_number(2);
2156    /// ```
2157    pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
2158        unsafe {
2159            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
2160        }
2161    }
2162
2163    /// Sets the maximum number of write buffers that are built up in memory.
2164    /// The default and the minimum number is 2, so that when 1 write buffer
2165    /// is being flushed to storage, new writes can continue to the other
2166    /// write buffer.
2167    /// If max_write_buffer_number > 3, writing will be slowed down to
2168    /// options.delayed_write_rate if we are writing to the last write buffer
2169    /// allowed.
2170    ///
2171    /// Default: `2`
2172    ///
2173    /// # Examples
2174    ///
2175    /// ```
2176    /// use rocksdb::Options;
2177    ///
2178    /// let mut opts = Options::default();
2179    /// opts.set_max_write_buffer_number(4);
2180    /// ```
2181    pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
2182        unsafe {
2183            ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
2184        }
2185    }
2186
2187    /// Sets the amount of data to build up in memory (backed by an unsorted log
2188    /// on disk) before converting to a sorted on-disk file.
2189    ///
2190    /// Larger values increase performance, especially during bulk loads.
2191    /// Up to max_write_buffer_number write buffers may be held in memory
2192    /// at the same time,
2193    /// so you may wish to adjust this parameter to control memory usage.
2194    /// Also, a larger write buffer will result in a longer recovery time
2195    /// the next time the database is opened.
2196    ///
2197    /// Note that write_buffer_size is enforced per column family.
2198    /// See db_write_buffer_size for sharing memory across column families.
2199    ///
2200    /// Default: `0x4000000` (64MiB)
2201    ///
2202    /// Dynamically changeable through SetOptions() API
2203    ///
2204    /// # Examples
2205    ///
2206    /// ```
2207    /// use rocksdb::Options;
2208    ///
2209    /// let mut opts = Options::default();
2210    /// opts.set_write_buffer_size(128 * 1024 * 1024);
2211    /// ```
2212    pub fn set_write_buffer_size(&mut self, size: usize) {
2213        unsafe {
2214            ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
2215        }
2216    }
2217
2218    /// Amount of data to build up in memtables across all column
2219    /// families before writing to disk.
2220    ///
2221    /// This is distinct from write_buffer_size, which enforces a limit
2222    /// for a single memtable.
2223    ///
2224    /// This feature is disabled by default. Specify a non-zero value
2225    /// to enable it.
2226    ///
2227    /// Default: 0 (disabled)
2228    ///
2229    /// # Examples
2230    ///
2231    /// ```
2232    /// use rocksdb::Options;
2233    ///
2234    /// let mut opts = Options::default();
2235    /// opts.set_db_write_buffer_size(128 * 1024 * 1024);
2236    /// ```
2237    pub fn set_db_write_buffer_size(&mut self, size: usize) {
2238        unsafe {
2239            ffi::rocksdb_options_set_db_write_buffer_size(self.inner, size);
2240        }
2241    }
2242
2243    /// Control maximum total data size for a level.
2244    /// max_bytes_for_level_base is the max total for level-1.
2245    /// Maximum number of bytes for level L can be calculated as
2246    /// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
2247    /// For example, if max_bytes_for_level_base is 200MB, and if
2248    /// max_bytes_for_level_multiplier is 10, total data size for level-1
2249    /// will be 200MB, total file size for level-2 will be 2GB,
2250    /// and total file size for level-3 will be 20GB.
2251    ///
2252    /// Default: `0x10000000` (256MiB).
2253    ///
2254    /// Dynamically changeable through SetOptions() API
2255    ///
2256    /// # Examples
2257    ///
2258    /// ```
2259    /// use rocksdb::Options;
2260    ///
2261    /// let mut opts = Options::default();
2262    /// opts.set_max_bytes_for_level_base(512 * 1024 * 1024);
2263    /// ```
2264    pub fn set_max_bytes_for_level_base(&mut self, size: u64) {
2265        unsafe {
2266            ffi::rocksdb_options_set_max_bytes_for_level_base(self.inner, size);
2267        }
2268    }
2269
2270    /// Default: `10`
2271    ///
2272    /// # Examples
2273    ///
2274    /// ```
2275    /// use rocksdb::Options;
2276    ///
2277    /// let mut opts = Options::default();
2278    /// opts.set_max_bytes_for_level_multiplier(4.0);
2279    /// ```
2280    pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
2281        unsafe {
2282            ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
2283        }
2284    }
2285
2286    /// The manifest file is rolled over on reaching this limit.
    /// The older manifest file will be deleted.
2288    /// The default value is MAX_INT so that roll-over does not take place.
2289    ///
2290    /// # Examples
2291    ///
2292    /// ```
2293    /// use rocksdb::Options;
2294    ///
2295    /// let mut opts = Options::default();
2296    /// opts.set_max_manifest_file_size(20 * 1024 * 1024);
2297    /// ```
2298    pub fn set_max_manifest_file_size(&mut self, size: usize) {
2299        unsafe {
2300            ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
2301        }
2302    }
2303
2304    /// Sets the target file size for compaction.
2305    /// target_file_size_base is per-file size for level-1.
2306    /// Target file size for level L can be calculated by
2307    /// target_file_size_base * (target_file_size_multiplier ^ (L-1))
2308    /// For example, if target_file_size_base is 2MB and
2309    /// target_file_size_multiplier is 10, then each file on level-1 will
2310    /// be 2MB, and each file on level 2 will be 20MB,
2311    /// and each file on level-3 will be 200MB.
2312    ///
2313    /// Default: `0x4000000` (64MiB)
2314    ///
2315    /// Dynamically changeable through SetOptions() API
2316    ///
2317    /// # Examples
2318    ///
2319    /// ```
2320    /// use rocksdb::Options;
2321    ///
2322    /// let mut opts = Options::default();
2323    /// opts.set_target_file_size_base(128 * 1024 * 1024);
2324    /// ```
2325    pub fn set_target_file_size_base(&mut self, size: u64) {
2326        unsafe {
2327            ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
2328        }
2329    }
2330
2331    /// Sets the minimum number of write buffers that will be merged together
2332    /// before writing to storage.  If set to `1`, then
2333    /// all write buffers are flushed to L0 as individual files and this increases
2334    /// read amplification because a get request has to check in all of these
    /// files. Also, an in-memory merge may result in writing less
2336    /// data to storage if there are duplicate records in each of these
2337    /// individual write buffers.
2338    ///
2339    /// Default: `1`
2340    ///
2341    /// # Examples
2342    ///
2343    /// ```
2344    /// use rocksdb::Options;
2345    ///
2346    /// let mut opts = Options::default();
2347    /// opts.set_min_write_buffer_number_to_merge(2);
2348    /// ```
2349    pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
2350        unsafe {
2351            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
2352        }
2353    }
2354
2355    /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
2356    /// level-0 compaction will not be triggered by number of files at all.
2357    ///
2358    /// Default: `4`
2359    ///
2360    /// Dynamically changeable through SetOptions() API
2361    ///
2362    /// # Examples
2363    ///
2364    /// ```
2365    /// use rocksdb::Options;
2366    ///
2367    /// let mut opts = Options::default();
2368    /// opts.set_level_zero_file_num_compaction_trigger(8);
2369    /// ```
2370    pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
2371        unsafe {
2372            ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
2373        }
2374    }
2375
2376    /// Sets the compaction priority. When multiple files are picked for compaction from a level,
2377    /// this option determines which files to pick first.
2378    ///
2379    /// Default: `CompactionPri::ByCompensatedSize`
2380    ///
2381    /// Dynamically changeable through SetOptions() API
2382    ///
2383    /// See [rocksdb post](https://github.com/facebook/rocksdb/blob/f20d12adc85ece3e75fb238872959c702c0e5535/docs/_posts/2016-01-29-compaction_pri.markdown) for more details.
2384    ///
2385    /// # Examples
2386    ///
2387    /// ```
2388    /// use rocksdb::{Options, CompactionPri};
2389    ///
2390    /// let mut opts = Options::default();
2391    /// opts.set_compaction_pri(CompactionPri::MinOverlappingRatio);
2392    /// ```
2393    pub fn set_compaction_pri(&mut self, pri: CompactionPri) {
2394        unsafe {
2395            ffi::rocksdb_options_set_compaction_pri(self.inner, pri as i32);
2396        }
2397    }
2398
2399    /// Sets the soft limit on number of level-0 files. We start slowing down writes at this
2400    /// point. A value < `0` means that no writing slowdown will be triggered by
2401    /// number of files in level-0.
2402    ///
2403    /// Default: `20`
2404    ///
2405    /// Dynamically changeable through SetOptions() API
2406    ///
2407    /// # Examples
2408    ///
2409    /// ```
2410    /// use rocksdb::Options;
2411    ///
2412    /// let mut opts = Options::default();
2413    /// opts.set_level_zero_slowdown_writes_trigger(10);
2414    /// ```
2415    pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
2416        unsafe {
2417            ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
2418        }
2419    }
2420
2421    /// Sets the maximum number of level-0 files.  We stop writes at this point.
2422    ///
2423    /// Default: `24`
2424    ///
2425    /// Dynamically changeable through SetOptions() API
2426    ///
2427    /// # Examples
2428    ///
2429    /// ```
2430    /// use rocksdb::Options;
2431    ///
2432    /// let mut opts = Options::default();
2433    /// opts.set_level_zero_stop_writes_trigger(48);
2434    /// ```
2435    pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
2436        unsafe {
2437            ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
2438        }
2439    }
2440
2441    /// Sets the compaction style.
2442    ///
2443    /// Default: DBCompactionStyle::Level
2444    ///
2445    /// # Examples
2446    ///
2447    /// ```
2448    /// use rocksdb::{Options, DBCompactionStyle};
2449    ///
2450    /// let mut opts = Options::default();
2451    /// opts.set_compaction_style(DBCompactionStyle::Universal);
2452    /// ```
2453    pub fn set_compaction_style(&mut self, style: DBCompactionStyle) {
2454        unsafe {
2455            ffi::rocksdb_options_set_compaction_style(self.inner, style as c_int);
2456        }
2457    }
2458
2459    /// Sets the options needed to support Universal Style compactions.
2460    pub fn set_universal_compaction_options(&mut self, uco: &UniversalCompactOptions) {
2461        unsafe {
2462            ffi::rocksdb_options_set_universal_compaction_options(self.inner, uco.inner);
2463        }
2464    }
2465
2466    /// Sets the options for FIFO compaction style.
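    ///
    /// # Examples
    ///
    /// A sketch capping the total FIFO table size at an illustrative 4 GiB:
    ///
    /// ```
    /// use rocksdb::{DBCompactionStyle, FifoCompactOptions, Options};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Fifo);
    /// let mut fifo_opts = FifoCompactOptions::default();
    /// fifo_opts.set_max_table_files_size(4 * 1024 * 1024 * 1024);
    /// opts.set_fifo_compaction_options(&fifo_opts);
    /// ```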
2467    pub fn set_fifo_compaction_options(&mut self, fco: &FifoCompactOptions) {
2468        unsafe {
2469            ffi::rocksdb_options_set_fifo_compaction_options(self.inner, fco.inner);
2470        }
2471    }
2472
    /// Setting unordered_write to true trades higher write throughput for
    /// relaxing the immutability guarantee of snapshots. This violates the
2475    /// repeatability one expects from ::Get from a snapshot, as well as
2476    /// ::MultiGet and Iterator's consistent-point-in-time view property.
2477    /// If the application cannot tolerate the relaxed guarantees, it can implement
2478    /// its own mechanisms to work around that and yet benefit from the higher
2479    /// throughput. Using TransactionDB with WRITE_PREPARED write policy and
2480    /// two_write_queues=true is one way to achieve immutable snapshots despite
2481    /// unordered_write.
2482    ///
2483    /// By default, i.e., when it is false, rocksdb does not advance the sequence
2484    /// number for new snapshots unless all the writes with lower sequence numbers
2485    /// are already finished. This provides the immutability that we expect from
2486    /// snapshots. Moreover, since Iterator and MultiGet internally depend on
2487    /// snapshots, the snapshot immutability results into Iterator and MultiGet
2488    /// offering consistent-point-in-time view. If set to true, although
2489    /// Read-Your-Own-Write property is still provided, the snapshot immutability
2490    /// property is relaxed: the writes issued after the snapshot is obtained (with
2491    /// larger sequence numbers) will be still not visible to the reads from that
2492    /// snapshot, however, there still might be pending writes (with lower sequence
2493    /// number) that will change the state visible to the snapshot after they are
2494    /// landed to the memtable.
2495    ///
2496    /// Default: false
2497    pub fn set_unordered_write(&mut self, unordered: bool) {
2498        unsafe {
2499            ffi::rocksdb_options_set_unordered_write(self.inner, c_uchar::from(unordered));
2500        }
2501    }
2502
2503    /// Sets maximum number of threads that will
2504    /// concurrently perform a compaction job by breaking it into multiple,
2505    /// smaller ones that are run simultaneously.
2506    ///
2507    /// Default: 1 (i.e. no subcompactions)
2508    pub fn set_max_subcompactions(&mut self, num: u32) {
2509        unsafe {
2510            ffi::rocksdb_options_set_max_subcompactions(self.inner, num);
2511        }
2512    }
2513
2514    /// Sets maximum number of concurrent background jobs
2515    /// (compactions and flushes).
2516    ///
2517    /// Default: 2
2518    ///
2519    /// Dynamically changeable through SetDBOptions() API.
2520    pub fn set_max_background_jobs(&mut self, jobs: c_int) {
2521        unsafe {
2522            ffi::rocksdb_options_set_max_background_jobs(self.inner, jobs);
2523        }
2524    }
2525
2526    /// Sets the maximum number of concurrent background compaction jobs, submitted to
2527    /// the default LOW priority thread pool.
2528    /// We first try to schedule compactions based on
    /// `base_background_compactions`. If the compaction cannot catch up, we
2530    /// will increase number of compaction threads up to
2531    /// `max_background_compactions`.
2532    ///
2533    /// If you're increasing this, also consider increasing number of threads in
2534    /// LOW priority thread pool. For more information, see
2535    /// Env::SetBackgroundThreads
2536    ///
2537    /// Default: `1`
2538    ///
2539    /// # Examples
2540    ///
2541    /// ```
2542    /// use rocksdb::Options;
2543    ///
2544    /// let mut opts = Options::default();
2545    /// #[allow(deprecated)]
2546    /// opts.set_max_background_compactions(2);
2547    /// ```
2548    #[deprecated(
2549        since = "0.15.0",
2550        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2551    )]
2552    pub fn set_max_background_compactions(&mut self, n: c_int) {
2553        unsafe {
2554            ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
2555        }
2556    }
2557
2558    /// Sets the maximum number of concurrent background memtable flush jobs, submitted to
2559    /// the HIGH priority thread pool.
2560    ///
2561    /// By default, all background jobs (major compaction and memtable flush) go
2562    /// to the LOW priority pool. If this option is set to a positive number,
2563    /// memtable flush jobs will be submitted to the HIGH priority pool.
2564    /// It is important when the same Env is shared by multiple db instances.
2565    /// Without a separate pool, long running major compaction jobs could
2566    /// potentially block memtable flush jobs of other db instances, leading to
2567    /// unnecessary Put stalls.
2568    ///
2569    /// If you're increasing this, also consider increasing number of threads in
2570    /// HIGH priority thread pool. For more information, see
2571    /// Env::SetBackgroundThreads
2572    ///
2573    /// Default: `1`
2574    ///
2575    /// # Examples
2576    ///
2577    /// ```
2578    /// use rocksdb::Options;
2579    ///
2580    /// let mut opts = Options::default();
2581    /// #[allow(deprecated)]
2582    /// opts.set_max_background_flushes(2);
2583    /// ```
2584    #[deprecated(
2585        since = "0.15.0",
2586        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2587    )]
2588    pub fn set_max_background_flushes(&mut self, n: c_int) {
2589        unsafe {
2590            ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
2591        }
2592    }
2593
2594    /// Disables automatic compactions. Manual compactions can still
    /// be issued on this column family.
2596    ///
2597    /// Default: `false`
2598    ///
2599    /// Dynamically changeable through SetOptions() API
2600    ///
2601    /// # Examples
2602    ///
2603    /// ```
2604    /// use rocksdb::Options;
2605    ///
2606    /// let mut opts = Options::default();
2607    /// opts.set_disable_auto_compactions(true);
2608    /// ```
2609    pub fn set_disable_auto_compactions(&mut self, disable: bool) {
2610        unsafe {
2611            ffi::rocksdb_options_set_disable_auto_compactions(self.inner, c_int::from(disable));
2612        }
2613    }
2614
    /// Sets the huge page size for the arena used by the memtable.
    /// If <= 0, the arena won't allocate from huge pages but from malloc.
    /// Users are responsible for reserving huge pages for it to allocate from. For
    /// example:
    ///      sysctl -w vm.nr_hugepages=20
    /// See the Linux doc Documentation/vm/hugetlbpage.txt.
    /// If there aren't enough free huge pages available, it will fall back to
    /// malloc.
2624    ///
2625    /// Dynamically changeable through SetOptions() API
2626    pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
2627        unsafe {
2628            ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size);
2629        }
2630    }
2631
2632    /// Sets the maximum number of successive merge operations on a key in the memtable.
2633    ///
2634    /// When a merge operation is added to the memtable and the maximum number of
2635    /// successive merges is reached, the value of the key will be calculated and
2636    /// inserted into the memtable instead of the merge operation. This will
2637    /// ensure that there are never more than max_successive_merges merge
2638    /// operations in the memtable.
2639    ///
2640    /// Default: 0 (disabled)
2641    pub fn set_max_successive_merges(&mut self, num: usize) {
2642        unsafe {
2643            ffi::rocksdb_options_set_max_successive_merges(self.inner, num);
2644        }
2645    }
2646
2647    /// Control locality of bloom filter probes to improve cache miss rate.
2648    /// This option only applies to memtable prefix bloom and plaintable
2649    /// prefix bloom. It essentially limits the max number of cache lines each
2650    /// bloom filter check can touch.
2651    ///
    /// This optimization is turned off when set to 0. The number should never
    /// be greater than the number of probes. This option can boost performance
    /// for in-memory workloads but should be used with care since it can cause
    /// a higher false positive rate.
2656    ///
2657    /// Default: 0
2658    pub fn set_bloom_locality(&mut self, v: u32) {
2659        unsafe {
2660            ffi::rocksdb_options_set_bloom_locality(self.inner, v);
2661        }
2662    }
2663
2664    /// Enable/disable thread-safe inplace updates.
2665    ///
    /// An update is done in-place if:
    /// * the key exists in the current memtable
    /// * sizeof(new_value) <= sizeof(old_value)
    /// * the old value for that key is a put, i.e. kTypeValue
2670    ///
2671    /// Default: false.
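    ///
    /// # Examples
    ///
    /// A sketch; note that in-place updates are incompatible with concurrent
    /// memtable writes (see `set_allow_concurrent_memtable_write`).
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_inplace_update_support(true);
    /// opts.set_allow_concurrent_memtable_write(false);
    /// ```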
2672    pub fn set_inplace_update_support(&mut self, enabled: bool) {
2673        unsafe {
2674            ffi::rocksdb_options_set_inplace_update_support(self.inner, c_uchar::from(enabled));
2675        }
2676    }
2677
2678    /// Sets the number of locks used for inplace update.
2679    ///
2680    /// Default: 10000 when inplace_update_support = true, otherwise 0.
2681    pub fn set_inplace_update_locks(&mut self, num: usize) {
2682        unsafe {
2683            ffi::rocksdb_options_set_inplace_update_num_locks(self.inner, num);
2684        }
2685    }
2686
2687    /// Different max-size multipliers for different levels.
2688    /// These are multiplied by max_bytes_for_level_multiplier to arrive
2689    /// at the max-size of each level.
2690    ///
2691    /// Default: 1
2692    ///
2693    /// Dynamically changeable through SetOptions() API
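    ///
    /// # Examples
    ///
    /// A sketch with one illustrative entry per level:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_bytes_for_level_multiplier_additional(&[1, 1, 1, 2, 2, 4, 4]);
    /// ```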
2694    pub fn set_max_bytes_for_level_multiplier_additional(&mut self, level_values: &[i32]) {
2695        let count = level_values.len();
2696        unsafe {
2697            ffi::rocksdb_options_set_max_bytes_for_level_multiplier_additional(
2698                self.inner,
2699                level_values.as_ptr().cast_mut(),
2700                count,
2701            );
2702        }
2703    }
2704
2705    /// If true, then DB::Open() will not fetch and check sizes of all sst files.
2706    /// This may significantly speed up startup if there are many sst files,
2707    /// especially when using non-default Env with expensive GetFileSize().
2708    /// We'll still check that all required sst files exist.
2709    /// If paranoid_checks is false, this option is ignored, and sst files are
2710    /// not checked at all.
2711    ///
2712    /// Default: false
2713    #[deprecated(note = "RocksDB >= 10.5: option is ignored: checking done with a thread pool")]
2714    pub fn set_skip_checking_sst_file_sizes_on_db_open(&mut self, value: bool) {
2715        unsafe {
2716            ffi::rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open(
2717                self.inner,
2718                c_uchar::from(value),
2719            );
2720        }
2721    }
2722
    /// The total maximum size (in bytes) of write buffers to maintain in memory
2724    /// including copies of buffers that have already been flushed. This parameter
2725    /// only affects trimming of flushed buffers and does not affect flushing.
2726    /// This controls the maximum amount of write history that will be available
2727    /// in memory for conflict checking when Transactions are used. The actual
2728    /// size of write history (flushed Memtables) might be higher than this limit
2729    /// if further trimming will reduce write history total size below this
2730    /// limit. For example, if max_write_buffer_size_to_maintain is set to 64MB,
2731    /// and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB.
2732    /// Because trimming the next Memtable of size 20MB will reduce total memory
2733    /// usage to 52MB which is below the limit, RocksDB will stop trimming.
2734    ///
2735    /// When using an OptimisticTransactionDB:
2736    /// If this value is too low, some transactions may fail at commit time due
2737    /// to not being able to determine whether there were any write conflicts.
2738    ///
2739    /// When using a TransactionDB:
2740    /// If Transaction::SetSnapshot is used, TransactionDB will read either
2741    /// in-memory write buffers or SST files to do write-conflict checking.
2742    /// Increasing this value can reduce the number of reads to SST files
2743    /// done for conflict detection.
2744    ///
2745    /// Setting this value to 0 will cause write buffers to be freed immediately
2746    /// after they are flushed. If this value is set to -1,
2747    /// 'max_write_buffer_number * write_buffer_size' will be used.
2748    ///
2749    /// Default:
2750    /// If using a TransactionDB/OptimisticTransactionDB, the default value will
2751    /// be set to the value of 'max_write_buffer_number * write_buffer_size'
2752    /// if it is not explicitly set by the user.  Otherwise, the default is 0.
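    ///
    /// # Examples
    ///
    /// A sketch using the 64MB figure from the example above:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_write_buffer_size_to_maintain(64 * 1024 * 1024);
    /// ```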
2753    pub fn set_max_write_buffer_size_to_maintain(&mut self, size: i64) {
2754        unsafe {
2755            ffi::rocksdb_options_set_max_write_buffer_size_to_maintain(self.inner, size);
2756        }
2757    }
2758
    /// By default, a single write thread queue is maintained. The thread that gets
    /// to the head of the queue becomes the write batch group leader and is responsible
    /// for writing to the WAL and the memtable for the batch group.
    ///
    /// If enable_pipelined_write is true, separate write thread queues are
    /// maintained for WAL writes and memtable writes. A write thread first enters the WAL
    /// writer queue and then the memtable writer queue. A pending thread on the WAL
    /// writer queue thus only has to wait for previous writers to finish their
    /// WAL writing, but not their memtable writing. Enabling this feature may improve
    /// write throughput and reduce the latency of the prepare phase of two-phase
    /// commit.
2770    ///
2771    /// Default: false
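    ///
    /// # Examples
    ///
    /// A sketch enabling pipelined writes:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_enable_pipelined_write(true);
    /// ```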
2772    pub fn set_enable_pipelined_write(&mut self, value: bool) {
2773        unsafe {
2774            ffi::rocksdb_options_set_enable_pipelined_write(self.inner, c_uchar::from(value));
2775        }
2776    }
2777
2778    /// Defines the underlying memtable implementation.
2779    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
2780    /// Defaults to using a skiplist.
2781    ///
2782    /// # Examples
2783    ///
2784    /// ```
2785    /// use rocksdb::{Options, MemtableFactory};
2786    /// let mut opts = Options::default();
2787    /// let factory = MemtableFactory::HashSkipList {
2788    ///     bucket_count: 1_000_000,
2789    ///     height: 4,
2790    ///     branching_factor: 4,
2791    /// };
2792    ///
2793    /// opts.set_allow_concurrent_memtable_write(false);
2794    /// opts.set_memtable_factory(factory);
2795    /// ```
2796    pub fn set_memtable_factory(&mut self, factory: MemtableFactory) {
2797        match factory {
2798            MemtableFactory::Vector => unsafe {
2799                ffi::rocksdb_options_set_memtable_vector_rep(self.inner);
2800            },
2801            MemtableFactory::HashSkipList {
2802                bucket_count,
2803                height,
2804                branching_factor,
2805            } => unsafe {
2806                ffi::rocksdb_options_set_hash_skip_list_rep(
2807                    self.inner,
2808                    bucket_count,
2809                    height,
2810                    branching_factor,
2811                );
2812            },
2813            MemtableFactory::HashLinkList { bucket_count } => unsafe {
2814                ffi::rocksdb_options_set_hash_link_list_rep(self.inner, bucket_count);
2815            },
2816        }
2817    }
2818
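    /// Sets the table factory to a block-based table factory with the given
    /// options, and keeps the factory's shared resources (such as its block
    /// cache) alive for as long as these options are.
    ///
    /// # Examples
    ///
    /// A sketch with an illustrative block size:
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_block_size(16 * 1024);
    /// opts.set_block_based_table_factory(&block_opts);
    /// ```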
2819    pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
2820        unsafe {
2821            ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
2822        }
2823        self.outlive.block_based = Some(factory.outlive.clone());
2824    }
2825
2826    /// Sets the table factory to a CuckooTableFactory (the default table
2827    /// factory is a block-based table factory that provides a default
2828    /// implementation of TableBuilder and TableReader with default
2829    /// BlockBasedTableOptions).
2830    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/CuckooTable-Format) for more information on this table format.
2831    /// # Examples
2832    ///
2833    /// ```
2834    /// use rocksdb::{Options, CuckooTableOptions};
2835    ///
2836    /// let mut opts = Options::default();
2837    /// let mut factory_opts = CuckooTableOptions::default();
2838    /// factory_opts.set_hash_ratio(0.8);
2839    /// factory_opts.set_max_search_depth(20);
2840    /// factory_opts.set_cuckoo_block_size(10);
2841    /// factory_opts.set_identity_as_first_hash(true);
2842    /// factory_opts.set_use_module_hash(false);
2843    ///
2844    /// opts.set_cuckoo_table_factory(&factory_opts);
2845    /// ```
2846    pub fn set_cuckoo_table_factory(&mut self, factory: &CuckooTableOptions) {
2847        unsafe {
2848            ffi::rocksdb_options_set_cuckoo_table_factory(self.inner, factory.inner);
2849        }
2850    }
2851
2852    // This is a factory that provides TableFactory objects.
2853    // Default: a block-based table factory that provides a default
2854    // implementation of TableBuilder and TableReader with default
2855    // BlockBasedTableOptions.
2856    /// Sets the factory as plain table.
2857    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
2858    /// information.
2859    ///
2860    /// # Examples
2861    ///
2862    /// ```
2863    /// use rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions};
2864    ///
2865    /// let mut opts = Options::default();
2866    /// let factory_opts = PlainTableFactoryOptions {
2867    ///   user_key_length: 0,
2868    ///   bloom_bits_per_key: 20,
2869    ///   hash_table_ratio: 0.75,
2870    ///   index_sparseness: 16,
2871    ///   huge_page_tlb_size: 0,
2872    ///   encoding_type: KeyEncodingType::Plain,
2873    ///   full_scan_mode: false,
2874    ///   store_index_in_file: false,
2875    /// };
2876    ///
2877    /// opts.set_plain_table_factory(&factory_opts);
2878    /// ```
2879    pub fn set_plain_table_factory(&mut self, options: &PlainTableFactoryOptions) {
2880        unsafe {
2881            ffi::rocksdb_options_set_plain_table_factory(
2882                self.inner,
2883                options.user_key_length,
2884                options.bloom_bits_per_key,
2885                options.hash_table_ratio,
2886                options.index_sparseness,
2887                options.huge_page_tlb_size,
2888                options.encoding_type as c_char,
2889                c_uchar::from(options.full_scan_mode),
2890                c_uchar::from(options.store_index_in_file),
2891            );
2892        }
2893    }
2894
2895    /// Sets the start level to use compression.
2896    pub fn set_min_level_to_compress(&mut self, lvl: c_int) {
2897        unsafe {
2898            ffi::rocksdb_options_set_min_level_to_compress(self.inner, lvl);
2899        }
2900    }
2901
2902    /// Measure IO stats in compactions and flushes, if `true`.
2903    ///
2904    /// Default: `false`
2905    ///
2906    /// # Examples
2907    ///
2908    /// ```
2909    /// use rocksdb::Options;
2910    ///
2911    /// let mut opts = Options::default();
2912    /// opts.set_report_bg_io_stats(true);
2913    /// ```
2914    pub fn set_report_bg_io_stats(&mut self, enable: bool) {
2915        unsafe {
2916            ffi::rocksdb_options_set_report_bg_io_stats(self.inner, c_int::from(enable));
2917        }
2918    }
2919
2920    /// Once write-ahead logs exceed this size, we will start forcing the flush of
2921    /// column families whose memtables are backed by the oldest live WAL file
2922    /// (i.e. the ones that are causing all the space amplification).
2923    ///
2924    /// Default: `0`
2925    ///
2926    /// # Examples
2927    ///
2928    /// ```
2929    /// use rocksdb::Options;
2930    ///
2931    /// let mut opts = Options::default();
2932    /// // Set max total wal size to 1G.
2933    /// opts.set_max_total_wal_size(1 << 30);
2934    /// ```
2935    pub fn set_max_total_wal_size(&mut self, size: u64) {
2936        unsafe {
2937            ffi::rocksdb_options_set_max_total_wal_size(self.inner, size);
2938        }
2939    }
2940
2941    /// Recovery mode to control the consistency while replaying WAL.
2942    ///
2943    /// Default: DBRecoveryMode::PointInTime
2944    ///
2945    /// # Examples
2946    ///
2947    /// ```
2948    /// use rocksdb::{Options, DBRecoveryMode};
2949    ///
2950    /// let mut opts = Options::default();
2951    /// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
2952    /// ```
2953    pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
2954        unsafe {
2955            ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode as c_int);
2956        }
2957    }
2958
2959    pub fn enable_statistics(&mut self) {
2960        unsafe {
2961            ffi::rocksdb_options_enable_statistics(self.inner);
2962        }
2963    }
2964
2965    pub fn get_statistics(&self) -> Option<String> {
2966        unsafe {
2967            let value = ffi::rocksdb_options_statistics_get_string(self.inner);
2968            if value.is_null() {
2969                return None;
2970            }
2971
2972            // Must have valid UTF-8 format.
2973            let s = CStr::from_ptr(value).to_str().unwrap().to_owned();
2974            ffi::rocksdb_free(value as *mut c_void);
2975            Some(s)
2976        }
2977    }
2978
2979    /// StatsLevel can be used to reduce statistics overhead by skipping certain
2980    /// types of stats in the stats collection process.
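    ///
    /// # Examples
    ///
    /// A minimal sketch; the `statistics` module path and the
    /// `ExceptDetailedTimers` variant are assumed from this crate's statistics
    /// API:
    ///
    /// ```
    /// use rocksdb::statistics::StatsLevel;
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.enable_statistics();
    /// opts.set_statistics_level(StatsLevel::ExceptDetailedTimers);
    /// ```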
2981    pub fn set_statistics_level(&self, level: StatsLevel) {
2982        unsafe { ffi::rocksdb_options_set_statistics_level(self.inner, level as c_int) }
2983    }
2984
2985    /// Returns the value of a cumulative DB counter if stats collection is enabled.
2986    pub fn get_ticker_count(&self, ticker: Ticker) -> u64 {
2987        unsafe { ffi::rocksdb_options_statistics_get_ticker_count(self.inner, ticker as u32) }
2988    }
2989
2990    /// Gets Histogram data from collected db stats. Requires stats to be enabled.
2991    pub fn get_histogram_data(&self, histogram: Histogram) -> HistogramData {
2992        unsafe {
2993            let data = HistogramData::default();
2994            ffi::rocksdb_options_statistics_get_histogram_data(
2995                self.inner,
2996                histogram as u32,
2997                data.inner,
2998            );
2999            data
3000        }
3001    }
3002
3003    /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
3004    ///
3005    /// Default: `600` (10 mins)
3006    ///
3007    /// # Examples
3008    ///
3009    /// ```
3010    /// use rocksdb::Options;
3011    ///
3012    /// let mut opts = Options::default();
3013    /// opts.set_stats_dump_period_sec(300);
3014    /// ```
3015    pub fn set_stats_dump_period_sec(&mut self, period: c_uint) {
3016        unsafe {
3017            ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
3018        }
3019    }
3020
3021    /// If not zero, persist `rocksdb.stats` to RocksDB every `stats_persist_period_sec`.
3022    ///
3023    /// Default: `600` (10 mins)
3024    ///
3025    /// # Examples
3026    ///
3027    /// ```
3028    /// use rocksdb::Options;
3029    ///
3030    /// let mut opts = Options::default();
3031    /// opts.set_stats_persist_period_sec(5);
3032    /// ```
3033    pub fn set_stats_persist_period_sec(&mut self, period: c_uint) {
3034        unsafe {
3035            ffi::rocksdb_options_set_stats_persist_period_sec(self.inner, period);
3036        }
3037    }
3038
3039    /// When set to true, reading SST files will opt out of the filesystem's
3040    /// readahead. Setting this to false may improve sequential iteration
3041    /// performance.
3042    ///
3043    /// Default: `true`
3044    pub fn set_advise_random_on_open(&mut self, advise: bool) {
3045        unsafe {
3046            ffi::rocksdb_options_set_advise_random_on_open(self.inner, c_uchar::from(advise));
3047        }
3048    }
3049
3050    /// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
3051    ///
3052    /// This could reduce context switch when the mutex is not
3053    /// heavily contended. However, if the mutex is hot, we could end up
3054    /// wasting spin time.
3055    ///
3056    /// Default: false
3057    pub fn set_use_adaptive_mutex(&mut self, enabled: bool) {
3058        unsafe {
3059            ffi::rocksdb_options_set_use_adaptive_mutex(self.inner, c_uchar::from(enabled));
3060        }
3061    }
3062
3063    /// Sets the number of levels for this database.
3064    pub fn set_num_levels(&mut self, n: c_int) {
3065        unsafe {
3066            ffi::rocksdb_options_set_num_levels(self.inner, n);
3067        }
3068    }
3069
3070    /// When a `prefix_extractor` is defined through `opts.set_prefix_extractor` this
3071    /// creates a prefix bloom filter for each memtable with the size of
3072    /// `write_buffer_size * memtable_prefix_bloom_ratio` (capped at 0.25).
3073    ///
3074    /// Default: `0`
3075    ///
3076    /// # Examples
3077    ///
3078    /// ```
3079    /// use rocksdb::{Options, SliceTransform};
3080    ///
3081    /// let mut opts = Options::default();
3082    /// let transform = SliceTransform::create_fixed_prefix(10);
3083    /// opts.set_prefix_extractor(transform);
3084    /// opts.set_memtable_prefix_bloom_ratio(0.2);
3085    /// ```
3086    pub fn set_memtable_prefix_bloom_ratio(&mut self, ratio: f64) {
3087        unsafe {
3088            ffi::rocksdb_options_set_memtable_prefix_bloom_size_ratio(self.inner, ratio);
3089        }
3090    }
3091
3092    /// Sets the maximum number of bytes in all compacted files.
3093    /// We try to limit the number of bytes in one compaction to be lower than
3094    /// this threshold, but it's not guaranteed.
3095    ///
3096    /// Value 0 will be sanitized.
3097    ///
3098    /// Default: target_file_size_base * 25
3099    pub fn set_max_compaction_bytes(&mut self, nbytes: u64) {
3100        unsafe {
3101            ffi::rocksdb_options_set_max_compaction_bytes(self.inner, nbytes);
3102        }
3103    }
3104
3105    /// Specifies the absolute path of the directory the
3106    /// write-ahead log (WAL) should be written to.
3107    ///
3108    /// Default: same directory as the database
3109    ///
3110    /// # Examples
3111    ///
3112    /// ```
3113    /// use rocksdb::Options;
3114    ///
3115    /// let mut opts = Options::default();
3116    /// opts.set_wal_dir("/path/to/dir");
3117    /// ```
3118    pub fn set_wal_dir<P: AsRef<Path>>(&mut self, path: P) {
3119        let p = to_cpath(path).unwrap();
3120        unsafe {
3121            ffi::rocksdb_options_set_wal_dir(self.inner, p.as_ptr());
3122        }
3123    }
3124
3125    /// Sets the WAL ttl in seconds.
3126    ///
3127    /// The following two options affect how archived logs will be deleted.
3128    /// 1. If both are set to 0, logs will be deleted asap and will not get into
3129    ///    the archive.
3130    /// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
3131    ///    WAL files will be checked every 10 min, and if the total size is
3132    ///    greater than wal_size_limit_mb, they will be deleted starting with the
3133    ///    earliest until size_limit is met. All empty files will be deleted.
3134    /// 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then
3135    ///    WAL files will be checked every wal_ttl_seconds / 2 and those that
3136    ///    are older than wal_ttl_seconds will be deleted.
3137    /// 4. If both are not 0, WAL files will be checked every 10 min and both
3138    ///    checks will be performed with ttl being first.
3139    ///
3140    /// Default: 0
3141    pub fn set_wal_ttl_seconds(&mut self, secs: u64) {
3142        unsafe {
3143            ffi::rocksdb_options_set_WAL_ttl_seconds(self.inner, secs);
3144        }
3145    }
3146
3147    /// Sets the WAL size limit in MB.
3148    ///
3149    /// If the total size of WAL files is greater than wal_size_limit_mb,
3150    /// they will be deleted starting with the earliest until size_limit is met.
3151    ///
3152    /// Default: 0
3153    pub fn set_wal_size_limit_mb(&mut self, size: u64) {
3154        unsafe {
3155            ffi::rocksdb_options_set_WAL_size_limit_MB(self.inner, size);
3156        }
3157    }
3158
3159    /// Sets the number of bytes to preallocate (via fallocate) for the manifest files.
3160    ///
3161    /// Default is 4MB, which is reasonable to reduce random IO
3162    /// as well as prevent overallocation for mounts that preallocate
3163    /// large amounts of data (such as xfs's allocsize option).
3164    pub fn set_manifest_preallocation_size(&mut self, size: usize) {
3165        unsafe {
3166            ffi::rocksdb_options_set_manifest_preallocation_size(self.inner, size);
3167        }
3168    }
3169
3170    /// If true, then DB::Open() will not update the statistics used to optimize
3171    /// compaction decisions by loading table properties from many files.
3172    /// Turning this update off will improve DB::Open() time, especially on disk.
3173    ///
3174    /// Default: false
3175    pub fn set_skip_stats_update_on_db_open(&mut self, skip: bool) {
3176        unsafe {
3177            ffi::rocksdb_options_set_skip_stats_update_on_db_open(self.inner, c_uchar::from(skip));
3178        }
3179    }
3180
3181    /// Specify the maximal number of info log files to be kept.
3182    ///
3183    /// Default: 1000
3184    ///
3185    /// # Examples
3186    ///
3187    /// ```
3188    /// use rocksdb::Options;
3189    ///
3190    /// let mut options = Options::default();
3191    /// options.set_keep_log_file_num(100);
3192    /// ```
3193    pub fn set_keep_log_file_num(&mut self, nfiles: usize) {
3194        unsafe {
3195            ffi::rocksdb_options_set_keep_log_file_num(self.inner, nfiles);
3196        }
3197    }
3198
3199    /// Allow the OS to mmap files for writing.
3200    ///
3201    /// Default: false
3202    ///
3203    /// # Examples
3204    ///
3205    /// ```
3206    /// use rocksdb::Options;
3207    ///
3208    /// let mut options = Options::default();
3209    /// options.set_allow_mmap_writes(true);
3210    /// ```
3211    pub fn set_allow_mmap_writes(&mut self, is_enabled: bool) {
3212        unsafe {
3213            ffi::rocksdb_options_set_allow_mmap_writes(self.inner, c_uchar::from(is_enabled));
3214        }
3215    }
3216
3217    /// Allow the OS to mmap files for reading SST tables.
3218    ///
3219    /// Default: false
3220    ///
3221    /// # Examples
3222    ///
3223    /// ```
3224    /// use rocksdb::Options;
3225    ///
3226    /// let mut options = Options::default();
3227    /// options.set_allow_mmap_reads(true);
3228    /// ```
3229    pub fn set_allow_mmap_reads(&mut self, is_enabled: bool) {
3230        unsafe {
3231            ffi::rocksdb_options_set_allow_mmap_reads(self.inner, c_uchar::from(is_enabled));
3232        }
3233    }
3234
3235    /// If enabled, WAL is not flushed automatically after each write. Instead it
3236    /// relies on manual invocation of `DB::flush_wal()` to write the WAL buffer
3237    /// to its file.
3238    ///
3239    /// Default: false
3240    ///
3241    /// # Examples
3242    ///
3243    /// ```
3244    /// use rocksdb::Options;
3245    ///
3246    /// let mut options = Options::default();
3247    /// options.set_manual_wal_flush(true);
3248    /// ```
3249    pub fn set_manual_wal_flush(&mut self, is_enabled: bool) {
3250        unsafe {
3251            ffi::rocksdb_options_set_manual_wal_flush(self.inner, c_uchar::from(is_enabled));
3252        }
3253    }
3254
3255    /// Guarantee that all column families are flushed together atomically.
3256    /// This option applies to both manual flushes (`db.flush()`) and automatic
3257    /// background flushes caused when memtables are filled.
3258    ///
3259    /// Note that this is only useful when the WAL is disabled. When using the
3260    /// WAL, writes are always consistent across column families.
3261    ///
3262    /// Default: false
3263    ///
3264    /// # Examples
3265    ///
3266    /// ```
3267    /// use rocksdb::Options;
3268    ///
3269    /// let mut options = Options::default();
3270    /// options.set_atomic_flush(true);
3271    /// ```
3272    pub fn set_atomic_flush(&mut self, atomic_flush: bool) {
3273        unsafe {
3274            ffi::rocksdb_options_set_atomic_flush(self.inner, c_uchar::from(atomic_flush));
3275        }
3276    }
3277
3278    /// Sets global cache for table-level rows.
3279    ///
3280    /// Default: null (disabled)
3281    /// Not supported in ROCKSDB_LITE mode!
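    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the `Cache::new_lru_cache` constructor of
    /// recent releases; the 64 MiB capacity is illustrative:
    ///
    /// ```
    /// use rocksdb::{Cache, Options};
    ///
    /// let mut opts = Options::default();
    /// let cache = Cache::new_lru_cache(64 * 1024 * 1024); // 64 MiB row cache
    /// opts.set_row_cache(&cache);
    /// ```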
3282    pub fn set_row_cache(&mut self, cache: &Cache) {
3283        unsafe {
3284            ffi::rocksdb_options_set_row_cache(self.inner, cache.0.inner.as_ptr());
3285        }
3286        self.outlive.row_cache = Some(cache.clone());
3287    }
3288
3289    /// Used to control the write rate of flushes and compactions. Flush has
3290    /// higher priority than compaction.
3291    /// If the rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3292    ///
3293    /// Default: disabled
3294    ///
3295    /// # Examples
3296    ///
3297    /// ```
3298    /// use rocksdb::Options;
3299    ///
3300    /// let mut options = Options::default();
3301    /// options.set_ratelimiter(1024 * 1024, 100 * 1000, 10);
3302    /// ```
3303    pub fn set_ratelimiter(
3304        &mut self,
3305        rate_bytes_per_sec: i64,
3306        refill_period_us: i64,
3307        fairness: i32,
3308    ) {
3309        unsafe {
3310            let ratelimiter =
3311                ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness);
3312            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3313            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3314        }
3315    }
3316
3317    /// Used to control the write rate of flushes and compactions. Flush has
3318    /// higher priority than compaction.
3319    /// If the rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3320    ///
3321    /// Default: disabled
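    ///
    /// # Examples
    ///
    /// A minimal sketch mirroring the `set_ratelimiter` example above; the
    /// parameters are illustrative:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// // 16 MiB/s initial rate, refill every 100ms, fairness of 10.
    /// options.set_auto_tuned_ratelimiter(16 * 1024 * 1024, 100 * 1000, 10);
    /// ```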
3322    pub fn set_auto_tuned_ratelimiter(
3323        &mut self,
3324        rate_bytes_per_sec: i64,
3325        refill_period_us: i64,
3326        fairness: i32,
3327    ) {
3328        unsafe {
3329            let ratelimiter = ffi::rocksdb_ratelimiter_create_auto_tuned(
3330                rate_bytes_per_sec,
3331                refill_period_us,
3332                fairness,
3333            );
3334            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3335            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3336        }
3337    }
3338
3339    /// Sets the maximal size of the info log file.
3340    ///
3341    /// If the log file is larger than `max_log_file_size`, a new info log file
3342    /// will be created. If `max_log_file_size` is equal to zero, all logs will
3343    /// be written to one log file.
3344    ///
3345    /// Default: 0
3346    ///
3347    /// # Examples
3348    ///
3349    /// ```
3350    /// use rocksdb::Options;
3351    ///
3352    /// let mut options = Options::default();
3353    /// options.set_max_log_file_size(0);
3354    /// ```
3355    pub fn set_max_log_file_size(&mut self, size: usize) {
3356        unsafe {
3357            ffi::rocksdb_options_set_max_log_file_size(self.inner, size);
3358        }
3359    }
3360
3361    /// Sets the time for the info log file to roll (in seconds).
3362    ///
3363    /// If specified with a non-zero value, the log file will be rolled
3364    /// if it has been active longer than `log_file_time_to_roll`.
3365    /// Default: 0 (disabled)
3366    pub fn set_log_file_time_to_roll(&mut self, secs: usize) {
3367        unsafe {
3368            ffi::rocksdb_options_set_log_file_time_to_roll(self.inner, secs);
3369        }
3370    }
3371
3372    /// Controls the recycling of log files.
3373    ///
3374    /// If non-zero, previously written log files will be reused for new logs,
3375    /// overwriting the old data. The value indicates how many such files we will
3376    /// keep around at any point in time for later use. This is more efficient
3377    /// because the blocks are already allocated and fdatasync does not need to
3378    /// update the inode after each write.
3379    ///
3380    /// Default: 0
3381    ///
3382    /// # Examples
3383    ///
3384    /// ```
3385    /// use rocksdb::Options;
3386    ///
3387    /// let mut options = Options::default();
3388    /// options.set_recycle_log_file_num(5);
3389    /// ```
3390    pub fn set_recycle_log_file_num(&mut self, num: usize) {
3391        unsafe {
3392            ffi::rocksdb_options_set_recycle_log_file_num(self.inner, num);
3393        }
3394    }
3395
3396    /// Sets the threshold at which all writes will be slowed down to at least
3397    /// delayed_write_rate if the estimated bytes needed to be compacted exceed it.
3398    ///
3399    /// Default: 64GB
3400    pub fn set_soft_pending_compaction_bytes_limit(&mut self, limit: usize) {
3401        unsafe {
3402            ffi::rocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner, limit);
3403        }
3404    }
3405
3406    /// Sets the bytes threshold at which all writes are stopped if the estimated
3407    /// bytes needed to be compacted exceed this threshold.
3408    ///
3409    /// Default: 256GB
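    ///
    /// # Examples
    ///
    /// A minimal sketch restating the defaults explicitly; the values are
    /// illustrative:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_soft_pending_compaction_bytes_limit(64 << 30);  // slow writes at 64GB
    /// opts.set_hard_pending_compaction_bytes_limit(256 << 30); // stop writes at 256GB
    /// ```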
3410    pub fn set_hard_pending_compaction_bytes_limit(&mut self, limit: usize) {
3411        unsafe {
3412            ffi::rocksdb_options_set_hard_pending_compaction_bytes_limit(self.inner, limit);
3413        }
3414    }
3415
3416    /// Sets the size of one block in arena memory allocation.
3417    ///
3418    /// If <= 0, a proper value is automatically calculated (usually 1/10 of
3419    /// write_buffer_size).
3420    ///
3421    /// Default: 0
3422    pub fn set_arena_block_size(&mut self, size: usize) {
3423        unsafe {
3424            ffi::rocksdb_options_set_arena_block_size(self.inner, size);
3425        }
3426    }
3427
3428    /// If true, then print malloc stats together with rocksdb.stats when printing to LOG.
3429    ///
3430    /// Default: false
3431    pub fn set_dump_malloc_stats(&mut self, enabled: bool) {
3432        unsafe {
3433            ffi::rocksdb_options_set_dump_malloc_stats(self.inner, c_uchar::from(enabled));
3434        }
3435    }
3436
3437    /// Enable whole key bloom filter in memtable. Note this will only take effect
3438    /// if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
3439    /// can potentially reduce CPU usage for point lookups.
3440    ///
3441    /// Default: false (disable)
3442    ///
3443    /// Dynamically changeable through SetOptions() API
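    ///
    /// # Examples
    ///
    /// A minimal sketch; the bloom ratio value is illustrative:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Whole key filtering only takes effect with a non-zero prefix bloom ratio.
    /// opts.set_memtable_prefix_bloom_ratio(0.1);
    /// opts.set_memtable_whole_key_filtering(true);
    /// ```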
3444    pub fn set_memtable_whole_key_filtering(&mut self, whole_key_filter: bool) {
3445        unsafe {
3446            ffi::rocksdb_options_set_memtable_whole_key_filtering(
3447                self.inner,
3448                c_uchar::from(whole_key_filter),
3449            );
3450        }
3451    }
3452
3453    /// Enable the use of key-value separation.
3454    ///
3455    /// More details can be found here: [Integrated BlobDB](http://rocksdb.org/blog/2021/05/26/integrated-blob-db.html).
3456    ///
3457    /// Default: false (disable)
3458    ///
3459    /// Dynamically changeable through SetOptions() API
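    ///
    /// # Examples
    ///
    /// A minimal sketch combining this setter with the related blob setters
    /// defined below; the sizes are illustrative:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_enable_blob_files(true);
    /// opts.set_min_blob_size(4096); // separate values of 4 KiB and larger
    /// opts.set_blob_file_size(256 * 1024 * 1024);
    /// opts.set_enable_blob_gc(true);
    /// ```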
3460    pub fn set_enable_blob_files(&mut self, val: bool) {
3461        unsafe {
3462            ffi::rocksdb_options_set_enable_blob_files(self.inner, u8::from(val));
3463        }
3464    }
3465
3466    /// Sets the minimum size threshold; values at or above it will be written
3467    /// to blob files during flush or compaction.
3468    ///
3469    /// Dynamically changeable through SetOptions() API
3470    pub fn set_min_blob_size(&mut self, val: u64) {
3471        unsafe {
3472            ffi::rocksdb_options_set_min_blob_size(self.inner, val);
3473        }
3474    }
3475
3476    /// Sets the size limit for blob files.
3477    ///
3478    /// Dynamically changeable through SetOptions() API
3479    pub fn set_blob_file_size(&mut self, val: u64) {
3480        unsafe {
3481            ffi::rocksdb_options_set_blob_file_size(self.inner, val);
3482        }
3483    }
3484
3485    /// Sets the blob compression type. All blob files use the same
3486    /// compression type.
3487    ///
3488    /// Dynamically changeable through SetOptions() API
3489    pub fn set_blob_compression_type(&mut self, val: DBCompressionType) {
3490        unsafe {
3491            ffi::rocksdb_options_set_blob_compression_type(self.inner, val as _);
3492        }
3493    }
3494
3495    /// If this is set to true, RocksDB will actively relocate valid blobs from the oldest blob files
3496    /// as they are encountered during compaction.
3497    ///
3498    /// Dynamically changeable through SetOptions() API
3499    pub fn set_enable_blob_gc(&mut self, val: bool) {
3500        unsafe {
3501            ffi::rocksdb_options_set_enable_blob_gc(self.inner, u8::from(val));
3502        }
3503    }
3504
3505    /// Sets the threshold that the GC logic uses to determine which blob files should be considered “old.”
3506    ///
3507    /// For example, the default value of 0.25 signals to RocksDB that blobs residing in the
3508    /// oldest 25% of blob files should be relocated by GC. This parameter can be tuned to adjust
3509    /// the trade-off between write amplification and space amplification.
3510    ///
3511    /// Dynamically changeable through SetOptions() API
3512    pub fn set_blob_gc_age_cutoff(&mut self, val: c_double) {
3513        unsafe {
3514            ffi::rocksdb_options_set_blob_gc_age_cutoff(self.inner, val);
3515        }
3516    }
3517
3518    /// Sets the blob GC force threshold.
3519    ///
3520    /// Dynamically changeable through SetOptions() API
3521    pub fn set_blob_gc_force_threshold(&mut self, val: c_double) {
3522        unsafe {
3523            ffi::rocksdb_options_set_blob_gc_force_threshold(self.inner, val);
3524        }
3525    }
3526
3527    /// Sets the blob compaction read ahead size.
3528    ///
3529    /// Dynamically changeable through SetOptions() API
3530    pub fn set_blob_compaction_readahead_size(&mut self, val: u64) {
3531        unsafe {
3532            ffi::rocksdb_options_set_blob_compaction_readahead_size(self.inner, val);
3533        }
3534    }
3535
3536    /// Sets the blob cache.
3537    ///
3538    /// Using a dedicated object for blobs and using the same object for the block and blob caches
3539    /// are both supported. In the latter case, note that blobs are less valuable from a caching
3540    /// perspective than SST blocks, and some cache implementations have configuration options that
3541    /// can be used to prioritize items accordingly (see Cache::Priority and
3542    /// LRUCacheOptions::{high,low}_pri_pool_ratio).
3543    ///
3544    /// Default: disabled
3545    pub fn set_blob_cache(&mut self, cache: &Cache) {
3546        unsafe {
3547            ffi::rocksdb_options_set_blob_cache(self.inner, cache.0.inner.as_ptr());
3548        }
3549        self.outlive.blob_cache = Some(cache.clone());
3550    }
3551
3552    /// Set this option to true during creation of the database if you want
3553    /// to be able to ingest behind (call IngestExternalFile() skipping keys
3554    /// that already exist, rather than overwriting matching keys).
3555    /// Setting this option to true has the following effects:
3556    ///
3557    /// 1. Disable some internal optimizations around SST file compression.
3558    /// 2. Reserve the last level for ingested files only.
3559    /// 3. Compaction will not include any file from the last level.
3560    ///
3561    /// Note that only Universal Compaction supports allow_ingest_behind.
3562    /// `num_levels` should be >= 3 if this option is turned on.
3563    ///
3564    /// DEFAULT: false
3565    /// Immutable.
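    ///
    /// # Examples
    ///
    /// A minimal sketch; `set_compaction_style` is defined elsewhere on
    /// `Options`, and the level count is illustrative:
    ///
    /// ```
    /// use rocksdb::{DBCompactionStyle, Options};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Universal);
    /// opts.set_num_levels(3);
    /// opts.set_allow_ingest_behind(true);
    /// ```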
3566    pub fn set_allow_ingest_behind(&mut self, val: bool) {
3567        unsafe {
3568            ffi::rocksdb_options_set_allow_ingest_behind(self.inner, c_uchar::from(val));
3569        }
3570    }
3571
3572    /// A factory of a table property collector that marks an SST
3573    /// file as need-compaction when it observes at least "D" deletion
3574    /// entries in any "N" consecutive entries, or when the ratio of tombstone
3575    /// entries >= deletion_ratio.
3576    ///
3577    /// `window_size`: the sliding window size "N"
3578    /// `num_dels_trigger`: the deletion trigger "D"
3579    /// `deletion_ratio`: if <= 0 or > 1, disable triggering compaction based on
3580    /// deletion ratio.
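    ///
    /// # Examples
    ///
    /// A minimal sketch; the window and trigger values are illustrative:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Mark a file as need-compaction when any 16384 consecutive entries
    /// // contain at least 4096 deletions, or when half of it is tombstones.
    /// opts.add_compact_on_deletion_collector_factory(16384, 4096, 0.5);
    /// ```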
3581    pub fn add_compact_on_deletion_collector_factory(
3582        &mut self,
3583        window_size: size_t,
3584        num_dels_trigger: size_t,
3585        deletion_ratio: f64,
3586    ) {
3587        unsafe {
3588            ffi::rocksdb_options_add_compact_on_deletion_collector_factory_del_ratio(
3589                self.inner,
3590                window_size,
3591                num_dels_trigger,
3592                deletion_ratio,
3593            );
3594        }
3595    }
3596
3597    /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
3598    /// Write buffer manager helps users control the total memory used by memtables across multiple column families and/or DB instances.
3599    /// Users can enable this control by 2 ways:
3600    ///
3601    /// 1- Limit the total memtable usage across multiple column families and DBs under a threshold.
3602    /// 2- Cost the memtable memory usage to block cache so that memory of RocksDB can be capped by the single limit.
3603    /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager.
3604    /// Users can create one write buffer manager object and pass it to all the options of column families or DBs whose memtable size they want to be controlled by this object.
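    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the `new_write_buffer_manager` constructor
    /// (a flush size in bytes plus an allow-stall flag); the 128 MiB budget is
    /// illustrative:
    ///
    /// ```
    /// use rocksdb::{Options, WriteBufferManager};
    ///
    /// let mut opts = Options::default();
    /// // Assumed constructor: cap total memtable memory at 128 MiB, no stalling.
    /// let wbm = WriteBufferManager::new_write_buffer_manager(128 * 1024 * 1024, false);
    /// opts.set_write_buffer_manager(&wbm);
    /// ```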
3605    pub fn set_write_buffer_manager(&mut self, write_buffer_manager: &WriteBufferManager) {
3606        unsafe {
3607            ffi::rocksdb_options_set_write_buffer_manager(
3608                self.inner,
3609                write_buffer_manager.0.inner.as_ptr(),
3610            );
3611        }
3612        self.outlive.write_buffer_manager = Some(write_buffer_manager.clone());
3613    }
3614
3615    /// If true, working threads may avoid doing unnecessary and long-latency
3616    /// operations (such as deleting obsolete files directly or deleting memtables)
3617    /// and will instead schedule a background job to do it.
3618    ///
3619    /// Use it if you're latency-sensitive.
3620    ///
3621    /// Default: false (disabled)
3622    pub fn set_avoid_unnecessary_blocking_io(&mut self, val: bool) {
3623        unsafe {
3624            ffi::rocksdb_options_set_avoid_unnecessary_blocking_io(self.inner, u8::from(val));
3625        }
3626    }
3627
3628    /// If true, the log numbers and sizes of the synced WALs are tracked
3629    /// in MANIFEST. During DB recovery, if a synced WAL is missing
3630    /// from disk, or the WAL's size does not match the recorded size in
3631    /// MANIFEST, an error will be reported and the recovery will be aborted.
3632    ///
3633    /// This is one additional protection against WAL corruption besides the
3634    /// per-WAL-entry checksum.
3635    ///
3636    /// Note that this option does not work with secondary instance.
3637    /// Currently, only the syncing of closed WALs is tracked. Calling `DB::SyncWAL()`,
3638    /// etc. or writing with `WriteOptions::sync=true` to sync the live WAL is not
3639    /// tracked for performance/efficiency reasons.
3640    ///
3641    /// See: <https://github.com/facebook/rocksdb/wiki/Track-WAL-in-MANIFEST>
3642    ///
3643    /// Default: false (disabled)
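    ///
    /// # Examples
    ///
    /// A minimal sketch using the matching getter defined below:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_track_and_verify_wals_in_manifest(true);
    /// assert!(opts.get_track_and_verify_wals_in_manifest());
    /// ```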
3644    pub fn set_track_and_verify_wals_in_manifest(&mut self, val: bool) {
3645        unsafe {
3646            ffi::rocksdb_options_set_track_and_verify_wals_in_manifest(self.inner, u8::from(val));
3647        }
3648    }
3649
3650    /// Returns the value of the `track_and_verify_wals_in_manifest` option.
3651    pub fn get_track_and_verify_wals_in_manifest(&self) -> bool {
3652        let val_u8 =
3653            unsafe { ffi::rocksdb_options_get_track_and_verify_wals_in_manifest(self.inner) };
3654        val_u8 != 0
3655    }
3656
3657    /// The DB unique ID can be saved in the DB manifest (preferred, this option)
3658    /// or an IDENTITY file (historical, deprecated), or both. If this option is
3659    /// set to false (old behavior), then `write_identity_file` must be set to true.
3660    /// The manifest is preferred because
3661    ///
3662    /// 1. The IDENTITY file is not checksummed, so it is not as safe against
3663    ///    corruption.
3664    /// 2. The IDENTITY file may or may not be copied with the DB (e.g. not
3665    ///    copied by BackupEngine), so is not reliable for the provenance of a DB.
3666    ///
3667    /// This option might eventually become obsolete and be removed as IDENTITY files
3668    /// are phased out.
3669    ///
3670    /// Default: true (enabled)
3671    pub fn set_write_dbid_to_manifest(&mut self, val: bool) {
3672        unsafe {
3673            ffi::rocksdb_options_set_write_dbid_to_manifest(self.inner, u8::from(val));
3674        }
3675    }
3676
3677    /// Returns the value of the `write_dbid_to_manifest` option.
3678    pub fn get_write_dbid_to_manifest(&self) -> bool {
3679        let val_u8 = unsafe { ffi::rocksdb_options_get_write_dbid_to_manifest(self.inner) };
3680        val_u8 != 0
3681    }
3682}
3683
3684impl Default for Options {
3685    fn default() -> Self {
3686        unsafe {
3687            let opts = ffi::rocksdb_options_create();
3688            assert!(!opts.is_null(), "Could not create RocksDB options");
3689
3690            Self {
3691                inner: opts,
3692                outlive: OptionsMustOutliveDB::default(),
3693            }
3694        }
3695    }
3696}
3697
3698impl FlushOptions {
3699    pub fn new() -> FlushOptions {
3700        FlushOptions::default()
3701    }
3702
3703    /// Waits until the flush is done.
3704    ///
3705    /// Default: true
3706    ///
3707    /// # Examples
3708    ///
3709    /// ```
3710    /// use rocksdb::FlushOptions;
3711    ///
3712    /// let mut options = FlushOptions::default();
3713    /// options.set_wait(false);
3714    /// ```
3715    pub fn set_wait(&mut self, wait: bool) {
3716        unsafe {
3717            ffi::rocksdb_flushoptions_set_wait(self.inner, c_uchar::from(wait));
3718        }
3719    }
3720}
3721
3722impl Default for FlushOptions {
3723    fn default() -> Self {
3724        let flush_opts = unsafe { ffi::rocksdb_flushoptions_create() };
3725        assert!(
3726            !flush_opts.is_null(),
3727            "Could not create RocksDB flush options"
3728        );
3729
3730        Self { inner: flush_opts }
3731    }
3732}
3733
3734impl WriteOptions {
3735    pub fn new() -> WriteOptions {
3736        WriteOptions::default()
3737    }
3738
3739    /// Sets the sync mode. If true, the write will be flushed
3740    /// from the operating system buffer cache before the write is considered complete.
3741    /// If this flag is true, writes will be slower.
3742    ///
3743    /// Default: false
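    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use rocksdb::WriteOptions;
    ///
    /// let mut options = WriteOptions::default();
    /// options.set_sync(true);
    /// ```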
3744    pub fn set_sync(&mut self, sync: bool) {
3745        unsafe {
3746            ffi::rocksdb_writeoptions_set_sync(self.inner, c_uchar::from(sync));
3747        }
3748    }
3749
3750    /// Sets whether the write-ahead log (WAL) should be disabled.
3751    /// If true, writes will not first go to the write-ahead log,
3752    /// and the write may be lost after a crash.
3753    ///
3754    /// Default: false
3755    pub fn disable_wal(&mut self, disable: bool) {
3756        unsafe {
3757            ffi::rocksdb_writeoptions_disable_WAL(self.inner, c_int::from(disable));
3758        }
3759    }
3760
3761    /// If true and the user is trying to write to column families that don't
3762    /// exist (they were dropped), ignore the write (don't return an error). If
3763    /// there are multiple writes in a WriteBatch, the other writes will succeed.
3764    ///
3765    /// Default: false
3766    pub fn set_ignore_missing_column_families(&mut self, ignore: bool) {
3767        unsafe {
3768            ffi::rocksdb_writeoptions_set_ignore_missing_column_families(
3769                self.inner,
3770                c_uchar::from(ignore),
3771            );
3772        }
3773    }
3774
3775    /// If true and we need to wait or sleep for the write request, fails
3776    /// immediately with Status::Incomplete().
3777    ///
3778    /// Default: false
3779    pub fn set_no_slowdown(&mut self, no_slowdown: bool) {
3780        unsafe {
3781            ffi::rocksdb_writeoptions_set_no_slowdown(self.inner, c_uchar::from(no_slowdown));
3782        }
3783    }
3784
3785    /// If true, this write request is of lower priority if compaction is
3786    /// behind. In this case, if no_slowdown = true, the request will be
3787    /// cancelled immediately with Status::Incomplete() returned. Otherwise, it
3788    /// will be slowed down. The slowdown value is determined by RocksDB to
3789    /// guarantee it introduces minimal impact on high priority writes.
3790    ///
3791    /// Default: false
3792    pub fn set_low_pri(&mut self, v: bool) {
3793        unsafe {
3794            ffi::rocksdb_writeoptions_set_low_pri(self.inner, c_uchar::from(v));
3795        }
3796    }
3797
3798    /// If true, writebatch will maintain the last insert positions of each
3799    /// memtable as hints in concurrent write. It can improve write performance
3800    /// in concurrent writes if keys in one writebatch are sequential. In
3801    /// non-concurrent writes (when concurrent_memtable_writes is false) this
3802    /// option will be ignored.
3803    ///
3804    /// Default: false
3805    pub fn set_memtable_insert_hint_per_batch(&mut self, v: bool) {
3806        unsafe {
3807            ffi::rocksdb_writeoptions_set_memtable_insert_hint_per_batch(
3808                self.inner,
3809                c_uchar::from(v),
3810            );
3811        }
3812    }
3813}
3814
3815impl Default for WriteOptions {
3816    fn default() -> Self {
3817        let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
3818        assert!(
3819            !write_opts.is_null(),
3820            "Could not create RocksDB write options"
3821        );
3822
3823        Self { inner: write_opts }
3824    }
3825}
3826
3827impl LruCacheOptions {
3828    /// Capacity of the cache, in the same units as the `charge` of each entry.
3829    /// This is typically measured in bytes, but can be a different unit if using
3830    /// kDontChargeCacheMetadata.
3831    pub fn set_capacity(&mut self, cap: usize) {
3832        unsafe {
3833            ffi::rocksdb_lru_cache_options_set_capacity(self.inner, cap);
3834        }
3835    }
3836
3837    /// Cache is sharded into 2^num_shard_bits shards, by hash of key.
3838    /// If < 0, a good default is chosen based on the capacity and the
3839    /// implementation. (Mutex-based implementations are much more reliant
3840    /// on many shards for parallel scalability.)
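    ///
    /// # Examples
    ///
    /// A minimal sketch; the capacity and shard count are illustrative:
    ///
    /// ```
    /// use rocksdb::LruCacheOptions;
    ///
    /// let mut opts = LruCacheOptions::default();
    /// opts.set_capacity(64 * 1024 * 1024); // 64 MiB, charged in bytes
    /// opts.set_num_shard_bits(6);          // 2^6 = 64 shards
    /// ```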
3841    pub fn set_num_shard_bits(&mut self, val: c_int) {
3842        unsafe {
3843            ffi::rocksdb_lru_cache_options_set_num_shard_bits(self.inner, val);
3844        }
3845    }
3846}
3847
3848impl Default for LruCacheOptions {
3849    fn default() -> Self {
3850        let inner = unsafe { ffi::rocksdb_lru_cache_options_create() };
3851        assert!(
3852            !inner.is_null(),
3853            "Could not create RocksDB LRU cache options"
3854        );
3855
3856        Self { inner }
3857    }
3858}
3859
3860#[derive(Debug, Copy, Clone, PartialEq, Eq)]
3861#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
3862#[repr(i32)]
3863pub enum ReadTier {
3864    /// Reads data in memtable, block cache, OS cache or storage.
3865    All = 0,
3866    /// Reads data in memtable or block cache.
3867    BlockCache,
3868    /// Reads persisted data. When WAL is disabled, this option will skip data in memtable.
3869    Persisted,
3870    /// Reads data in memtable. Used for memtable only iterators.
3871    Memtable,
3872}
3873
3874#[repr(i32)]
3875pub enum CompactionPri {
3876    /// Slightly prioritize larger files by size compensated by #deletes
3877    ByCompensatedSize = 0,
3878    /// First compact files whose data's latest update time is oldest.
3879    /// Try this if you only update some hot keys in small ranges.
3880    OldestLargestSeqFirst = 1,
3881    /// First compact files whose range hasn't been compacted to the next level
3882    /// for the longest. If your updates are random across the key space,
3883    /// write amplification is slightly better with this option.
3884    OldestSmallestSeqFirst = 2,
3885    /// First compact files whose ratio between overlapping size in the next
3886    /// level and their own size is the smallest. In many cases this can optimize write amplification.
3887    MinOverlappingRatio = 3,
3888    /// Keeps a cursor (or cursors) at the successor of the file (key range)
3889    /// that was compacted before, and always picks the next file (key range) in
3890    /// that level. The file picking process will cycle through all the files in
3891    /// a round-robin manner.
3892    RoundRobin = 4,
3893}
3894
3895impl ReadOptions {
3896    // TODO add snapshot setting here
3897    // TODO add snapshot wrapper structs with proper destructors;
3898    // that struct needs an "iterator" impl too.
3899
3900    /// Specify whether the "data block"/"index block"/"filter block"
3901    /// read for this iteration should be cached in memory.
3902    /// Callers may wish to set this field to false for bulk scans.
3903    ///
3904    /// Default: true
3905    pub fn fill_cache(&mut self, v: bool) {
3906        unsafe {
3907            ffi::rocksdb_readoptions_set_fill_cache(self.inner, c_uchar::from(v));
3908        }
3909    }
3910
3911    /// Sets the snapshot which should be used for the read.
3912    /// The snapshot must belong to the DB that is being read and must
3913    /// not have been released.
3914    pub fn set_snapshot<D: DBAccess>(&mut self, snapshot: &SnapshotWithThreadMode<D>) {
3915        unsafe {
3916            ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
3917        }
3918    }
3919
3920    /// Sets the lower bound for an iterator.
3921    pub fn set_iterate_lower_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
3922        self.set_lower_bound_impl(Some(key.into()));
3923    }
3924
3925    /// Sets the upper bound for an iterator.
3926    /// The upper bound itself is not included on the iteration result.
3927    pub fn set_iterate_upper_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
3928        self.set_upper_bound_impl(Some(key.into()));
3929    }
3930
3931    /// Sets lower and upper bounds based on the provided range.  This is
3932    /// similar to setting lower and upper bounds separately except that it also
3933    /// allows either bound to be reset.
3934    ///
3935    /// The argument can be a regular Rust range, e.g. `lower..upper`.  However,
3936    /// since RocksDB upper bound is always excluded (i.e. range can never be
3937    /// fully closed) inclusive ranges (`lower..=upper` and `..=upper`) are not
3938    /// supported.  For example:
3939    ///
3940    /// ```
3941    /// let mut options = rocksdb::ReadOptions::default();
3942    /// options.set_iterate_range("xy".as_bytes().."xz".as_bytes());
3943    /// ```
3944    ///
3945    /// In addition, [`crate::PrefixRange`] can be used to specify a range of
3946    /// keys with a given prefix.  In particular, the above example is
3947    /// equivalent to:
3948    ///
3949    /// ```
3950    /// let mut options = rocksdb::ReadOptions::default();
3951    /// options.set_iterate_range(rocksdb::PrefixRange("xy".as_bytes()));
3952    /// ```
3953    ///
3954    /// Note that setting range using this method is separate to using prefix
3955    /// iterators.  Prefix iterators use prefix extractor configured for
3956    /// a column family.  Setting bounds via [`crate::PrefixRange`] is more akin
3957    /// to using manual prefix.
3958    ///
3959    /// Using this method clears any previously set bounds.  In other words, the
3960    /// bounds can be reset by setting the range to `..` as in:
3961    ///
3962    /// ```
3963    /// let mut options = rocksdb::ReadOptions::default();
3964    /// options.set_iterate_range(..);
3965    /// ```
3966    pub fn set_iterate_range(&mut self, range: impl crate::IterateBounds) {
3967        let (lower, upper) = range.into_bounds();
3968        self.set_lower_bound_impl(lower);
3969        self.set_upper_bound_impl(upper);
3970    }
3971
3972    fn set_lower_bound_impl(&mut self, bound: Option<Vec<u8>>) {
3973        let (ptr, len) = if let Some(ref bound) = bound {
3974            (bound.as_ptr() as *const c_char, bound.len())
3975        } else if self.iterate_lower_bound.is_some() {
3976            (std::ptr::null(), 0)
3977        } else {
3978            return;
3979        };
3980        self.iterate_lower_bound = bound;
3981        unsafe {
3982            ffi::rocksdb_readoptions_set_iterate_lower_bound(self.inner, ptr, len);
3983        }
3984    }
3985
3986    fn set_upper_bound_impl(&mut self, bound: Option<Vec<u8>>) {
3987        let (ptr, len) = if let Some(ref bound) = bound {
3988            (bound.as_ptr() as *const c_char, bound.len())
3989        } else if self.iterate_upper_bound.is_some() {
3990            (std::ptr::null(), 0)
3991        } else {
3992            return;
3993        };
3994        self.iterate_upper_bound = bound;
3995        unsafe {
3996            ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner, ptr, len);
3997        }
3998    }
3999
4000    /// Specify if this read request should process data that ALREADY
4001    /// resides on a particular cache. If the required data is not
4002    /// found at the specified cache, then Status::Incomplete is returned.
4003    ///
4004    /// Default: `ReadTier::All`
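    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use rocksdb::{ReadOptions, ReadTier};
    ///
    /// let mut opts = ReadOptions::default();
    /// // Serve this read only from memtable or block cache; if the data is
    /// // not there, the read fails with Status::Incomplete.
    /// opts.set_read_tier(ReadTier::BlockCache);
    /// ```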
4005    pub fn set_read_tier(&mut self, tier: ReadTier) {
4006        unsafe {
4007            ffi::rocksdb_readoptions_set_read_tier(self.inner, tier as c_int);
4008        }
4009    }
4010
4011    /// Enforce that the iterator only iterates over the same
4012    /// prefix as the seek.
4013    /// This option is effective only for prefix seeks, i.e. prefix_extractor is
4014    /// non-null for the column family and total_order_seek is false.  Unlike
4015    /// iterate_upper_bound, prefix_same_as_start only works within a prefix
4016    /// but in both directions.
4017    ///
4018    /// Default: false
4019    pub fn set_prefix_same_as_start(&mut self, v: bool) {
4020        unsafe {
4021            ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, c_uchar::from(v));
4022        }
4023    }
4024
4025    /// Enable a total order seek regardless of index format (e.g. hash index)
4026    /// used in the table. Some table formats (e.g. plain table) may not support
4027    /// this option.
4028    ///
4029    /// If true when calling Get(), we also skip prefix bloom when reading from
4030    /// block based table. It provides a way to read existing data after
4031    /// changing implementation of prefix extractor.
4032    pub fn set_total_order_seek(&mut self, v: bool) {
4033        unsafe {
4034            ffi::rocksdb_readoptions_set_total_order_seek(self.inner, c_uchar::from(v));
4035        }
4036    }
4037
4038    /// Sets a threshold for the number of keys that can be skipped
4039    /// before failing an iterator seek as incomplete. With the default value of 0,
4040    /// a request is never failed as incomplete, however many keys are skipped.
4041    ///
4042    /// Default: 0
4043    pub fn set_max_skippable_internal_keys(&mut self, num: u64) {
4044        unsafe {
4045            ffi::rocksdb_readoptions_set_max_skippable_internal_keys(self.inner, num);
4046        }
4047    }
4048
4049    /// If true, when PurgeObsoleteFile is called in CleanupIteratorState, we schedule a background job
4050    /// in the flush job queue and delete obsolete files in the background.
4051    ///
4052    /// Default: false
4053    pub fn set_background_purge_on_iterator_cleanup(&mut self, v: bool) {
4054        unsafe {
4055            ffi::rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
4056                self.inner,
4057                c_uchar::from(v),
4058            );
4059        }
4060    }
4061
4062    /// If true, keys deleted using the DeleteRange() API will be visible to
4063    /// readers until they are naturally deleted during compaction.
4064    ///
4065    /// Default: false
4066    #[deprecated(
4067        note = "deprecated in RocksDB 10.2.1: no performance impact if DeleteRange is not used"
4068    )]
4069    pub fn set_ignore_range_deletions(&mut self, v: bool) {
4070        unsafe {
4071            ffi::rocksdb_readoptions_set_ignore_range_deletions(self.inner, c_uchar::from(v));
4072        }
4073    }
4074
4075    /// If true, all data read from underlying storage will be
4076    /// verified against corresponding checksums.
4077    ///
4078    /// Default: true
4079    pub fn set_verify_checksums(&mut self, v: bool) {
4080        unsafe {
4081            ffi::rocksdb_readoptions_set_verify_checksums(self.inner, c_uchar::from(v));
4082        }
4083    }
4084
4085    /// If non-zero, an iterator will create a new table reader which
4086    /// performs reads of the given size. Using a large size (> 2MB) can
4087    /// improve the performance of forward iteration on spinning disks.
4088    /// Default: 0
4089    ///
4090    /// ```
4091    /// use rocksdb::{ReadOptions};
4092    ///
4093    /// let mut opts = ReadOptions::default();
4094    /// opts.set_readahead_size(4_194_304); // 4mb
4095    /// ```
4096    pub fn set_readahead_size(&mut self, v: usize) {
4097        unsafe {
4098            ffi::rocksdb_readoptions_set_readahead_size(self.inner, v as size_t);
4099        }
4100    }
4101
4102    /// If auto_readahead_size is set to true, it will auto tune the readahead_size
4103    /// during scans internally.
4104    /// For this feature to be enabled, iterate_upper_bound must also be specified.
4105    ///
4106    /// NOTE: - Recommended for forward scans only.
4107    ///       - If there is a backward scan, this option will be
4108    ///         disabled internally and won't be enabled again if a forward scan
4109    ///         is issued again.
4110    ///
4111    /// Default: true
4112    pub fn set_auto_readahead_size(&mut self, v: bool) {
4113        unsafe {
4114            ffi::rocksdb_readoptions_set_auto_readahead_size(self.inner, c_uchar::from(v));
4115        }
4116    }
4117
4118    /// If true, create a tailing iterator. Note that tailing iterators
4119    /// only support moving in the forward direction. Iterating in reverse
4120    /// and seek_to_last are not supported.
4121    pub fn set_tailing(&mut self, v: bool) {
4122        unsafe {
4123            ffi::rocksdb_readoptions_set_tailing(self.inner, c_uchar::from(v));
4124        }
4125    }
4126
4127    /// Specifies the value of "pin_data". If true, it keeps the blocks
4128    /// loaded by the iterator pinned in memory as long as the iterator is not
4129    /// deleted. If used when reading from tables created with
4130    /// BlockBasedTableOptions::use_delta_encoding = false, the iterator's
4131    /// property "rocksdb.iterator.is-key-pinned" is guaranteed to
4132    /// return 1.
4133    ///
4134    /// Default: false
4135    pub fn set_pin_data(&mut self, v: bool) {
4136        unsafe {
4137            ffi::rocksdb_readoptions_set_pin_data(self.inner, c_uchar::from(v));
4138        }
4139    }
4140
4141    /// Asynchronously prefetch some data.
4142    ///
4143    /// Used for sequential reads and internal automatic prefetching.
4144    ///
4145    /// Default: `false`
4146    pub fn set_async_io(&mut self, v: bool) {
4147        unsafe {
4148            ffi::rocksdb_readoptions_set_async_io(self.inner, c_uchar::from(v));
4149        }
4150    }
4151
4152    /// Timestamp of operation. Read should return the latest data visible to the
4153    /// specified timestamp. All timestamps of the same database must be of the
4154    /// same length and format. The user is responsible for providing a customized
4155    /// compare function via Comparator to order <key, timestamp> tuples.
4156    /// For iterator, iter_start_ts is the lower bound (older) and timestamp
4157    /// serves as the upper bound. Versions of the same record that fall in
4158    /// the timestamp range will be returned. If iter_start_ts is nullptr,
4159    /// only the most recent version visible to timestamp is returned.
4160    /// The user-specified timestamp feature is still under active development,
4161    /// and the API is subject to change.
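    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes the DB was opened with a timestamp-aware
    /// comparator ordering 8-byte big-endian timestamps:
    ///
    /// ```
    /// use rocksdb::ReadOptions;
    ///
    /// let mut opts = ReadOptions::default();
    /// // Read the latest data visible at timestamp 42.
    /// opts.set_timestamp(42u64.to_be_bytes().to_vec());
    /// ```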
4162    pub fn set_timestamp<S: Into<Vec<u8>>>(&mut self, ts: S) {
4163        self.set_timestamp_impl(Some(ts.into()));
4164    }
4165
4166    fn set_timestamp_impl(&mut self, ts: Option<Vec<u8>>) {
4167        let (ptr, len) = if let Some(ref ts) = ts {
4168            (ts.as_ptr() as *const c_char, ts.len())
4169        } else if self.timestamp.is_some() {
4170            // The stored timestamp is a `Some` but we're updating it to a `None`.
4171            // This means to cancel a previously set timestamp.
4172            // To do this, use a null pointer and zero length.
4173            (std::ptr::null(), 0)
4174        } else {
4175            return;
4176        };
4177        self.timestamp = ts;
4178        unsafe {
4179            ffi::rocksdb_readoptions_set_timestamp(self.inner, ptr, len);
4180        }
4181    }
4182
4183    /// See `set_timestamp`
4184    pub fn set_iter_start_ts<S: Into<Vec<u8>>>(&mut self, ts: S) {
4185        self.set_iter_start_ts_impl(Some(ts.into()));
4186    }
4187
4188    fn set_iter_start_ts_impl(&mut self, ts: Option<Vec<u8>>) {
4189        let (ptr, len) = if let Some(ref ts) = ts {
4190            (ts.as_ptr() as *const c_char, ts.len())
4191        } else if self.iter_start_ts.is_some() {
4192            (std::ptr::null(), 0)
4193        } else {
4194            return;
4195        };
4196        self.iter_start_ts = ts;
4197        unsafe {
4198            ffi::rocksdb_readoptions_set_iter_start_ts(self.inner, ptr, len);
4199        }
4200    }
4201}
4202
4203impl Default for ReadOptions {
4204    fn default() -> Self {
4205        unsafe {
4206            Self {
4207                inner: ffi::rocksdb_readoptions_create(),
4208                timestamp: None,
4209                iter_start_ts: None,
4210                iterate_upper_bound: None,
4211                iterate_lower_bound: None,
4212            }
4213        }
4214    }
4215}
4216
4217impl IngestExternalFileOptions {
4218    /// Can be set to true to move the files instead of copying them.
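    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use rocksdb::IngestExternalFileOptions;
    ///
    /// let mut opts = IngestExternalFileOptions::default();
    /// opts.set_move_files(true);
    /// ```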
4219    pub fn set_move_files(&mut self, v: bool) {
4220        unsafe {
4221            ffi::rocksdb_ingestexternalfileoptions_set_move_files(self.inner, c_uchar::from(v));
4222        }
4223    }
4224
4225    /// If set to false, an ingested file's keys could appear in existing snapshots
4226    /// that were created before the file was ingested.
4227    pub fn set_snapshot_consistency(&mut self, v: bool) {
4228        unsafe {
4229            ffi::rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
4230                self.inner,
4231                c_uchar::from(v),
4232            );
4233        }
4234    }
4235
4236    /// If set to false, IngestExternalFile() will fail if the file key range
4237    /// overlaps with existing keys or tombstones in the DB.
4238    pub fn set_allow_global_seqno(&mut self, v: bool) {
4239        unsafe {
4240            ffi::rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
4241                self.inner,
4242                c_uchar::from(v),
4243            );
4244        }
4245    }
4246
4247    /// If set to false and the file key range overlaps with the memtable key range
4248    /// (memtable flush required), IngestExternalFile will fail.
4249    pub fn set_allow_blocking_flush(&mut self, v: bool) {
4250        unsafe {
4251            ffi::rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
4252                self.inner,
4253                c_uchar::from(v),
4254            );
4255        }
4256    }
4257
4258    /// Set to true if you would like duplicate keys in the file being ingested
4259    /// to be skipped rather than overwriting existing data under that key.
4260    /// Usecase: back-fill of some historical data in the database without
4261    /// over-writing existing newer version of data.
4262    /// This option could only be used if the DB has been running
4263    /// with allow_ingest_behind=true since the dawn of time.
4264    /// All files will be ingested at the bottommost level with seqno=0.
4265    pub fn set_ingest_behind(&mut self, v: bool) {
4266        unsafe {
4267            ffi::rocksdb_ingestexternalfileoptions_set_ingest_behind(self.inner, c_uchar::from(v));
4268        }
4269    }
4270}

impl Default for IngestExternalFileOptions {
    fn default() -> Self {
        unsafe {
            Self {
                inner: ffi::rocksdb_ingestexternalfileoptions_create(),
            }
        }
    }
}

/// Used by BlockBasedOptions::set_index_type.
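///
/// A sketch of selecting an index type (in RocksDB, two-level indexes are
/// typically combined with partitioned filters):
/// ```no_run
/// use rocksdb::{BlockBasedIndexType, BlockBasedOptions, Options};
///
/// let mut block_opts = BlockBasedOptions::default();
/// block_opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch);
/// let mut opts = Options::default();
/// opts.set_block_based_table_factory(&block_opts);
/// ```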
pub enum BlockBasedIndexType {
    /// A space-efficient index block that is optimized for
    /// binary-search-based indexes.
    BinarySearch,

    /// The hash index, if enabled, will perform a hash lookup if
    /// a prefix extractor has been provided through Options::set_prefix_extractor.
    HashSearch,

    /// A two-level index implementation. Both levels are binary search indexes.
    TwoLevelIndexSearch,
}

/// Used by BlockBasedOptions::set_data_block_index_type.
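///
/// A sketch of enabling the hybrid binary-and-hash data block index
/// (`set_data_block_hash_ratio` tunes hash table utilization; 0.85 here is
/// just an illustrative value):
/// ```no_run
/// use rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
///
/// let mut block_opts = BlockBasedOptions::default();
/// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
/// block_opts.set_data_block_hash_ratio(0.85);
/// let mut opts = Options::default();
/// opts.set_block_based_table_factory(&block_opts);
/// ```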
#[repr(C)]
pub enum DataBlockIndexType {
    /// Use binary search when performing point lookup for keys in data blocks.
    /// This is the default.
    BinarySearch = 0,

    /// Appends a compact hash table to the end of the data block for efficient indexing. Backwards
    /// compatible with databases created without this feature. Once turned on, existing data will
    /// be gradually converted to the hash index format.
    BinaryAndHash = 1,
}

/// Used by BlockBasedOptions for setting metadata cache pinning tiers.
/// Controls how metadata blocks (index, filter, etc.) are pinned in block cache.
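///
/// A sketch of pinning metadata blocks; the setter names used here
/// (`set_top_level_index_pinning_tier`, `set_unpartitioned_pinning_tier`) are
/// assumed to mirror the corresponding C API options and may differ by
/// crate version:
/// ```ignore
/// use rocksdb::{BlockBasedOptions, BlockBasedTablePinningTier};
///
/// let mut block_opts = BlockBasedOptions::default();
/// // Keep top-level index blocks resident; let everything else follow the
/// // flush-and-similar policy.
/// block_opts.set_top_level_index_pinning_tier(BlockBasedTablePinningTier::All);
/// block_opts.set_unpartitioned_pinning_tier(BlockBasedTablePinningTier::FlushAndSimilar);
/// ```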
#[repr(C)]
pub enum BlockBasedTablePinningTier {
    /// Use fallback pinning tier (context-dependent)
    Fallback = ffi::rocksdb_block_based_k_fallback_pinning_tier as isize,
    /// No pinning - blocks can be evicted at any time
    None = ffi::rocksdb_block_based_k_none_pinning_tier as isize,
    /// Pin blocks for flushed files and similar scenarios
    FlushAndSimilar = ffi::rocksdb_block_based_k_flush_and_similar_pinning_tier as isize,
    /// Pin all blocks (highest priority)
    All = ffi::rocksdb_block_based_k_all_pinning_tier as isize,
}

/// Defines the underlying memtable implementation.
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
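///
/// A sketch of switching to a hash-skiplist memtable; hash-based memtables
/// require a prefix extractor and do not support concurrent writes, so both
/// are configured here (the 4-byte prefix length is just an example):
/// ```no_run
/// use rocksdb::{MemtableFactory, Options, SliceTransform};
///
/// let mut opts = Options::default();
/// opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(4));
/// opts.set_allow_concurrent_memtable_write(false);
/// opts.set_memtable_factory(MemtableFactory::HashSkipList {
///     bucket_count: 1_000_000,
///     height: 4,
///     branching_factor: 4,
/// });
/// ```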
pub enum MemtableFactory {
    Vector,
    HashSkipList {
        bucket_count: usize,
        height: i32,
        branching_factor: i32,
    },
    HashLinkList {
        bucket_count: usize,
    },
}

/// Used by BlockBasedOptions::set_checksum_type.
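///
/// For example (a sketch; note that XXH3 requires RocksDB 6.27+, and older
/// releases cannot read tables written with it):
/// ```no_run
/// use rocksdb::{BlockBasedOptions, ChecksumType, Options};
///
/// let mut block_opts = BlockBasedOptions::default();
/// block_opts.set_checksum_type(ChecksumType::XXH3);
/// let mut opts = Options::default();
/// opts.set_block_based_table_factory(&block_opts);
/// ```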
pub enum ChecksumType {
    NoChecksum = 0,
    CRC32c = 1,
    XXHash = 2,
    XXHash64 = 3,
    XXH3 = 4, // Supported since RocksDB 6.27
}

/// Used in [`PlainTableFactoryOptions`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub enum KeyEncodingType {
    /// Always write full keys.
    #[default]
    Plain = 0,
    /// Find opportunities to write the same prefix for multiple rows.
    Prefix = 1,
}

/// Used with DBOptions::set_plain_table_factory.
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
/// information.
///
/// Defaults:
///  user_key_length: 0 (variable length)
///  bloom_bits_per_key: 10
///  hash_table_ratio: 0.75
///  index_sparseness: 16
///  huge_page_tlb_size: 0
///  encoding_type: KeyEncodingType::Plain
///  full_scan_mode: false
///  store_index_in_file: false
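///
/// A sketch of enabling the plain-table format with the defaults listed above
/// (the fixed 16-byte key length and 4-byte prefix extractor are illustrative
/// choices; plain tables are designed for fully in-memory workloads):
/// ```no_run
/// use rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions, SliceTransform};
///
/// let mut opts = Options::default();
/// opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(4));
/// opts.set_plain_table_factory(&PlainTableFactoryOptions {
///     user_key_length: 16,
///     bloom_bits_per_key: 10,
///     hash_table_ratio: 0.75,
///     index_sparseness: 16,
///     huge_page_tlb_size: 0,
///     encoding_type: KeyEncodingType::Plain,
///     full_scan_mode: false,
///     store_index_in_file: false,
/// });
/// ```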
pub struct PlainTableFactoryOptions {
    pub user_key_length: u32,
    pub bloom_bits_per_key: i32,
    pub hash_table_ratio: f64,
    pub index_sparseness: usize,
    pub huge_page_tlb_size: usize,
    pub encoding_type: KeyEncodingType,
    pub full_scan_mode: bool,
    pub store_index_in_file: bool,
}

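/// Compression algorithms supported for SST blocks.
///
/// A sketch of selecting compression (the chosen codec must be compiled in
/// via the matching cargo feature, e.g. `zstd`):
/// ```no_run
/// use rocksdb::{DBCompressionType, Options};
///
/// let mut opts = Options::default();
/// opts.set_compression_type(DBCompressionType::Lz4);
/// // Use stronger compression for the colder, bottommost level.
/// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
/// ```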
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompressionType {
    None = ffi::rocksdb_no_compression as isize,
    Snappy = ffi::rocksdb_snappy_compression as isize,
    Zlib = ffi::rocksdb_zlib_compression as isize,
    Bz2 = ffi::rocksdb_bz2_compression as isize,
    Lz4 = ffi::rocksdb_lz4_compression as isize,
    Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
    Zstd = ffi::rocksdb_zstd_compression as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionStyle {
    Level = ffi::rocksdb_level_compaction as isize,
    Universal = ffi::rocksdb_universal_compaction as isize,
    Fifo = ffi::rocksdb_fifo_compaction as isize,
}

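/// WAL recovery modes, mirroring RocksDB's `WALRecoveryMode`.
///
/// For example (a sketch; `AbsoluteConsistency` refuses to open the DB if the
/// WAL tail is corrupt, trading availability for strictness):
/// ```no_run
/// use rocksdb::{DBRecoveryMode, Options};
///
/// let mut opts = Options::default();
/// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
/// ```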
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBRecoveryMode {
    TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
    AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
    PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
    SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}

pub struct FifoCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
}

impl Default for FifoCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_fifo_compaction_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Fifo Compaction Options"
        );

        Self { inner: opts }
    }
}

impl Drop for FifoCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_fifo_compaction_options_destroy(self.inner);
        }
    }
}

impl FifoCompactOptions {
    /// Sets the max table file size.
    ///
    /// Once the total size of all table files exceeds this limit, the oldest
    /// table file will be deleted.
    ///
    /// Default: 1GB
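    ///
    /// A sketch of wiring this into FIFO compaction (the 2 GiB cap is just an
    /// illustrative value):
    /// ```no_run
    /// use rocksdb::{DBCompactionStyle, FifoCompactOptions, Options};
    ///
    /// let mut fifo_opts = FifoCompactOptions::default();
    /// // Delete the oldest files once table files exceed 2 GiB in total.
    /// fifo_opts.set_max_table_files_size(2 << 30);
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Fifo);
    /// opts.set_fifo_compaction_options(&fifo_opts);
    /// ```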
    pub fn set_max_table_files_size(&mut self, nbytes: u64) {
        unsafe {
            ffi::rocksdb_fifo_compaction_options_set_max_table_files_size(self.inner, nbytes);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum UniversalCompactionStopStyle {
    Similar = ffi::rocksdb_similar_size_compaction_stop_style as isize,
    Total = ffi::rocksdb_total_size_compaction_stop_style as isize,
}

pub struct UniversalCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_universal_compaction_options_t,
}

impl Default for UniversalCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_universal_compaction_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Universal Compaction Options"
        );

        Self { inner: opts }
    }
}

impl Drop for UniversalCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_destroy(self.inner);
        }
    }
}

impl UniversalCompactOptions {
    /// Sets the percentage flexibility while comparing file sizes.
    /// If the candidate files' size is 1% smaller than the next file's size,
    /// the next file is included in the candidate set.
    ///
    /// Default: 1
    pub fn set_size_ratio(&mut self, ratio: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_size_ratio(self.inner, ratio);
        }
    }

    /// Sets the minimum number of files in a single compaction run.
    ///
    /// Default: 2
    pub fn set_min_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_min_merge_width(self.inner, num);
        }
    }

    /// Sets the maximum number of files in a single compaction run.
    ///
    /// Default: UINT_MAX
    pub fn set_max_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_merge_width(self.inner, num);
        }
    }

    /// Sets the size amplification.
    ///
    /// It is defined as the amount (in percentage) of
    /// additional storage needed to store a single byte of data in the database.
    /// For example, a size amplification of 2% means that a database that
    /// contains 100 bytes of user-data may occupy up to 102 bytes of
    /// physical storage. By this definition, a fully compacted database has
    /// a size amplification of 0%. RocksDB uses the following heuristic
    /// to calculate size amplification: it assumes that all files excluding
    /// the earliest file contribute to the size amplification.
    ///
    /// Default: 200, which means that a 100-byte database could require up to
    /// 300 bytes of storage.
    pub fn set_max_size_amplification_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_size_amplification_percent(
                self.inner, v,
            );
        }
    }

    /// Sets the percentage of compression size.
    ///
    /// If this option is set to -1 (the default), all output files
    /// will follow the compression type specified.
    ///
    /// If this option is non-negative, we will try to make sure the compressed
    /// size is just above this value. In normal cases, at least this percentage
    /// of data will be compressed.
    /// When we are compacting to a new file, here is the criterion for whether
    /// it needs to be compressed: assuming the list of files sorted
    /// by generation time is
    ///    A1...An B1...Bm C1...Ct
    /// where A1 is the newest and Ct is the oldest, and we are going to compact
    /// B1...Bm, we calculate the total size of all the files as total_size, as
    /// well as the total size of C1...Ct as total_C; the compaction output file
    /// will be compressed iff
    ///   total_C / total_size < this percentage
    ///
    /// Default: -1
    pub fn set_compression_size_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_compression_size_percent(self.inner, v);
        }
    }

    /// Sets the algorithm used to stop picking files into a single compaction run.
    ///
    /// Default: UniversalCompactionStopStyle::Total
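    ///
    /// A sketch of a complete universal-compaction setup using the values
    /// documented above as defaults:
    /// ```no_run
    /// use rocksdb::{DBCompactionStyle, Options, UniversalCompactOptions, UniversalCompactionStopStyle};
    ///
    /// let mut uni_opts = UniversalCompactOptions::default();
    /// uni_opts.set_size_ratio(1);
    /// uni_opts.set_min_merge_width(2);
    /// uni_opts.set_max_size_amplification_percent(200);
    /// uni_opts.set_stop_style(UniversalCompactionStopStyle::Total);
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Universal);
    /// opts.set_universal_compaction_options(&uni_opts);
    /// ```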
    pub fn set_stop_style(&mut self, style: UniversalCompactionStopStyle) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_stop_style(self.inner, style as c_int);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(u8)]
pub enum BottommostLevelCompaction {
    /// Skip bottommost level compaction.
    Skip = 0,
    /// Only compact the bottommost level if there is a compaction filter.
    /// This is the default option.
    IfHaveCompactionFilter,
    /// Always compact the bottommost level.
    Force,
    /// Always compact the bottommost level, but avoid double-compacting files
    /// created in the same compaction.
    ForceOptimized,
}

pub struct CompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_compactoptions_t,
    full_history_ts_low: Option<Vec<u8>>,
}

impl Default for CompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_compactoptions_create() };
        assert!(!opts.is_null(), "Could not create RocksDB Compact Options");

        Self {
            inner: opts,
            full_history_ts_low: None,
        }
    }
}

impl Drop for CompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_compactoptions_destroy(self.inner);
        }
    }
}

impl CompactOptions {
    /// If more than one thread calls manual compaction,
    /// only one will actually schedule it while the other threads will simply wait
    /// for the scheduled manual compaction to complete. If exclusive_manual_compaction
    /// is set to true, the call will disable scheduling of automatic compaction jobs
    /// and wait for existing automatic compaction jobs to finish.
    pub fn set_exclusive_manual_compaction(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_compactoptions_set_exclusive_manual_compaction(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Sets bottommost level compaction.
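    ///
    /// A sketch of forcing a full manual compaction down to the bottommost
    /// level (assuming `db` is an open `rocksdb::DB`):
    /// ```no_run
    /// use rocksdb::{BottommostLevelCompaction, CompactOptions, DB};
    ///
    /// let db = DB::open_default("/tmp/db").unwrap();
    /// let mut compact_opts = CompactOptions::default();
    /// compact_opts.set_bottommost_level_compaction(BottommostLevelCompaction::Force);
    /// // Compact the whole key range.
    /// db.compact_range_opt(None::<&[u8]>, None::<&[u8]>, &compact_opts);
    /// ```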
    pub fn set_bottommost_level_compaction(&mut self, lvl: BottommostLevelCompaction) {
        unsafe {
            ffi::rocksdb_compactoptions_set_bottommost_level_compaction(self.inner, lvl as c_uchar);
        }
    }

    /// If true, compacted files will be moved to the minimum level capable
    /// of holding the data, or to the given level (if target_level is non-negative).
    pub fn set_change_level(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_compactoptions_set_change_level(self.inner, c_uchar::from(v));
        }
    }

    /// If change_level is true and target_level has a non-negative value, compacted
    /// files will be moved to target_level.
    pub fn set_target_level(&mut self, lvl: c_int) {
        unsafe {
            ffi::rocksdb_compactoptions_set_target_level(self.inner, lvl);
        }
    }

    /// Sets the user-defined timestamp low bound; data with a timestamp older
    /// than this bound may be GCed by compaction. Default: nullptr
    pub fn set_full_history_ts_low<S: Into<Vec<u8>>>(&mut self, ts: S) {
        self.set_full_history_ts_low_impl(Some(ts.into()));
    }

    fn set_full_history_ts_low_impl(&mut self, ts: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref ts) = ts {
            (ts.as_ptr() as *mut c_char, ts.len())
        } else if self.full_history_ts_low.is_some() {
            // Cancel a previously set low bound with a null pointer and zero
            // length, mirroring `ReadOptions::set_timestamp_impl`.
            (std::ptr::null_mut(), 0)
        } else {
            return;
        };
        self.full_history_ts_low = ts;
        unsafe {
            ffi::rocksdb_compactoptions_set_full_history_ts_low(self.inner, ptr, len);
        }
    }
}

pub struct WaitForCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_wait_for_compact_options_t,
}

impl Default for WaitForCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_wait_for_compact_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Wait For Compact Options"
        );

        Self { inner: opts }
    }
}

impl Drop for WaitForCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_destroy(self.inner);
        }
    }
}

impl WaitForCompactOptions {
    /// If true, abort waiting if background jobs are paused. If false,
    /// ContinueBackgroundWork() must be called to resume the background jobs.
    /// Otherwise, jobs that were queued but not yet scheduled may never finish,
    /// and WaitForCompact() may wait indefinitely (or, if a timeout is set,
    /// abort after the timeout).
    ///
    /// Default: false
    pub fn set_abort_on_pause(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_abort_on_pause(self.inner, c_uchar::from(v));
        }
    }

    /// If true, flush all column families before starting to wait.
    ///
    /// Default: false
    pub fn set_flush(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_flush(self.inner, c_uchar::from(v));
        }
    }

    /// Timeout in microseconds for waiting for compaction to complete.
    /// When timeout == 0, WaitForCompact() will wait as long as there is
    /// background work to finish.
    ///
    /// Default: 0
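    ///
    /// A sketch of flushing and then blocking until background compactions
    /// settle (this assumes the DB wrapper exposes a `wait_for_compact`
    /// method taking these options, as recent crate versions do):
    /// ```no_run
    /// use rocksdb::{WaitForCompactOptions, DB};
    ///
    /// let db = DB::open_default("/tmp/db").unwrap();
    /// let mut wait_opts = WaitForCompactOptions::default();
    /// wait_opts.set_flush(true);
    /// wait_opts.set_timeout(60_000_000); // give up after 60 seconds
    /// db.wait_for_compact(&wait_opts).unwrap();
    /// ```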
    pub fn set_timeout(&mut self, microseconds: u64) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_timeout(self.inner, microseconds);
        }
    }
}

/// Represents a path where SST files can be placed.
pub struct DBPath {
    pub(crate) inner: *mut ffi::rocksdb_dbpath_t,
}

impl DBPath {
    /// Creates a new path with a target size.
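    ///
    /// A sketch of spreading SST files across two storage tiers via
    /// `Options::set_db_paths` (paths and sizes are illustrative):
    /// ```no_run
    /// use rocksdb::{DBPath, Options};
    ///
    /// // Keep roughly the first 64 GiB on fast storage, overflow to slow storage.
    /// let fast = DBPath::new("/nvme/db", 64u64 << 30).unwrap();
    /// let slow = DBPath::new("/hdd/db", u64::MAX).unwrap();
    /// let mut opts = Options::default();
    /// opts.set_db_paths(&[fast, slow]);
    /// ```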
    pub fn new<P: AsRef<Path>>(path: P, target_size: u64) -> Result<Self, Error> {
        let p = to_cpath(path.as_ref()).unwrap();
        let dbpath = unsafe { ffi::rocksdb_dbpath_create(p.as_ptr(), target_size) };
        if dbpath.is_null() {
            Err(Error::new(format!(
                "Could not create path for storing sst files at location: {}",
                path.as_ref().display()
            )))
        } else {
            Ok(DBPath { inner: dbpath })
        }
    }
}

impl Drop for DBPath {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_dbpath_destroy(self.inner);
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::db_options::WriteBufferManager;
    use crate::{Cache, CompactionPri, MemtableFactory, Options};

    #[test]
    fn test_enable_statistics() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_dump_period_sec(60);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_memtable_factory() {
        let mut opts = Options::default();
        opts.set_memtable_factory(MemtableFactory::Vector);
        opts.set_memtable_factory(MemtableFactory::HashLinkList { bucket_count: 100 });
        opts.set_memtable_factory(MemtableFactory::HashSkipList {
            bucket_count: 100,
            height: 4,
            branching_factor: 4,
        });
    }

    #[test]
    fn test_use_fsync() {
        let mut opts = Options::default();
        assert!(!opts.get_use_fsync());
        opts.set_use_fsync(true);
        assert!(opts.get_use_fsync());
    }

    #[test]
    fn test_set_stats_persist_period_sec() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_persist_period_sec(5);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_write_buffer_manager() {
        let mut opts = Options::default();
        let lrucache = Cache::new_lru_cache(100);
        let write_buffer_manager =
            WriteBufferManager::new_write_buffer_manager_with_cache(100, false, lrucache);
        assert_eq!(write_buffer_manager.get_buffer_size(), 100);
        assert_eq!(write_buffer_manager.get_usage(), 0);
        assert!(write_buffer_manager.enabled());

        opts.set_write_buffer_manager(&write_buffer_manager);
        drop(opts);

        // WriteBufferManager outlives options
        assert!(write_buffer_manager.enabled());
    }

    #[test]
    fn compaction_pri() {
        let mut opts = Options::default();
        opts.set_compaction_pri(CompactionPri::RoundRobin);
        opts.create_if_missing(true);
        let tmp = tempfile::tempdir().unwrap();
        let _db = crate::DB::open(&opts, tmp.path()).unwrap();

        let options = std::fs::read_dir(tmp.path())
            .unwrap()
            .find_map(|x| {
                let x = x.ok()?;
                x.file_name()
                    .into_string()
                    .unwrap()
                    .contains("OPTIONS")
                    .then_some(x.path())
            })
            .map(std::fs::read_to_string)
            .unwrap()
            .unwrap();

        assert!(options.contains("compaction_pri=kRoundRobin"));
    }
}