// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

/**
 * Advanced Column Family Options which are mutable.
 *
 * Taken from include/rocksdb/advanced_options.h
 * and MutableCFOptions in util/cf_options.h
 */
public interface AdvancedMutableColumnFamilyOptionsInterface<
    T extends AdvancedMutableColumnFamilyOptionsInterface<T>> {
  /**
   * The maximum number of write buffers that are built up in memory.
   * The default is 2, so that when 1 write buffer is being flushed to
   * storage, new writes can continue to the other write buffer.
   * Default: 2
   *
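   * <p>A minimal, hedged sketch of adjusting this option at runtime through
   * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)};
   * the open database {@code db} and column family {@code handle} are assumed
   * to exist:
   * <pre>{@code
   * db.setOptions(handle,
   *     MutableColumnFamilyOptions.builder()
   *         .setMaxWriteBufferNumber(4) // allow up to 4 in-memory buffers
   *         .build());
   * }</pre>
   *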
   * @param maxWriteBufferNumber maximum number of write buffers.
   * @return the instance of the current options.
   */
  T setMaxWriteBufferNumber(int maxWriteBufferNumber);

  /**
   * Returns the maximum number of write buffers.
   *
   * @return maximum number of write buffers.
   * @see #setMaxWriteBufferNumber(int)
   */
  int maxWriteBufferNumber();

  /**
   * Number of locks used for inplace updates.
   * Default: 10000, if inplace_update_support = true, else 0.
   *
   * @param inplaceUpdateNumLocks the number of locks used for
   *     inplace updates.
   * @return the reference to the current options.
   * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms
   *     when the value overflows the underlying platform-specific value.
   */
  T setInplaceUpdateNumLocks(long inplaceUpdateNumLocks);

  /**
   * Number of locks used for inplace updates.
   * Default: 10000, if inplace_update_support = true, else 0.
   *
   * @return the number of locks used for inplace updates.
   */
  long inplaceUpdateNumLocks();

  /**
   * If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
   * a prefix bloom filter is created for the memtable with a size of
   * write_buffer_size * memtable_prefix_bloom_size_ratio.
   * If it is larger than 0.25, it is sanitized to 0.25.
   *
   * Default: 0 (disable)
   *
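   * <p>A hedged sketch of enabling a memtable prefix bloom filter; the fixed
   * prefix length of 8 is an assumption for illustration:
   * <pre>{@code
   * try (ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
   *          .useFixedLengthPrefixExtractor(8)        // prefix extractor is required
   *          .setMemtablePrefixBloomSizeRatio(0.1)) { // 10% of write_buffer_size
   *   // open the column family with cfOpts
   * }
   * }</pre>
   *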
   * @param memtablePrefixBloomSizeRatio the ratio
   * @return the reference to the current options.
   */
  T setMemtablePrefixBloomSizeRatio(double memtablePrefixBloomSizeRatio);

  /**
   * If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
   * a prefix bloom filter is created for the memtable with a size of
   * write_buffer_size * memtable_prefix_bloom_size_ratio.
   * If it is larger than 0.25, it is sanitized to 0.25.
   *
   * Default: 0 (disable)
   *
   * @return the ratio
   */
  double memtablePrefixBloomSizeRatio();

  /**
   * Page size for the huge page TLB for the bloom filter in the memtable.
   * If &le; 0, the allocation comes from malloc rather than from the huge
   * page TLB. Huge pages need to be reserved for the allocation to succeed.
   * For example:
   *     sysctl -w vm.nr_hugepages=20
   * See the Linux doc Documentation/vm/hugetlbpage.txt
   *
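   * <p>A hedged sketch, assuming 2 MiB huge pages have been reserved on the
   * host as shown above:
   * <pre>{@code
   * try (ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
   *          .setMemtableHugePageSize(2 * 1024 * 1024)) { // 2 MiB page size
   *   // open the column family with cfOpts
   * }
   * }</pre>
   *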
   * @param memtableHugePageSize the page size of the huge page TLB
   * @return the reference to the current options.
   */
  T setMemtableHugePageSize(long memtableHugePageSize);

  /**
   * Page size for the huge page TLB for the bloom filter in the memtable.
   * If &le; 0, the allocation comes from malloc rather than from the huge
   * page TLB. Huge pages need to be reserved for the allocation to succeed.
   * For example:
   *     sysctl -w vm.nr_hugepages=20
   * See the Linux doc Documentation/vm/hugetlbpage.txt
   *
   * @return the page size of the huge page TLB
   */
  long memtableHugePageSize();

  /**
   * The size of one block in arena memory allocation.
   * If &le; 0, a proper value is automatically calculated (usually 1/10 of
   * write_buffer_size).
   *
   * There are two additional restrictions on the specified size:
   * (1) the size should be in the range of [4096, 2 &lt;&lt; 30] and
   * (2) it should be a multiple of the CPU word size (which helps with
   * memory alignment).
   *
   * We'll automatically check and adjust the size to make sure it
   * conforms to the restrictions.
   * Default: 0
   *
   * @param arenaBlockSize the size of an arena block
   * @return the reference to the current options.
   * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms
   *   when the value overflows the underlying platform-specific value.
   */
  T setArenaBlockSize(long arenaBlockSize);

  /**
   * The size of one block in arena memory allocation.
   * If &le; 0, a proper value is automatically calculated (usually 1/10 of
   * write_buffer_size).
   *
   * There are two additional restrictions on the specified size:
   * (1) the size should be in the range of [4096, 2 &lt;&lt; 30] and
   * (2) it should be a multiple of the CPU word size (which helps with
   * memory alignment).
   *
   * We'll automatically check and adjust the size to make sure it
   * conforms to the restrictions.
   * Default: 0
   *
   * @return the size of an arena block
   */
  long arenaBlockSize();

  /**
   * Soft limit on the number of level-0 files. We start slowing down writes
   * at this point. A value &lt; 0 means that no write slowdown will be
   * triggered by the number of files in level-0.
   *
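   * <p>A hedged sketch of tuning the slowdown and stop triggers together at
   * runtime; {@code db} and {@code handle} are assumed to exist:
   * <pre>{@code
   * db.setOptions(handle,
   *     MutableColumnFamilyOptions.builder()
   *         .setLevel0SlowdownWritesTrigger(20) // begin throttling writes
   *         .setLevel0StopWritesTrigger(36)     // halt writes entirely
   *         .build());
   * }</pre>
   *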
   * @param level0SlowdownWritesTrigger the soft limit on the number of
   *   level-0 files
   * @return the reference to the current options.
   */
  T setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger);

  /**
   * Soft limit on the number of level-0 files. We start slowing down writes
   * at this point. A value &lt; 0 means that no write slowdown will be
   * triggered by the number of files in level-0.
   *
   * @return the soft limit on the number of level-0 files
   */
  int level0SlowdownWritesTrigger();

  /**
   * Maximum number of level-0 files. We stop writes at this point.
   *
   * @param level0StopWritesTrigger the maximum number of level-0 files
   * @return the reference to the current options.
   */
  T setLevel0StopWritesTrigger(int level0StopWritesTrigger);

  /**
   * Maximum number of level-0 files. We stop writes at this point.
   *
   * @return the maximum number of level-0 files
   */
  int level0StopWritesTrigger();

  /**
   * The target file size for compaction.
   * This targetFileSizeBase determines the level-1 file size.
   * The target file size for level L can be calculated as
   * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)).
   * For example, if targetFileSizeBase is 2MB and
   * targetFileSizeMultiplier is 10, then each file on level-1 will
   * be 2MB, each file on level-2 will be 20MB,
   * and each file on level-3 will be 200MB.
   * By default targetFileSizeBase is 64MB.
   *
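   * <p>A hedged sketch that reproduces the example above; with these values a
   * level-1 file targets 2MB, a level-2 file 20MB, and a level-3 file 200MB:
   * <pre>{@code
   * try (ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
   *          .setTargetFileSizeBase(2 * 1024 * 1024) // 2MB at level-1
   *          .setTargetFileSizeMultiplier(10)) {     // 10x per level below that
   *   // open the column family with cfOpts
   * }
   * }</pre>
   *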
   * @param targetFileSizeBase the target size of a level-1 file.
   * @return the reference to the current options.
   *
   * @see #setTargetFileSizeMultiplier(int)
   */
  T setTargetFileSizeBase(long targetFileSizeBase);

  /**
   * The target file size for compaction.
   * This targetFileSizeBase determines the level-1 file size.
   * The target file size for level L can be calculated as
   * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)).
   * For example, if targetFileSizeBase is 2MB and
   * targetFileSizeMultiplier is 10, then each file on level-1 will
   * be 2MB, each file on level-2 will be 20MB,
   * and each file on level-3 will be 200MB.
   * By default targetFileSizeBase is 64MB.
   *
   * @return the target size of a level-1 file.
   *
   * @see #targetFileSizeMultiplier()
   */
  long targetFileSizeBase();

  /**
   * targetFileSizeMultiplier defines the size ratio between a
   * level-(L+1) file and a level-L file.
   * By default targetFileSizeMultiplier is 1, meaning
   * files in different levels have the same target.
   *
   * @param multiplier the size ratio between a level-(L+1) file
   *     and a level-L file.
   * @return the reference to the current options.
   */
  T setTargetFileSizeMultiplier(int multiplier);

  /**
   * targetFileSizeMultiplier defines the size ratio between a
   * level-(L+1) file and a level-L file.
   * By default targetFileSizeMultiplier is 1, meaning
   * files in different levels have the same target.
   *
   * @return the size ratio between a level-(L+1) file and a level-L file.
   */
  int targetFileSizeMultiplier();

  /**
   * The ratio between the total size of level-(L+1) files and the total
   * size of level-L files for all L.
   * Default: 10
   *
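   * <p>A hedged sketch combining this ratio with
   * {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)};
   * the totals then target 512MB for level-1, 5GB for level-2, 50GB for
   * level-3, and so on:
   * <pre>{@code
   * try (ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
   *          .setMaxBytesForLevelBase(512 * 1024 * 1024) // total size of level-1
   *          .setMaxBytesForLevelMultiplier(10.0)) {     // 10x per level below that
   *   // open the column family with cfOpts
   * }
   * }</pre>
   *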
   * @param multiplier the ratio between the total size of level-(L+1)
   *     files and the total size of level-L files for all L.
   * @return the reference to the current options.
   * @see MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)
   */
  T setMaxBytesForLevelMultiplier(double multiplier);

  /**
   * The ratio between the total size of level-(L+1) files and the total
   * size of level-L files for all L.
   * Default: 10
   *
   * @return the ratio between the total size of level-(L+1) files and
   *     the total size of level-L files for all L.
   * @see MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()
   */
  double maxBytesForLevelMultiplier();

  /**
   * Different max-size multipliers for different levels.
   * These are multiplied by max_bytes_for_level_multiplier to arrive
   * at the max-size of each level.
   *
   * Default: 1
   *
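   * <p>A hedged sketch; the array carries one entry per level, here keeping
   * the effective multiplier unchanged for six levels and doubling it for the
   * last:
   * <pre>{@code
   * try (ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
   *          .setMaxBytesForLevelMultiplierAdditional(
   *              new int[]{1, 1, 1, 1, 1, 1, 2})) {
   *   // open the column family with cfOpts
   * }
   * }</pre>
   *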
   * @param maxBytesForLevelMultiplierAdditional the max-size multipliers
   *   for each level
   * @return the reference to the current options.
   */
  T setMaxBytesForLevelMultiplierAdditional(
      int[] maxBytesForLevelMultiplierAdditional);

  /**
   * Different max-size multipliers for different levels.
   * These are multiplied by max_bytes_for_level_multiplier to arrive
   * at the max-size of each level.
   *
   * Default: 1
   *
   * @return the max-size multipliers for each level
   */
  int[] maxBytesForLevelMultiplierAdditional();

  /**
   * All writes will be slowed down to at least delayed_write_rate if the
   * estimated bytes needed to be compacted exceed this threshold.
   *
   * Default: 64GB
   *
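   * <p>A hedged sketch of raising the soft and hard limits together at
   * runtime; {@code db} and {@code handle} are assumed to exist:
   * <pre>{@code
   * db.setOptions(handle,
   *     MutableColumnFamilyOptions.builder()
   *         .setSoftPendingCompactionBytesLimit(128L * 1024 * 1024 * 1024) // 128GB
   *         .setHardPendingCompactionBytesLimit(512L * 1024 * 1024 * 1024) // 512GB
   *         .build());
   * }</pre>
   *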
   * @param softPendingCompactionBytesLimit the soft limit to impose on
   *   pending compaction bytes
   * @return the reference to the current options.
   */
  T setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit);

  /**
   * All writes will be slowed down to at least delayed_write_rate if the
   * estimated bytes needed to be compacted exceed this threshold.
   *
   * Default: 64GB
   *
   * @return the soft limit to impose on pending compaction bytes
   */
  long softPendingCompactionBytesLimit();

  /**
   * All writes are stopped if the estimated bytes needed to be compacted
   * exceed this threshold.
   *
   * Default: 256GB
   *
   * @param hardPendingCompactionBytesLimit the hard limit to impose on
   *   pending compaction bytes
   * @return the reference to the current options.
   */
  T setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit);

  /**
   * All writes are stopped if the estimated bytes needed to be compacted
   * exceed this threshold.
   *
   * Default: 256GB
   *
   * @return the hard limit to impose on pending compaction bytes
   */
  long hardPendingCompactionBytesLimit();

  /**
   * An iterator's Next() call sequentially skips over keys with the same
   * user-key unless this option is set. This number specifies the number
   * of keys (with the same user-key) that will be sequentially
   * skipped before a reseek is issued.
   * Default: 8
   *
   * @param maxSequentialSkipInIterations the number of keys that can
   *     be skipped in an iteration.
   * @return the reference to the current options.
   */
  T setMaxSequentialSkipInIterations(long maxSequentialSkipInIterations);

  /**
   * An iterator's Next() call sequentially skips over keys with the same
   * user-key unless this option is set. This number specifies the number
   * of keys (with the same user-key) that will be sequentially
   * skipped before a reseek is issued.
   * Default: 8
   *
   * @return the number of keys that can be skipped in an iteration.
   */
  long maxSequentialSkipInIterations();

  /**
   * Maximum number of successive merge operations on a key in the memtable.
   *
   * When a merge operation is added to the memtable and the maximum number of
   * successive merges is reached, the value of the key will be calculated and
   * inserted into the memtable instead of the merge operation. This will
   * ensure that there are never more than max_successive_merges merge
   * operations in the memtable.
   *
   * Default: 0 (disabled)
   *
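   * <p>A hedged sketch; a merge operator must be configured for merge
   * operations to exist at all, here the built-in
   * {@link StringAppendOperator}:
   * <pre>{@code
   * try (StringAppendOperator stringAppend = new StringAppendOperator();
   *      ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
   *          .setMergeOperator(stringAppend)
   *          .setMaxSuccessiveMerges(64)) { // fold the value after 64 merges
   *   // open the column family with cfOpts
   * }
   * }</pre>
   *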
   * @param maxSuccessiveMerges the maximum number of successive merges.
   * @return the reference to the current options.
   * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms
   *   when the value overflows the underlying platform-specific value.
   */
  T setMaxSuccessiveMerges(long maxSuccessiveMerges);

  /**
   * Maximum number of successive merge operations on a key in the memtable.
   *
   * When a merge operation is added to the memtable and the maximum number of
   * successive merges is reached, the value of the key will be calculated and
   * inserted into the memtable instead of the merge operation. This will
   * ensure that there are never more than max_successive_merges merge
   * operations in the memtable.
   *
   * Default: 0 (disabled)
   *
   * @return the maximum number of successive merges.
   */
  long maxSuccessiveMerges();

  /**
   * After writing every SST file, reopen it and read all the keys.
   *
   * Default: false
   *
   * @param paranoidFileChecks true to enable paranoid file checks
   * @return the reference to the current options.
   */
  T setParanoidFileChecks(boolean paranoidFileChecks);

  /**
   * After writing every SST file, reopen it and read all the keys.
   *
   * Default: false
   *
   * @return true if paranoid file checks are enabled
   */
  boolean paranoidFileChecks();

  /**
   * Measure IO stats in compactions and flushes, if true.
   *
   * Default: false
   *
   * @param reportBgIoStats true to enable reporting
   * @return the reference to the current options.
   */
  T setReportBgIoStats(boolean reportBgIoStats);

  /**
   * Determine whether IO stats in compactions and flushes are being measured.
   *
   * @return true if reporting is enabled
   */
  boolean reportBgIoStats();

  /**
   * Non-bottom-level files older than TTL will go through the compaction
   * process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be
   * set to -1.
   *
   * Enabled only for level compaction for now.
   *
   * Default: 0 (disabled)
   *
   * Dynamically changeable through
   * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
   *
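   * <p>A hedged sketch of enabling a 30-day TTL at runtime (the value is
   * assumed to be in seconds); {@code db} and {@code handle} are assumed to
   * exist:
   * <pre>{@code
   * db.setOptions(handle,
   *     MutableColumnFamilyOptions.builder()
   *         .setTtl(30L * 24 * 60 * 60) // 30 days, in seconds
   *         .build());
   * }</pre>
   *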
   * @param ttl the time-to-live.
   *
   * @return the reference to the current options.
   */
  T setTtl(final long ttl);

  /**
   * Get the TTL for non-bottom-level files that will go through the
   * compaction process.
   *
   * See {@link #setTtl(long)}.
   *
   * @return the time-to-live.
   */
  long ttl();
}