// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).

package org.rocksdb;

8 public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInterface<T>>
9     extends AdvancedColumnFamilyOptionsInterface<T> {
10   /**
11    * Use this if your DB is very small (like under 1GB) and you don't want to
12    * spend lots of memory for memtables.
13    *
14    * @return the instance of the current object.
15    */
optimizeForSmallDb()16   T optimizeForSmallDb();
17 
18   /**
19    * Use this if you don't need to keep the data sorted, i.e. you'll never use
20    * an iterator, only Put() and Get() API calls
21    *
22    * @param blockCacheSizeMb Block cache size in MB
23    * @return the instance of the current object.
24    */
optimizeForPointLookup(long blockCacheSizeMb)25   T optimizeForPointLookup(long blockCacheSizeMb);
26 
27   /**
28    * <p>Default values for some parameters in ColumnFamilyOptions are not
29    * optimized for heavy workloads and big datasets, which means you might
30    * observe write stalls under some conditions. As a starting point for tuning
31    * RocksDB options, use the following for level style compaction.</p>
32    *
33    * <p>Make sure to also call IncreaseParallelism(), which will provide the
34    * biggest performance gains.</p>
35    * <p>Note: we might use more memory than memtable_memory_budget during high
36    * write rate period</p>
37    *
38    * @return the instance of the current object.
39    */
optimizeLevelStyleCompaction()40   T optimizeLevelStyleCompaction();
41 
42   /**
43    * <p>Default values for some parameters in ColumnFamilyOptions are not
44    * optimized for heavy workloads and big datasets, which means you might
45    * observe write stalls under some conditions. As a starting point for tuning
46    * RocksDB options, use the following for level style compaction.</p>
47    *
48    * <p>Make sure to also call IncreaseParallelism(), which will provide the
49    * biggest performance gains.</p>
50    * <p>Note: we might use more memory than memtable_memory_budget during high
51    * write rate period</p>
52    *
53    * @param memtableMemoryBudget memory budget in bytes
54    * @return the instance of the current object.
55    */
optimizeLevelStyleCompaction( long memtableMemoryBudget)56   T optimizeLevelStyleCompaction(
57       long memtableMemoryBudget);
58 
59   /**
60    * <p>Default values for some parameters in ColumnFamilyOptions are not
61    * optimized for heavy workloads and big datasets, which means you might
62    * observe write stalls under some conditions. As a starting point for tuning
63    * RocksDB options, use the following for universal style compaction.</p>
64    *
65    * <p>Universal style compaction is focused on reducing Write Amplification
66    * Factor for big data sets, but increases Space Amplification.</p>
67    *
68    * <p>Make sure to also call IncreaseParallelism(), which will provide the
69    * biggest performance gains.</p>
70    *
71    * <p>Note: we might use more memory than memtable_memory_budget during high
72    * write rate period</p>
73    *
74    * @return the instance of the current object.
75    */
optimizeUniversalStyleCompaction()76   T optimizeUniversalStyleCompaction();
77 
78   /**
79    * <p>Default values for some parameters in ColumnFamilyOptions are not
80    * optimized for heavy workloads and big datasets, which means you might
81    * observe write stalls under some conditions. As a starting point for tuning
82    * RocksDB options, use the following for universal style compaction.</p>
83    *
84    * <p>Universal style compaction is focused on reducing Write Amplification
85    * Factor for big data sets, but increases Space Amplification.</p>
86    *
87    * <p>Make sure to also call IncreaseParallelism(), which will provide the
88    * biggest performance gains.</p>
89    *
90    * <p>Note: we might use more memory than memtable_memory_budget during high
91    * write rate period</p>
92    *
93    * @param memtableMemoryBudget memory budget in bytes
94    * @return the instance of the current object.
95    */
optimizeUniversalStyleCompaction( long memtableMemoryBudget)96   T optimizeUniversalStyleCompaction(
97       long memtableMemoryBudget);
98 
99   /**
100    * Set {@link BuiltinComparator} to be used with RocksDB.
101    *
102    * Note: Comparator can be set once upon database creation.
103    *
104    * Default: BytewiseComparator.
105    * @param builtinComparator a {@link BuiltinComparator} type.
106    * @return the instance of the current object.
107    */
setComparator( BuiltinComparator builtinComparator)108   T setComparator(
109       BuiltinComparator builtinComparator);
110 
111   /**
112    * Use the specified comparator for key ordering.
113    *
114    * Comparator should not be disposed before options instances using this comparator is
115    * disposed. If dispose() function is not called, then comparator object will be
116    * GC'd automatically.
117    *
118    * Comparator instance can be re-used in multiple options instances.
119    *
120    * @param comparator java instance.
121    * @return the instance of the current object.
122    */
setComparator( AbstractComparator comparator)123   T setComparator(
124       AbstractComparator comparator);
125 
126   /**
127    * <p>Set the merge operator to be used for merging two merge operands
128    * of the same key. The merge function is invoked during
129    * compaction and at lookup time, if multiple key/value pairs belonging
130    * to the same key are found in the database.</p>
131    *
132    * @param name the name of the merge function, as defined by
133    * the MergeOperators factory (see utilities/MergeOperators.h)
134    * The merge function is specified by name and must be one of the
135    * standard merge operators provided by RocksDB. The available
136    * operators are "put", "uint64add", "stringappend" and "stringappendtest".
137    * @return the instance of the current object.
138    */
setMergeOperatorName(String name)139   T setMergeOperatorName(String name);
140 
141   /**
142    * <p>Set the merge operator to be used for merging two different key/value
143    * pairs that share the same key. The merge function is invoked during
144    * compaction and at lookup time, if multiple key/value pairs belonging
145    * to the same key are found in the database.</p>
146    *
147    * @param mergeOperator {@link MergeOperator} instance.
148    * @return the instance of the current object.
149    */
setMergeOperator(MergeOperator mergeOperator)150   T setMergeOperator(MergeOperator mergeOperator);
151 
152   /**
153    * A single CompactionFilter instance to call into during compaction.
154    * Allows an application to modify/delete a key-value during background
155    * compaction.
156    *
157    * If the client requires a new compaction filter to be used for different
158    * compaction runs, it can specify call
159    * {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)}
160    * instead.
161    *
162    * The client should specify only set one of the two.
163    * {@link #setCompactionFilter(AbstractCompactionFilter)} takes precedence
164    * over {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)}
165    * if the client specifies both.
166    *
167    * If multithreaded compaction is being used, the supplied CompactionFilter
168    * instance may be used from different threads concurrently and so should be thread-safe.
169    *
170    * @param compactionFilter {@link AbstractCompactionFilter} instance.
171    * @return the instance of the current object.
172    */
setCompactionFilter( final AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter)173   T setCompactionFilter(
174           final AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter);
175 
176   /**
177    * Accessor for the CompactionFilter instance in use.
178    *
179    * @return  Reference to the CompactionFilter, or null if one hasn't been set.
180    */
compactionFilter()181   AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter();
182 
183   /**
184    * This is a factory that provides {@link AbstractCompactionFilter} objects
185    * which allow an application to modify/delete a key-value during background
186    * compaction.
187    *
188    * A new filter will be created on each compaction run.  If multithreaded
189    * compaction is being used, each created CompactionFilter will only be used
190    * from a single thread and so does not need to be thread-safe.
191    *
192    * @param compactionFilterFactory {@link AbstractCompactionFilterFactory} instance.
193    * @return the instance of the current object.
194    */
setCompactionFilterFactory( final AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>> compactionFilterFactory)195   T setCompactionFilterFactory(
196           final AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>>
197                   compactionFilterFactory);
198 
199   /**
200    * Accessor for the CompactionFilterFactory instance in use.
201    *
202    * @return  Reference to the CompactionFilterFactory, or null if one hasn't been set.
203    */
compactionFilterFactory()204   AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>> compactionFilterFactory();
205 
206   /**
207    * This prefix-extractor uses the first n bytes of a key as its prefix.
208    *
209    * In some hash-based memtable representation such as HashLinkedList
210    * and HashSkipList, prefixes are used to partition the keys into
211    * several buckets.  Prefix extractor is used to specify how to
212    * extract the prefix given a key.
213    *
214    * @param n use the first n bytes of a key as its prefix.
215    * @return the reference to the current option.
216    */
useFixedLengthPrefixExtractor(int n)217   T useFixedLengthPrefixExtractor(int n);
218 
219   /**
220    * Same as fixed length prefix extractor, except that when slice is
221    * shorter than the fixed length, it will use the full key.
222    *
223    * @param n use the first n bytes of a key as its prefix.
224    * @return the reference to the current option.
225    */
useCappedPrefixExtractor(int n)226   T useCappedPrefixExtractor(int n);
227 
228   /**
229    * Number of files to trigger level-0 compaction. A value &lt; 0 means that
230    * level-0 compaction will not be triggered by number of files at all.
231    * Default: 4
232    *
233    * @param numFiles the number of files in level-0 to trigger compaction.
234    * @return the reference to the current option.
235    */
setLevelZeroFileNumCompactionTrigger( int numFiles)236   T setLevelZeroFileNumCompactionTrigger(
237       int numFiles);
238 
239   /**
240    * The number of files in level 0 to trigger compaction from level-0 to
241    * level-1.  A value &lt; 0 means that level-0 compaction will not be
242    * triggered by number of files at all.
243    * Default: 4
244    *
245    * @return the number of files in level 0 to trigger compaction.
246    */
levelZeroFileNumCompactionTrigger()247   int levelZeroFileNumCompactionTrigger();
248 
249   /**
250    * Soft limit on number of level-0 files. We start slowing down writes at this
251    * point. A value &lt; 0 means that no writing slow down will be triggered by
252    * number of files in level-0.
253    *
254    * @param numFiles soft limit on number of level-0 files.
255    * @return the reference to the current option.
256    */
setLevelZeroSlowdownWritesTrigger( int numFiles)257   T setLevelZeroSlowdownWritesTrigger(
258       int numFiles);
259 
260   /**
261    * Soft limit on the number of level-0 files. We start slowing down writes
262    * at this point. A value &lt; 0 means that no writing slow down will be
263    * triggered by number of files in level-0.
264    *
265    * @return the soft limit on the number of level-0 files.
266    */
levelZeroSlowdownWritesTrigger()267   int levelZeroSlowdownWritesTrigger();
268 
269   /**
270    * Maximum number of level-0 files.  We stop writes at this point.
271    *
272    * @param numFiles the hard limit of the number of level-0 files.
273    * @return the reference to the current option.
274    */
setLevelZeroStopWritesTrigger(int numFiles)275   T setLevelZeroStopWritesTrigger(int numFiles);
276 
277   /**
278    * Maximum number of level-0 files.  We stop writes at this point.
279    *
280    * @return the hard limit of the number of level-0 file.
281    */
levelZeroStopWritesTrigger()282   int levelZeroStopWritesTrigger();
283 
284   /**
285    * The ratio between the total size of level-(L+1) files and the total
286    * size of level-L files for all L.
287    * DEFAULT: 10
288    *
289    * @param multiplier the ratio between the total size of level-(L+1)
290    *     files and the total size of level-L files for all L.
291    * @return the reference to the current option.
292    */
setMaxBytesForLevelMultiplier( double multiplier)293   T setMaxBytesForLevelMultiplier(
294       double multiplier);
295 
296   /**
297    * The ratio between the total size of level-(L+1) files and the total
298    * size of level-L files for all L.
299    * DEFAULT: 10
300    *
301    * @return the ratio between the total size of level-(L+1) files and
302    *     the total size of level-L files for all L.
303    */
maxBytesForLevelMultiplier()304   double maxBytesForLevelMultiplier();
305 
306   /**
307    * FIFO compaction option.
308    * The oldest table file will be deleted
309    * once the sum of table files reaches this size.
310    * The default value is 1GB (1 * 1024 * 1024 * 1024).
311    *
312    * @param maxTableFilesSize the size limit of the total sum of table files.
313    * @return the instance of the current object.
314    */
setMaxTableFilesSizeFIFO( long maxTableFilesSize)315   T setMaxTableFilesSizeFIFO(
316       long maxTableFilesSize);
317 
318   /**
319    * FIFO compaction option.
320    * The oldest table file will be deleted
321    * once the sum of table files reaches this size.
322    * The default value is 1GB (1 * 1024 * 1024 * 1024).
323    *
324    * @return the size limit of the total sum of table files.
325    */
maxTableFilesSizeFIFO()326   long maxTableFilesSizeFIFO();
327 
328   /**
329    * Get the config for mem-table.
330    *
331    * @return the mem-table config.
332    */
memTableConfig()333   MemTableConfig memTableConfig();
334 
335   /**
336    * Set the config for mem-table.
337    *
338    * @param memTableConfig the mem-table config.
339    * @return the instance of the current object.
340    * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms
341    *   while overflowing the underlying platform specific value.
342    */
setMemTableConfig(MemTableConfig memTableConfig)343   T setMemTableConfig(MemTableConfig memTableConfig);
344 
345   /**
346    * Returns the name of the current mem table representation.
347    * Memtable format can be set using setTableFormatConfig.
348    *
349    * @return the name of the currently-used memtable factory.
350    * @see #setTableFormatConfig(org.rocksdb.TableFormatConfig)
351    */
memTableFactoryName()352   String memTableFactoryName();
353 
354   /**
355    * Get the config for table format.
356    *
357    * @return the table format config.
358    */
tableFormatConfig()359   TableFormatConfig tableFormatConfig();
360 
361   /**
362    * Set the config for table format.
363    *
364    * @param config the table format config.
365    * @return the reference of the current options.
366    */
setTableFormatConfig(TableFormatConfig config)367   T setTableFormatConfig(TableFormatConfig config);
368 
369   /**
370    * @return the name of the currently used table factory.
371    */
tableFactoryName()372   String tableFactoryName();
373 
374   /**
375    * Compression algorithm that will be used for the bottommost level that
376    * contain files. If level-compaction is used, this option will only affect
377    * levels after base level.
378    *
379    * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
380    *
381    * @param bottommostCompressionType  The compression type to use for the
382    *     bottommost level
383    *
384    * @return the reference of the current options.
385    */
setBottommostCompressionType( final CompressionType bottommostCompressionType)386   T setBottommostCompressionType(
387       final CompressionType bottommostCompressionType);
388 
389   /**
390    * Compression algorithm that will be used for the bottommost level that
391    * contain files. If level-compaction is used, this option will only affect
392    * levels after base level.
393    *
394    * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION}
395    *
396    * @return The compression type used for the bottommost level
397    */
bottommostCompressionType()398   CompressionType bottommostCompressionType();
399 
400   /**
401    * Set the options for compression algorithms used by
402    * {@link #bottommostCompressionType()} if it is enabled.
403    *
404    * To enable it, please see the definition of
405    * {@link CompressionOptions}.
406    *
407    * @param compressionOptions the bottom most compression options.
408    *
409    * @return the reference of the current options.
410    */
setBottommostCompressionOptions( final CompressionOptions compressionOptions)411   T setBottommostCompressionOptions(
412       final CompressionOptions compressionOptions);
413 
414   /**
415    * Get the bottom most compression options.
416    *
417    * See {@link #setBottommostCompressionOptions(CompressionOptions)}.
418    *
419    * @return the bottom most compression options.
420    */
bottommostCompressionOptions()421   CompressionOptions bottommostCompressionOptions();
422 
423   /**
424    * Set the different options for compression algorithms
425    *
426    * @param compressionOptions The compression options
427    *
428    * @return the reference of the current options.
429    */
setCompressionOptions( CompressionOptions compressionOptions)430   T setCompressionOptions(
431       CompressionOptions compressionOptions);
432 
433   /**
434    * Get the different options for compression algorithms
435    *
436    * @return The compression options
437    */
compressionOptions()438   CompressionOptions compressionOptions();
439 
440   /**
441    * Default memtable memory budget used with the following methods:
442    *
443    * <ol>
444    *   <li>{@link #optimizeLevelStyleCompaction()}</li>
445    *   <li>{@link #optimizeUniversalStyleCompaction()}</li>
446    * </ol>
447    */
448   long DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET = 512 * 1024 * 1024;
449 }
450