add cache options for rocksdb (#567)
Change-Id: Icc4ae746f3117905960b8de52ae59386e0684584
javeme authored and zhoney committed Jun 26, 2019
1 parent ab4a26c commit f7a619f
Showing 4 changed files with 153 additions and 22 deletions.
2 changes: 1 addition & 1 deletion hugegraph-core/pom.xml
@@ -19,7 +19,7 @@
<dependency>
<groupId>com.baidu.hugegraph</groupId>
<artifactId>hugegraph-common</artifactId>
-<version>1.6.3</version>
+<version>1.6.5</version>
</dependency>

<!-- tinkerpop -->
@@ -26,9 +26,9 @@
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.io.FileUtils;

-import com.baidu.hugegraph.config.ConfigOption;
import com.baidu.hugegraph.config.HugeConfig;
import com.baidu.hugegraph.config.OptionSpace;
+import com.baidu.hugegraph.config.TypedOption;
import com.baidu.hugegraph.dist.RegisterUtil;
import com.baidu.hugegraph.util.E;

@@ -52,12 +52,12 @@ public static void main(String[] args)
HugeConfig config = new HugeConfig(input);

for (String name : new TreeSet<>(OptionSpace.keys())) {
-ConfigOption<?> option = OptionSpace.get(name);
+TypedOption<?, ?> option = OptionSpace.get(name);
writeOption(output, option, config.get(option));
}
}

-private static void writeOption(File output, ConfigOption<?> option,
+private static void writeOption(File output, TypedOption<?, ?> option,
Object value) throws IOException {
StringBuilder sb = new StringBuilder();
sb.append("# ").append(option.desc()).append(EOL);
@@ -21,9 +21,15 @@

import static com.baidu.hugegraph.config.OptionChecker.allowValues;
import static com.baidu.hugegraph.config.OptionChecker.disallowEmpty;
+import static com.baidu.hugegraph.config.OptionChecker.inValues;
import static com.baidu.hugegraph.config.OptionChecker.rangeDouble;
import static com.baidu.hugegraph.config.OptionChecker.rangeInt;

+import org.rocksdb.CompactionStyle;
+import org.rocksdb.CompressionType;

+import com.baidu.hugegraph.config.ConfigConvOption;
+import com.baidu.hugegraph.config.ConfigListConvOption;
import com.baidu.hugegraph.config.ConfigListOption;
import com.baidu.hugegraph.config.ConfigOption;
import com.baidu.hugegraph.config.OptionHolder;
@@ -100,11 +106,12 @@ public static synchronized RocksDBOptions instance() {
7
);

-public static final ConfigOption<String> COMPACTION_STYLE =
-new ConfigOption<>(
+public static final ConfigConvOption<CompactionStyle> COMPACTION_STYLE =
+new ConfigConvOption<>(
"rocksdb.compaction_style",
"Set compaction style for RocksDB: LEVEL/UNIVERSAL/FIFO.",
allowValues("LEVEL", "UNIVERSAL", "FIFO"),
+CompactionStyle::valueOf,
"LEVEL"
);
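Note: judging by the call sites later in this diff, a ConfigConvOption applies the supplied converter when the option is read, so callers receive a typed value directly. An illustrative line (grounded in the initOptions() change below):

CompactionStyle style = conf.get(RocksDBOptions.COMPACTION_STYLE); // yields CompactionStyle.LEVEL, not the raw string "LEVEL"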

@@ -124,11 +131,34 @@ public static synchronized RocksDBOptions instance() {
false
);

-public static final ConfigOption<String> COMPRESSION_TYPE =
-new ConfigOption<>(
-"rocksdb.compression_type",
-"The compression algorithm of RocksDB: snappy/z/bzip2/lz4/lz4hc/xpress/zstd.",
-allowValues("snappy", "z", "bzip2", "lz4", "lz4hc", "xpress", "zstd"),
+public static final ConfigListConvOption<String, CompressionType> LEVELS_COMPRESSIONS =
+new ConfigListConvOption<>(
+"rocksdb.compression_per_level",
+"The compression algorithms for different levels of RocksDB, " +
+"allowed values are none/snappy/z/bzip2/lz4/lz4hc/xpress/zstd.",
+inValues("none", "snappy", "z", "bzip2", "lz4", "lz4hc", "xpress", "zstd"),
+CompressionType::getCompressionType,
+String.class,
+"none", "none", "snappy", "snappy", "snappy", "snappy", "snappy"
+);
+
+public static final ConfigConvOption<CompressionType> BOTTOMMOST_COMPRESSION =
+new ConfigConvOption<>(
+"rocksdb.bottommost_compression",
+"The compression algorithm for the bottommost level of RocksDB, " +
+"allowed values are none/snappy/z/bzip2/lz4/lz4hc/xpress/zstd.",
+allowValues("none", "snappy", "z", "bzip2", "lz4", "lz4hc", "xpress", "zstd"),
+CompressionType::getCompressionType,
+"none"
+);
+
+public static final ConfigConvOption<CompressionType> COMPRESSION =
+new ConfigConvOption<>(
+"rocksdb.compression",
+"The compression algorithm for compressing blocks of RocksDB, " +
+"allowed values are none/snappy/z/bzip2/lz4/lz4hc/xpress/zstd.",
+allowValues("none", "snappy", "z", "bzip2", "lz4", "lz4hc", "xpress", "zstd"),
+CompressionType::getCompressionType,
"snappy"
);
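For reference, the three compression options above might be combined in a RocksDB backend properties file along these lines (illustrative values; the comma-separated list syntax for rocksdb.compression_per_level is an assumption, and per the check added later in this diff its length must be 0 or equal to rocksdb.num_levels):

rocksdb.compression=snappy
rocksdb.compression_per_level=none,none,snappy,snappy,snappy,snappy,snappy
rocksdb.bottommost_compression=none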

@@ -265,7 +295,67 @@ public static synchronized RocksDBOptions instance() {
public static final ConfigOption<Boolean> USE_DIRECT_READS_WRITES_FC =
new ConfigOption<>(
"rocksdb.use_direct_io_for_flush_and_compaction",
"Enable the OS to use direct reads and writes in flush and compaction.",
"Enable the OS to use direct read/writes in flush and compaction.",
disallowEmpty(),
false
);

+public static final ConfigOption<Long> BLOCK_CACHE_CAPACITY =
+new ConfigOption<>(
+"rocksdb.block_cache_capacity",
+"The amount of block cache in bytes that will be used by RocksDB, " +
+"0 means no block cache.",
+rangeInt(0L, Long.MAX_VALUE),
+8L * Bytes.MB
+);
+
+public static final ConfigOption<Boolean> PIN_L0_FILTER_AND_INDEX_IN_CACHE =
+new ConfigOption<>(
+"rocksdb.pin_l0_filter_and_index_blocks_in_cache",
+"Indicating if we'd pin L0 index/filter blocks to the block cache.",
+disallowEmpty(),
+false
+);
+
+public static final ConfigOption<Boolean> PUT_FILTER_AND_INDEX_IN_CACHE =
+new ConfigOption<>(
+"rocksdb.cache_index_and_filter_blocks",
+"Indicating if we'd put index/filter blocks to the block cache.",
+disallowEmpty(),
+false
+);
+
+public static final ConfigOption<Integer> BLOOM_FILTER_BITS_PER_KEY =
+new ConfigOption<>(
+"rocksdb.bloom_filter_bits_per_key",
+"The bits per key in bloom filter, a good value is 10, " +
+"which yields a filter with ~ 1% false positive rate, " +
+"-1 means no bloom filter.",
+rangeInt(-1, Integer.MAX_VALUE),
+-1
+);
+
+public static final ConfigOption<Boolean> BLOOM_FILTER_MODE =
+new ConfigOption<>(
+"rocksdb.bloom_filter_block_based_mode",
+"Use block based filter rather than full filter.",
+disallowEmpty(),
+false
+);
+
+public static final ConfigOption<Boolean> BLOOM_FILTER_WHOLE_KEY =
+new ConfigOption<>(
+"rocksdb.bloom_filter_whole_key_filtering",
+"True to place whole keys in the bloom filter, " +
+"else place the prefix of keys.",
+disallowEmpty(),
+true
+);
+
+public static final ConfigOption<Boolean> BLOOM_FILTERS_SKIP_LAST_LEVEL =
+new ConfigOption<>(
+"rocksdb.optimize_filters_for_hits",
+"This flag allows us to not store filters for the last level.",
+disallowEmpty(),
+false
+);
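As a sanity check on the "~1% false positive rate" claim in the rocksdb.bloom_filter_bits_per_key description above: for a bloom filter using m/n bits per key with the optimal number of hash functions k = (m/n) ln 2, the false positive rate is approximately

\[ p \approx \left(1 - e^{-kn/m}\right)^{k} \approx 0.6185^{\,m/n} \]

so 10 bits per key gives p ≈ 0.6185^10 ≈ 0.008, just under 1%, and each extra bit per key cuts the rate by a further factor of about 0.62.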
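Taken together, the new cache and bloom filter options could appear in a RocksDB backend properties file like this (illustrative, non-default values; file location and naming depend on the deployment):

rocksdb.block_cache_capacity=268435456
rocksdb.cache_index_and_filter_blocks=true
rocksdb.pin_l0_filter_and_index_blocks_in_cache=true
rocksdb.bloom_filter_bits_per_key=10
rocksdb.bloom_filter_block_based_mode=false
rocksdb.bloom_filter_whole_key_filtering=true
rocksdb.optimize_filters_for_hits=false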
@@ -30,11 +30,12 @@
import java.util.NoSuchElementException;
import java.util.Set;

+import org.rocksdb.BlockBasedTableConfig;
+import org.rocksdb.BloomFilter;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.ColumnFamilyOptionsInterface;
-import org.rocksdb.CompactionStyle;
import org.rocksdb.CompressionType;
import org.rocksdb.DBOptions;
import org.rocksdb.DBOptionsInterface;
@@ -314,27 +315,67 @@ public static void initOptions(HugeConfig conf,
cf.optimizeUniversalStyleCompaction();
}

-cf.setNumLevels(conf.get(RocksDBOptions.NUM_LEVELS));
-cf.setCompactionStyle(CompactionStyle.valueOf(
-conf.get(RocksDBOptions.COMPACTION_STYLE)));
+int numLevels = conf.get(RocksDBOptions.NUM_LEVELS);
+List<CompressionType> compressions = conf.get(
+RocksDBOptions.LEVELS_COMPRESSIONS);
+E.checkArgument(compressions.isEmpty() ||
+compressions.size() == numLevels,
+"Elements number of '%s' must be 0 or " +
+"be the same as '%s', but got %s != %s",
+RocksDBOptions.LEVELS_COMPRESSIONS.name(),
+RocksDBOptions.NUM_LEVELS.name(),
+compressions.size(), numLevels);

+cf.setNumLevels(numLevels);
+cf.setCompactionStyle(conf.get(RocksDBOptions.COMPACTION_STYLE));

+cf.setBottommostCompressionType(
+conf.get(RocksDBOptions.BOTTOMMOST_COMPRESSION));
+if (!compressions.isEmpty()) {
+cf.setCompressionPerLevel(compressions);
+}

cf.setMinWriteBufferNumberToMerge(
conf.get(RocksDBOptions.MIN_MEMTABLES_TO_MERGE));
cf.setMaxWriteBufferNumberToMaintain(
conf.get(RocksDBOptions.MAX_MEMTABLES_TO_MAINTAIN));

+// https://github.com/facebook/rocksdb/wiki/Block-Cache
+BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
+long cacheCapacity = conf.get(RocksDBOptions.BLOCK_CACHE_CAPACITY);
+if (cacheCapacity <= 0L) {
+// Bypassing bug https://github.com/facebook/rocksdb/pull/5465
+tableConfig.setNoBlockCache(true);
+} else {
+tableConfig.setBlockCacheSize(cacheCapacity);
+}
+tableConfig.setPinL0FilterAndIndexBlocksInCache(
+conf.get(RocksDBOptions.PIN_L0_FILTER_AND_INDEX_IN_CACHE));
+tableConfig.setCacheIndexAndFilterBlocks(
+conf.get(RocksDBOptions.PUT_FILTER_AND_INDEX_IN_CACHE));
+
+// https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter
+int bitsPerKey = conf.get(RocksDBOptions.BLOOM_FILTER_BITS_PER_KEY);
+if (bitsPerKey >= 0) {
+boolean blockBased = conf.get(RocksDBOptions.BLOOM_FILTER_MODE);
+tableConfig.setFilter(new BloomFilter(bitsPerKey, blockBased));
+}
+tableConfig.setWholeKeyFiltering(
+conf.get(RocksDBOptions.BLOOM_FILTER_WHOLE_KEY));
+cf.setTableFormatConfig(tableConfig);
+
+cf.setOptimizeFiltersForHits(
+conf.get(RocksDBOptions.BLOOM_FILTERS_SKIP_LAST_LEVEL));

// https://github.com/facebook/rocksdb/tree/master/utilities/merge_operators
cf.setMergeOperatorName("uint64add"); // uint64add/stringappend
}

if (mcf != null) {
-mcf.setCompressionType(CompressionType.getCompressionType(
-conf.get(RocksDBOptions.COMPRESSION_TYPE)));
+mcf.setCompressionType(conf.get(RocksDBOptions.COMPRESSION));

-mcf.setWriteBufferSize(
-conf.get(RocksDBOptions.MEMTABLE_SIZE));
-mcf.setMaxWriteBufferNumber(
-conf.get(RocksDBOptions.MAX_MEMTABLES));
+mcf.setWriteBufferSize(conf.get(RocksDBOptions.MEMTABLE_SIZE));
+mcf.setMaxWriteBufferNumber(conf.get(RocksDBOptions.MAX_MEMTABLES));

mcf.setMaxBytesForLevelBase(
conf.get(RocksDBOptions.MAX_LEVEL1_BYTES));
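For readers who want to experiment outside HugeGraph, here is a minimal standalone sketch of the same RocksJava wiring (the class name and literal values are ours; the API calls mirror the initOptions() changes above):

import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.BloomFilter;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.RocksDB;

public class BlockCacheExample {
    public static void main(String[] args) {
        RocksDB.loadLibrary();

        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
        // 8MB, the default of rocksdb.block_cache_capacity
        long cacheCapacity = 8L * 1024 * 1024;
        if (cacheCapacity <= 0L) {
            // Bypassing bug https://github.com/facebook/rocksdb/pull/5465
            tableConfig.setNoBlockCache(true);
        } else {
            tableConfig.setBlockCacheSize(cacheCapacity);
        }
        tableConfig.setCacheIndexAndFilterBlocks(false);
        tableConfig.setPinL0FilterAndIndexBlocksInCache(false);

        // 10 bits per key (~1% false positives), full rather than block-based filter
        tableConfig.setFilter(new BloomFilter(10, false));
        tableConfig.setWholeKeyFiltering(true);

        try (ColumnFamilyOptions cf = new ColumnFamilyOptions()) {
            cf.setTableFormatConfig(tableConfig);
            cf.setOptimizeFiltersForHits(false);
        }
    }
}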
