From ed74ed10b402029429b20194e0806a0bea02c24c Mon Sep 17 00:00:00 2001
From: dungba88
Date: Wed, 3 Apr 2024 21:04:03 +0900
Subject: [PATCH] Rename NodeHash to FSTSuffixNodeCache

---
 .../apache/lucene/util/fst/FSTCompiler.java   | 12 +++++-----
 ...{NodeHash.java => FSTSuffixNodeCache.java} | 22 ++++++++++++++++---
 ...eHash.java => TestFSTSuffixNodeCache.java} | 10 +++++----
 3 files changed, 31 insertions(+), 13 deletions(-)
 rename lucene/core/src/java/org/apache/lucene/util/fst/{NodeHash.java => FSTSuffixNodeCache.java} (93%)
 rename lucene/core/src/test/org/apache/lucene/util/fst/{TestNodeHash.java => TestFSTSuffixNodeCache.java} (84%)

diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/FSTCompiler.java b/lucene/core/src/java/org/apache/lucene/util/fst/FSTCompiler.java
index e837b7775e0c..51f5c67d1af3 100644
--- a/lucene/core/src/java/org/apache/lucene/util/fst/FSTCompiler.java
+++ b/lucene/core/src/java/org/apache/lucene/util/fst/FSTCompiler.java
@@ -98,8 +98,8 @@ public class FSTCompiler<T> {
   // it will throw exceptions if attempt to call getReverseBytesReader() or writeTo(DataOutput)
   private static final FSTReader NULL_FST_READER = new NullFSTReader();
 
-  private final NodeHash<T> dedupHash;
-  // a temporary FST used during building for NodeHash cache
+  private final FSTSuffixNodeCache<T> suffixDedupCache;
+  // a temporary FST used during building for the FSTSuffixNodeCache
   final FST<T> fst;
   private final T NO_OUTPUT;
 
@@ -178,9 +178,9 @@ private FSTCompiler(
     if (suffixRAMLimitMB < 0) {
       throw new IllegalArgumentException("ramLimitMB must be >= 0; got: " + suffixRAMLimitMB);
     } else if (suffixRAMLimitMB > 0) {
-      dedupHash = new NodeHash<>(this, suffixRAMLimitMB);
+      suffixDedupCache = new FSTSuffixNodeCache<>(this, suffixRAMLimitMB);
     } else {
-      dedupHash = null;
+      suffixDedupCache = null;
     }
 
     NO_OUTPUT = outputs.getNoOutput();
@@ -379,12 +379,12 @@ public long getArcCount() {
   private CompiledNode compileNode(UnCompiledNode<T> nodeIn) throws IOException {
     final long node;
     long bytesPosStart = numBytesWritten;
-    if (dedupHash != null) {
+    if (suffixDedupCache != null) {
       if (nodeIn.numArcs == 0) {
         node = addNode(nodeIn);
         lastFrozenNode = node;
       } else {
-        node = dedupHash.add(nodeIn);
+        node = suffixDedupCache.add(nodeIn);
       }
     } else {
       node = addNode(nodeIn);
diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java b/lucene/core/src/java/org/apache/lucene/util/fst/FSTSuffixNodeCache.java
similarity index 93%
rename from lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java
rename to lucene/core/src/java/org/apache/lucene/util/fst/FSTSuffixNodeCache.java
index 7326fd77f73b..f33f09e90723 100644
--- a/lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java
+++ b/lucene/core/src/java/org/apache/lucene/util/fst/FSTSuffixNodeCache.java
@@ -31,8 +31,24 @@
 // TODO: couldn't we prune naturally back until we see a transition with an output? it's highly
 // unlikely (mostly impossible) such suffixes can be shared?
 
-// Used to dedup states (lookup already-frozen states)
-final class NodeHash<T> {
+/**
+ * This is essentially an LRU cache to maintain and look up node suffixes. An un-compiled node can
+ * be added to the cache, and if a similar node already exists we return its address in the FST. A
+ * node is considered similar if it has the same label, arcs, outputs, and other properties that
+ * identify a node.
+ *
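+ * <p>A rough usage sketch from the compiler's side (the variable names are illustrative, but
+ * {@code add} is the real entry point, as the {@code compileNode} change above shows):
+ *
+ * <pre>{@code
+ * // freeze a suffix through the cache: if an equivalent node was already frozen,
+ * // the returned address points at the existing copy and no new bytes are written
+ * long nodeAddress = suffixDedupCache.add(unCompiledNode);
+ * }</pre>
+ *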
+ * <p>The total size of the cache is controlled through the constructor parameter {@code
+ * ramLimitMB}. Implementation-wise, we maintain two lookup tables: a primary table that nodes
+ * are looked up from, and a fallback table that is consulted when a primary lookup fails. A node
+ * found in the fallback table is promoted back into the primary table. When the primary table is
+ * full, we swap it with the fallback table and clear out the primary table.
+ *
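+ * <p>The lookup, promotion, and swap flow, as a simplified sketch (the helper names here are
+ * illustrative, and the real code also verifies a candidate byte-for-byte against the stored
+ * node content before trusting a hash match):
+ *
+ * <pre>{@code
+ * long address = primary.get(hash);
+ * if (address == 0) {
+ *   address = fallback.get(hash);
+ *   if (address != 0) {
+ *     primary.put(hash, address); // promote the suffix to the primary table
+ *   }
+ * }
+ * if (primary.size() >= sizeLimit) {
+ *   fallback = primary;   // recently used suffixes become the fallback
+ *   primary = newTable(); // and the primary starts out empty
+ * }
+ * }</pre>
+ *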
+ * <p>To look up a node's address, we build a special hash table, PagedGrowableHash, which maps
+ * a node's hash value to its address in the FST. Internally it uses {@link PagedGrowableWriter}
+ * to store the mapping, which allows efficiently packing the hash and address long values, and
+ * uses {@link ByteBlockPool} to store the actual node content (arcs and outputs).
+ */
+final class FSTSuffixNodeCache<T> {
 
   // primary table -- we add nodes into this until it reaches the requested tableSizeLimit/2, then
   // we move it to fallback
@@ -60,7 +76,7 @@ final class NodeHash<T> {
    * recently used suffixes are discarded, and the FST is no longer minimal. Still, larger
    * ramLimitMB will make the FST smaller (closer to minimal).
    */
-  public NodeHash(FSTCompiler<T> fstCompiler, double ramLimitMB) {
+  public FSTSuffixNodeCache(FSTCompiler<T> fstCompiler, double ramLimitMB) {
     if (ramLimitMB <= 0) {
       throw new IllegalArgumentException("ramLimitMB must be > 0; got: " + ramLimitMB);
     }
diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestNodeHash.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTSuffixNodeCache.java
similarity index 84%
rename from lucene/core/src/test/org/apache/lucene/util/fst/TestNodeHash.java
rename to lucene/core/src/test/org/apache/lucene/util/fst/TestFSTSuffixNodeCache.java
index 8319f20efea3..2fb93c7f5a46 100644
--- a/lucene/core/src/test/org/apache/lucene/util/fst/TestNodeHash.java
+++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTSuffixNodeCache.java
@@ -19,14 +19,16 @@
 import com.carrotsearch.randomizedtesting.generators.RandomBytes;
 import org.apache.lucene.tests.util.LuceneTestCase;
 
-public class TestNodeHash extends LuceneTestCase {
+public class TestFSTSuffixNodeCache extends LuceneTestCase {
 
   public void testCopyFallbackNodeBytes() {
     // we don't need the FSTCompiler in this test
-    NodeHash<Object> nodeHash = new NodeHash<>(null, 1);
+    FSTSuffixNodeCache<Object> suffixCache = new FSTSuffixNodeCache<>(null, 1);
 
-    NodeHash<Object>.PagedGrowableHash primaryHashTable = nodeHash.new PagedGrowableHash();
-    NodeHash<Object>.PagedGrowableHash fallbackHashTable = nodeHash.new PagedGrowableHash();
+    FSTSuffixNodeCache<Object>.PagedGrowableHash primaryHashTable =
+        suffixCache.new PagedGrowableHash();
+    FSTSuffixNodeCache<Object>.PagedGrowableHash fallbackHashTable =
+        suffixCache.new PagedGrowableHash();
     int nodeLength = atLeast(500);
     long fallbackHashSlot = 1;
     byte[] fallbackBytes = RandomBytes.randomBytesOfLength(random(), nodeLength);
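
Usage note (not part of the patch): the renamed cache is never constructed directly by callers;
it is switched on through FSTCompiler. The sketch below assumes the Builder method
suffixRAMLimitMB, matching the constructor argument in the first hunk; a limit > 0 enables
suffix deduplication through FSTSuffixNodeCache, while 0 disables it and yields a non-minimal
FST. Exact signatures (for example the return type of compile()) vary slightly across Lucene
versions, so treat this as a sketch rather than version-exact code.

import java.io.IOException;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IntsRefBuilder;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.FSTCompiler;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;

public class SuffixCacheExample {
  public static void main(String[] args) throws IOException {
    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
    FSTCompiler<Long> fstCompiler =
        new FSTCompiler.Builder<>(FST.INPUT_TYPE.BYTE1, outputs)
            .suffixRAMLimitMB(32) // > 0: dedup suffixes through FSTSuffixNodeCache
            .build();

    // inputs must be added in sorted order
    IntsRefBuilder scratch = new IntsRefBuilder();
    fstCompiler.add(Util.toIntsRef(new BytesRef("station"), scratch), 17L);
    fstCompiler.add(Util.toIntsRef(new BytesRef("vacation"), scratch), 42L);

    // the shared suffix ("ation") is frozen once and reused through the cache;
    // with the limit set, compileNode routes every frozen node with arcs through
    // suffixDedupCache.add, as shown in the first hunk above
    fstCompiler.compile();
  }
}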