diff --git a/db/table_cache.cc b/db/table_cache.cc
index d3728e86dc4d..02956c7c29cc 100644
--- a/db/table_cache.cc
+++ b/db/table_cache.cc
@@ -395,7 +395,7 @@ uint64_t TableCache::CreateRowCacheKeyPrefix(const ReadOptions& options,
 
 bool TableCache::GetFromRowCache(const Slice& user_key, IterKey& row_cache_key,
                                  size_t prefix_size, GetContext* get_context,
-                                 SequenceNumber seq_no) {
+                                 Status* read_status, SequenceNumber seq_no) {
   bool found = false;
 
   row_cache_key.TrimAppend(prefix_size, user_key.data(), user_key.size());
@@ -414,9 +414,8 @@ bool TableCache::GetFromRowCache(const Slice& user_key, IterKey& row_cache_key,
     row_cache.RegisterReleaseAsCleanup(row_handle, value_pinner);
     // If row cache hit, knowing cache key is the same to row_cache_key,
     // can use row_cache_key's seq no to construct InternalKey.
-    replayGetContextLog(*row_cache.Value(row_handle), user_key, get_context,
-                        &value_pinner, seq_no)
-        .PermitUncheckedError();  // TODO
+    *read_status = replayGetContextLog(*row_cache.Value(row_handle), user_key,
+                                       get_context, &value_pinner, seq_no);
     RecordTick(ioptions_.stats, ROW_CACHE_HIT);
     found = true;
   } else {
@@ -441,21 +440,20 @@ Status TableCache::Get(
 
   // Check row cache if enabled.
   // Reuse row_cache_key sequence number when row cache hits.
+  Status s;
   if (ioptions_.row_cache && !get_context->NeedToReadSequence()) {
     auto user_key = ExtractUserKey(k);
     uint64_t cache_entry_seq_no =
         CreateRowCacheKeyPrefix(options, fd, k, get_context, row_cache_key);
     done = GetFromRowCache(user_key, row_cache_key, row_cache_key.Size(),
-                           get_context, cache_entry_seq_no);
+                           get_context, &s, cache_entry_seq_no);
     if (!done) {
       row_cache_entry = &row_cache_entry_buffer;
     }
   }
-  Status s;
   TableReader* t = fd.table_reader;
   TypedHandle* handle = nullptr;
-  if (!done) {
-    assert(s.ok());
+  if (s.ok() && !done) {
     if (t == nullptr) {
       s = FindTable(options, file_options_, internal_comparator, file_meta,
                     &handle, block_protection_bytes_per_key, prefix_extractor,
diff --git a/db/table_cache.h b/db/table_cache.h
index ae3fc93c376c..cf3cd25c9147 100644
--- a/db/table_cache.h
+++ b/db/table_cache.h
@@ -273,6 +273,7 @@ class TableCache {
   // user key to row_cache_key at offset prefix_size
   bool GetFromRowCache(const Slice& user_key, IterKey& row_cache_key,
                        size_t prefix_size, GetContext* get_context,
+                       Status* read_status,
                        SequenceNumber seq_no = kMaxSequenceNumber);
 
   const ImmutableOptions& ioptions_;
@@ -286,4 +287,4 @@ class TableCache {
   std::string db_session_id_;
 };
 
-}  // namespace ROCKSDB_NAMESPACE
\ No newline at end of file
+}  // namespace ROCKSDB_NAMESPACE
diff --git a/db/table_cache_sync_and_async.h b/db/table_cache_sync_and_async.h
index a06b7d43164b..e0a14d629e71 100644
--- a/db/table_cache_sync_and_async.h
+++ b/db/table_cache_sync_and_async.h
@@ -50,8 +50,14 @@ DEFINE_SYNC_AND_ASYNC(Status, TableCache::MultiGet)
 
       GetContext* get_context = miter->get_context;
 
-      if (GetFromRowCache(user_key, row_cache_key, row_cache_key_prefix_size,
-                          get_context)) {
+      Status read_status;
+      bool ret =
+          GetFromRowCache(user_key, row_cache_key, row_cache_key_prefix_size,
+                          get_context, &read_status);
+      if (!read_status.ok()) {
+        return read_status;
+      }
+      if (ret) {
        table_range.SkipKey(miter);
      } else {
        row_cache_entries.emplace_back();
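
For context, below is a minimal standalone sketch (not RocksDB code) of the pattern the diff applies: the row-cache lookup helper reports the replay Status through an out-parameter instead of swallowing it with PermitUncheckedError(), and the caller stops before doing any table-reader work when that Status is not OK. The Status struct here is a simplified stand-in for rocksdb::Status, and LookupRowCache / ReplayLog are hypothetical stand-ins for GetFromRowCache / replayGetContextLog.

#include <iostream>
#include <string>
#include <utility>

// Simplified stand-in for rocksdb::Status.
struct Status {
  bool ok_;
  std::string msg_;
  static Status OK() { return {true, ""}; }
  static Status Corruption(std::string m) { return {false, std::move(m)}; }
  bool ok() const { return ok_; }
  const std::string& message() const { return msg_; }
};

// Hypothetical replay step; can fail if the cached entry is unusable.
Status ReplayLog(const std::string& cached_entry) {
  if (cached_entry.empty()) {
    return Status::Corruption("empty row-cache entry");
  }
  return Status::OK();
}

// Returns true on a cache hit. Before the change the replay Status was
// dropped; now it is surfaced via *read_status so the caller can tell a
// clean miss apart from a failed cached read.
bool LookupRowCache(const std::string& cached_entry, bool hit,
                    Status* read_status) {
  *read_status = Status::OK();
  if (!hit) {
    return false;  // miss: caller falls through to the table reader
  }
  *read_status = ReplayLog(cached_entry);
  return true;  // hit, but *read_status may still carry an error
}

int main() {
  Status s = Status::OK();
  bool done = LookupRowCache(/*cached_entry=*/"", /*hit=*/true, &s);
  if (!s.ok()) {
    // Mirrors `if (s.ok() && !done)` in Get() and the early return in
    // MultiGet(): do not proceed to FindTable-style work after a failed read.
    std::cout << "row cache read failed: " << s.message() << "\n";
    return 1;
  }
  std::cout << (done ? "served from row cache\n" : "fall through to table\n");
  return 0;
}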