fix(ledger): database reliability -> hashmap db #400

Draft: wants to merge 6 commits into base branch dnut/fix/disk-allocator
.github/workflows/check.yml: 2 changes (1 addition, 1 deletion)
@@ -56,7 +56,7 @@ jobs:
- name: test
run: |
zig build test -Denable-tsan=true
zig build test -Denable-tsan=true -Dblockstore-db=hashmap -Dfilter=ledger
zig build test -Denable-tsan=true -Dblockstore-db=rocksdb -Dfilter=ledger

kcov_test:
strategy:
build.zig: 2 changes (1 addition, 1 deletion)
@@ -10,7 +10,7 @@ pub fn build(b: *Build) void {
const filters = b.option([]const []const u8, "filter", "List of filters, used for example to filter unit tests by name"); // specified as a series like `-Dfilter="filter1" -Dfilter="filter2"`
const enable_tsan = b.option(bool, "enable-tsan", "Enable TSan for the test suite");
const no_run = b.option(bool, "no-run", "Do not run the selected step and install it") orelse false;
const blockstore_db = b.option(BlockstoreDB, "blockstore-db", "Blockstore database backend") orelse .rocksdb;
const blockstore_db = b.option(BlockstoreDB, "blockstore", "Blockstore database backend") orelse .hashmap;

// Build options
const build_options = b.addOptions();
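For context on how the selected backend presumably reaches the ledger code (that wiring is not part of this diff): the standard zig pattern is to publish the enum through the generated options module and switch on it at comptime. The following is a sketch only; `exe`, the "build-options" module name, and the consumer-side names are assumptions, not taken from this PR.

```zig
// build.zig side, continuing the excerpt above (`exe` is an assumed compile step):
build_options.addOption(BlockstoreDB, "blockstore_db", blockstore_db);
exe.root_module.addOptions("build-options", build_options);

// Consumer-side sketch (module, field, and type names are assumptions):
//   const build_options = @import("build-options");
//   pub const BlockstoreDB = switch (build_options.blockstore_db) {
//       .rocksdb => RocksDB(&schema.list),         // src/ledger/database/rocksdb.zig
//       .hashmap => SharedHashMapDB(&schema.list), // src/ledger/database/hashmap.zig
//   };
```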
src/ledger/database/hashmap.zig: 166 changes (103 additions, 63 deletions)

(Diff not rendered by default: large diff.)
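Since the hashmap.zig diff is not rendered here, the following is an illustrative, self-contained sketch of the general shape of a hashmap-backed store: owned copies of keys and values behind an RwLock, with reads returning caller-owned copies. It is not the PR's implementation; `MiniStore` and the flat string-keyed model are simplifications for illustration only.

```zig
const std = @import("std");

const MiniStore = struct {
    allocator: std.mem.Allocator,
    lock: std.Thread.RwLock = .{},
    map: std.StringHashMapUnmanaged([]const u8) = .{},

    fn deinit(self: *MiniStore) void {
        var it = self.map.iterator();
        while (it.next()) |entry| {
            self.allocator.free(entry.key_ptr.*);
            self.allocator.free(entry.value_ptr.*);
        }
        self.map.deinit(self.allocator);
    }

    fn put(self: *MiniStore, key: []const u8, value: []const u8) !void {
        // Copy inputs so the store owns its data regardless of caller lifetimes.
        const owned_key = try self.allocator.dupe(u8, key);
        errdefer self.allocator.free(owned_key);
        const owned_value = try self.allocator.dupe(u8, value);
        errdefer self.allocator.free(owned_value);

        self.lock.lock();
        defer self.lock.unlock();
        const entry = try self.map.getOrPut(self.allocator, owned_key);
        if (entry.found_existing) {
            // Keep the key already in the map; drop the duplicate and the old value.
            self.allocator.free(owned_key);
            self.allocator.free(entry.value_ptr.*);
        }
        entry.value_ptr.* = owned_value;
    }

    /// Returns a copy owned by `allocator`, so the caller never holds a
    /// pointer into the map after the shared lock is released.
    fn get(self: *MiniStore, allocator: std.mem.Allocator, key: []const u8) !?[]const u8 {
        self.lock.lockShared();
        defer self.lock.unlockShared();
        const value = self.map.get(key) orelse return null;
        return try allocator.dupe(u8, value);
    }
};

test "MiniStore put and get" {
    var store = MiniStore{ .allocator = std.testing.allocator };
    defer store.deinit();

    try store.put("slot_meta:1", "parent=0");
    const copy = (try store.get(std.testing.allocator, "slot_meta:1")).?;
    defer std.testing.allocator.free(copy);
    try std.testing.expectEqualStrings("parent=0", copy);
}
```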

src/ledger/database/rocksdb.zig: 6 changes (3 additions, 3 deletions)
@@ -25,14 +25,14 @@ pub fn RocksDB(comptime column_families: []const ColumnFamily) type {
db: rocks.DB,
logger: ScopedLogger(LOG_SCOPE),
cf_handles: []const rocks.ColumnFamilyHandle,
path: []const u8,
path: [:0]const u8,

const Self = @This();

pub fn open(allocator: Allocator, logger_: Logger, path: []const u8) Error!Self {
const logger = logger_.withScope(LOG_SCOPE);
logger.info().log("Initializing RocksDB");
const owned_path = try allocator.dupe(u8, path);
const owned_path = try std.fmt.allocPrintZ(allocator, "{s}/rocksdb", .{path});

// allocate cf descriptions
const column_family_descriptions = try allocator
@@ -53,7 +53,7 @@ pub fn RocksDB(comptime column_families: []const ColumnFamily) type {
rocks.DB.open,
.{
allocator,
path,
owned_path,
.{ .create_if_missing = true, .create_missing_column_families = true },
column_family_descriptions,
},
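The notable change above is that the owned copy of the path becomes sentinel-terminated (`[:0]const u8`) and gains a `/rocksdb` subdirectory, and that this owned path is what gets passed to `rocks.DB.open`, presumably because the underlying C API expects a NUL-terminated string. A small self-contained sketch of the `allocPrintZ` pattern (the base path here is made up):

```zig
const std = @import("std");

test "build a NUL-terminated rocksdb subdirectory path" {
    const allocator = std.testing.allocator;
    const path = "/tmp/sig-ledger"; // hypothetical base path

    // Unlike allocator.dupe, allocPrintZ returns a sentinel-terminated slice
    // ([:0]u8), which can be handed to C APIs that expect a C string.
    const owned_path: [:0]const u8 = try std.fmt.allocPrintZ(allocator, "{s}/rocksdb", .{path});
    defer allocator.free(owned_path);

    try std.testing.expectEqualStrings("/tmp/sig-ledger/rocksdb", owned_path);
    try std.testing.expectEqual(@as(u8, 0), owned_path[owned_path.len]); // the sentinel is addressable
}
```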
src/ledger/reader.zig: 32 changes (25 additions, 7 deletions)
@@ -506,7 +506,7 @@ pub const BlockstoreReader = struct {
.initCapacity(self.allocator, slot_transactions.items.len);
errdefer {
for (txns_with_statuses.items) |item| {
item.deinit(self.db.allocator);
item.deinit(self.allocator);
}
txns_with_statuses.deinit();
}
@@ -666,7 +666,11 @@ pub const BlockstoreReader = struct {

const max_root = self.max_root.load(.monotonic);
var confirmed_unrooted_slots = AutoHashMap(Slot, void).init(self.allocator);
var iterator = AncestorIterator{ .db = &self.db, .next_slot = highest_confirmed_slot };
var iterator = AncestorIterator{
.allocator = self.allocator,
.db = &self.db,
.next_slot = highest_confirmed_slot,
};
while (try iterator.next()) |slot| {
if (slot <= max_root) break;
try confirmed_unrooted_slots.put(slot, {});
@@ -744,7 +748,11 @@ pub const BlockstoreReader = struct {
var confirmed_unrooted_slots = AutoHashMap(Slot, void).init(self.allocator);
defer confirmed_unrooted_slots.deinit();
const max_root = self.max_root.load(.monotonic);
var ancestor_iterator = AncestorIterator{ .db = &self.db, .next_slot = highest_slot };
var ancestor_iterator = AncestorIterator{
.allocator = self.allocator,
.db = &self.db,
.next_slot = highest_slot,
};
while (try ancestor_iterator.next()) |slot| {
if (slot <= max_root) break;
try confirmed_unrooted_slots.put(slot, {});
@@ -1474,17 +1482,27 @@ const BlockstoreRpcApiMetrics = struct {
};

pub const AncestorIterator = struct {
allocator: Allocator,
db: *BlockstoreDB,
next_slot: ?Slot,

pub fn initExclusive(db: *BlockstoreDB, start_slot: Slot) !AncestorIterator {
var self = AncestorIterator.initInclusive(db, start_slot);
pub fn initExclusive(
allocator: Allocator,
db: *BlockstoreDB,
start_slot: Slot,
) !AncestorIterator {
var self = AncestorIterator.initInclusive(allocator, db, start_slot);
_ = try self.next();
return self;
}

pub fn initInclusive(db: *BlockstoreDB, start_slot: Slot) AncestorIterator {
pub fn initInclusive(
allocator: Allocator,
db: *BlockstoreDB,
start_slot: Slot,
) AncestorIterator {
return .{
.allocator = allocator,
.db = db,
.next_slot = start_slot,
};
@@ -1494,7 +1512,7 @@ pub const AncestorIterator = struct {
if (self.next_slot) |slot| {
if (slot == 0) {
self.next_slot = null;
} else if (try self.db.get(self.db.allocator, schema.slot_meta, slot)) |slot_meta| {
} else if (try self.db.get(self.allocator, schema.slot_meta, slot)) |slot_meta| {
defer slot_meta.deinit();
self.next_slot = slot_meta.parent_slot;
} else {
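A hypothetical call-site sketch of the updated iterator API (not from this PR): the caller's allocator is now passed in explicitly instead of the iterator reaching into `db.allocator`. `collectAncestors` and its parameters are made up for illustration; `AncestorIterator`, `BlockstoreDB`, `Slot`, `ArrayList`, and `Allocator` are assumed to be the surrounding file's imports.

```zig
fn collectAncestors(
    allocator: Allocator,
    db: *BlockstoreDB,
    start_slot: Slot,
) !ArrayList(Slot) {
    var slots = ArrayList(Slot).init(allocator);
    errdefer slots.deinit();

    // Both constructors now take the allocator used for the slot_meta reads
    // performed inside next(), which frees each slot_meta before returning.
    var iterator = AncestorIterator.initInclusive(allocator, db, start_slot);
    while (try iterator.next()) |slot| {
        try slots.append(slot);
    }
    return slots;
}
```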
src/ledger/result_writer.zig: 3 changes (2 additions, 1 deletion)
@@ -203,7 +203,8 @@ pub const LedgerResultWriter = struct {
} else self.max_root.load(.monotonic);
const end_slot = maybe_end_slot orelse lowest_cleanup_slot.get().*;

var ancestor_iterator = try AncestorIterator.initExclusive(&self.db, start_root);
var ancestor_iterator = try AncestorIterator
.initExclusive(self.allocator, &self.db, start_root);

var find_missing_roots_timer = try Timer.start();
var roots_to_fix = ArrayList(Slot).init(self.allocator);