storage: fix the tiny prefetch request for batch chunks
By letting batch chunks pass the chunk continuity check and sorting batch chunks correctly,
prefetch requests are no longer interrupted by batch chunks.

Signed-off-by: Wenhao Ren <[email protected]>
hangvane authored and Desiki-high committed Dec 21, 2023
1 parent 808e41a commit e1aca7d
Showing 2 changed files with 9 additions and 3 deletions.
storage/src/cache/cachedfile.rs (2 changes: 1 addition & 1 deletion)
@@ -639,7 +639,7 @@ impl BlobCache for FileCacheEntry {
// Then handle fs prefetch
let max_comp_size = self.prefetch_batch_size();
let mut bios = bios.to_vec();
-bios.sort_unstable_by_key(|entry| entry.chunkinfo.compressed_offset());
+bios.sort_by_key(|entry| entry.chunkinfo.compressed_offset());
self.metrics.prefetch_unmerged_chunks.add(bios.len() as u64);
BlobIoMergeState::merge_and_issue(
&bios,
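The hunk above swaps sort_unstable_by_key for the stable sort_by_key. A minimal sketch of why stability matters, under the assumption (implied by the commit message, not stated in the diff) that chunks belonging to one batch share the same compressed offset, so an unstable sort is free to shuffle them; the (offset, index) tuples are hypothetical stand-ins for BlobIoDesc:

fn main() {
    // Hypothetical (compressed_offset, chunk_index) pairs: chunks 0, 1 and 2
    // belong to one batch and therefore share compressed offset 0x1000;
    // chunk 3 is an ordinary chunk located earlier in the blob.
    let mut bios = vec![(0x1000u64, 0u32), (0x1000, 1), (0x1000, 2), (0x0800, 3)];

    // Stable sort by compressed offset: the equal-keyed batch chunks keep
    // their original 0, 1, 2 order, so a later merge sees them contiguously.
    bios.sort_by_key(|&(offset, _)| offset);
    assert_eq!(bios, vec![(0x0800, 3), (0x1000, 0), (0x1000, 1), (0x1000, 2)]);

    // sort_unstable_by_key would also order by offset, but it may permute the
    // equal-keyed batch chunks and break their uncompressed order.
    println!("{bios:?}");
}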
storage/src/device.rs (10 changes: 8 additions & 2 deletions)
@@ -750,6 +750,12 @@ impl BlobIoDesc {
let prev_end = self.chunkinfo.compressed_offset() + self.chunkinfo.compressed_size() as u64;
let next_offset = next.chunkinfo.compressed_offset();

+if self.chunkinfo.is_batch() || next.chunkinfo.is_batch() {
+    // Batch chunk can only be compared by uncompressed info.
+    return next.chunkinfo.uncompressed_offset() - self.chunkinfo.uncompressed_end()
+        <= max_gap;
+}

if self.chunkinfo.blob_index() == next.chunkinfo.blob_index() && next_offset >= prev_end {
if next.blob.is_legacy_stargz() {
next_offset - prev_end <= max_gap * 8
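For illustration, a standalone sketch of the continuity rule after this hunk: batch chunks are compared on the uncompressed layout (the added comment says they can only be compared that way), ordinary chunks on the compressed layout. The Chunk struct is a hypothetical simplification of the real BlobChunkInfo trait; the blob-index and legacy-stargz checks are omitted:

// Hypothetical, simplified chunk descriptor standing in for BlobChunkInfo.
struct Chunk {
    is_batch: bool,
    compressed_offset: u64,
    compressed_size: u32,
    uncompressed_offset: u64,
    uncompressed_size: u32,
}

impl Chunk {
    fn uncompressed_end(&self) -> u64 {
        self.uncompressed_offset + self.uncompressed_size as u64
    }
}

// Sketch of the continuity test: batch chunks are judged by the gap in the
// uncompressed layout, ordinary chunks by the gap in the compressed layout.
fn is_continuous(prev: &Chunk, next: &Chunk, max_gap: u64) -> bool {
    if prev.is_batch || next.is_batch {
        return next.uncompressed_offset - prev.uncompressed_end() <= max_gap;
    }
    let prev_end = prev.compressed_offset + prev.compressed_size as u64;
    next.compressed_offset >= prev_end && next.compressed_offset - prev_end <= max_gap
}

fn main() {
    // Two chunks of one batch: adjacent uncompressed extents, shared
    // compressed region, so they are treated as continuous.
    let a = Chunk { is_batch: true, compressed_offset: 0x1000, compressed_size: 0x400,
                    uncompressed_offset: 0x0000, uncompressed_size: 0x1000 };
    let b = Chunk { is_batch: true, compressed_offset: 0x1000, compressed_size: 0x400,
                    uncompressed_offset: 0x1000, uncompressed_size: 0x1000 };
    assert!(is_continuous(&a, &b, 0x100));
}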
@@ -999,15 +1005,15 @@ impl BlobIoRange {
}

/// Merge an `BlobIoDesc` into the `BlobIoRange` object.
-pub fn merge(&mut self, bio: &BlobIoDesc, max_gap: u64) {
+pub fn merge(&mut self, bio: &BlobIoDesc, _max_gap: u64) {
let end = self.blob_offset + self.blob_size;
let offset = bio.chunkinfo.compressed_offset();
let size = bio.chunkinfo.compressed_size() as u64;
let size = if end == offset {
assert!(offset.checked_add(size).is_some());
size
} else {
-assert!((offset > end && offset - end <= max_gap));
+assert!(offset > end);
size + (offset - end)
};
assert!(end.checked_add(size).is_some());
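The merge change drops the max_gap bound from the assertion and only requires the next chunk to start past the current end, presumably because continuity (including the uncompressed-offset rule for batch chunks) has already been decided by the caller. A hypothetical sketch of how a merged range absorbs such a gap; IoRange is a stand-in for the real BlobIoRange:

// Hypothetical, simplified stand-in for BlobIoRange::merge.
struct IoRange {
    blob_offset: u64,
    blob_size: u64,
}

impl IoRange {
    // Extend the range to cover the next chunk, swallowing any gap between
    // the current end and the chunk's compressed offset.
    fn merge(&mut self, chunk_offset: u64, chunk_size: u64) {
        let end = self.blob_offset + self.blob_size;
        let added = if end == chunk_offset {
            chunk_size
        } else {
            // After this commit the gap only has to be positive; it is no
            // longer bounded by max_gap at this point.
            assert!(chunk_offset > end);
            chunk_size + (chunk_offset - end)
        };
        self.blob_size += added;
    }
}

fn main() {
    let mut range = IoRange { blob_offset: 0x1000, blob_size: 0x800 };
    // The next chunk starts 0x200 past the current end (0x1800), so the
    // range grows by the gap plus the chunk size.
    range.merge(0x1a00, 0x400);
    assert_eq!(range.blob_size, 0x800 + 0x200 + 0x400);
}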
