Skip to content

Commit

Permalink
Btrfs: Make raid_map array be inlined in btrfs_bio structure
Browse files Browse the repository at this point in the history
This makes the code simpler and clearer, since we no longer need to
free the bbio and the raid_map separately.

Signed-off-by: Miao Xie <[email protected]>
Signed-off-by: Zhao Lei <[email protected]>
Signed-off-by: Chris Mason <[email protected]>
  • Loading branch information
zhaoleidd authored and masoncl committed Jan 22, 2015
1 parent cc7539e commit 8e5cfb5
Show file tree
Hide file tree
Showing 5 changed files with 105 additions and 125 deletions.
77 changes: 33 additions & 44 deletions fs/btrfs/raid56.c
Original file line number Diff line number Diff line change
Expand Up @@ -79,13 +79,6 @@ struct btrfs_raid_bio {
struct btrfs_fs_info *fs_info;
struct btrfs_bio *bbio;

/*
* logical block numbers for the start of each stripe
* The last one or two are p/q. These are sorted,
* so raid_map[0] is the start of our full stripe
*/
u64 *raid_map;

/* while we're doing rmw on a stripe
* we put it into a hash table so we can
* lock the stripe and merge more rbios
Expand Down Expand Up @@ -303,7 +296,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
*/
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
u64 num = rbio->raid_map[0];
u64 num = rbio->bbio->raid_map[0];

/*
* we shift down quite a bit. We're using byte
Expand Down Expand Up @@ -606,8 +599,8 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
test_bit(RBIO_CACHE_BIT, &cur->flags))
return 0;

if (last->raid_map[0] !=
cur->raid_map[0])
if (last->bbio->raid_map[0] !=
cur->bbio->raid_map[0])
return 0;

/* we can't merge with different operations */
Expand Down Expand Up @@ -689,7 +682,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
spin_lock_irqsave(&h->lock, flags);
list_for_each_entry(cur, &h->hash_list, hash_list) {
walk++;
if (cur->raid_map[0] == rbio->raid_map[0]) {
if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
spin_lock(&cur->bio_list_lock);

/* can we steal this cached rbio's pages? */
Expand Down Expand Up @@ -842,18 +835,16 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
}

static inline void
__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need)
__free_bbio(struct btrfs_bio *bbio, int need)
{
if (need) {
kfree(raid_map);
if (need)
kfree(bbio);
}
}

static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio)
static inline void free_bbio(struct btrfs_raid_bio *rbio)
{
__free_bbio_and_raid_map(rbio->bbio, rbio->raid_map,
!test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
__free_bbio(rbio->bbio,
!test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
Expand All @@ -875,7 +866,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
}
}

free_bbio_and_raid_map(rbio);
free_bbio(rbio);

kfree(rbio);
}
Expand Down Expand Up @@ -985,8 +976,7 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
* this does not allocate any pages for rbio->pages.
*/
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
struct btrfs_bio *bbio, u64 *raid_map,
u64 stripe_len)
struct btrfs_bio *bbio, u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
int nr_data = 0;
Expand All @@ -1007,7 +997,6 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
INIT_LIST_HEAD(&rbio->stripe_cache);
INIT_LIST_HEAD(&rbio->hash_list);
rbio->bbio = bbio;
rbio->raid_map = raid_map;
rbio->fs_info = root->fs_info;
rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages;
Expand All @@ -1028,7 +1017,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
rbio->bio_pages = p + sizeof(struct page *) * num_pages;
rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
nr_data = real_stripes - 2;
else
nr_data = real_stripes - 1;
Expand Down Expand Up @@ -1182,7 +1171,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
spin_lock_irq(&rbio->bio_list_lock);
bio_list_for_each(bio, &rbio->bio_list) {
start = (u64)bio->bi_iter.bi_sector << 9;
stripe_offset = start - rbio->raid_map[0];
stripe_offset = start - rbio->bbio->raid_map[0];
page_index = stripe_offset >> PAGE_CACHE_SHIFT;

for (i = 0; i < bio->bi_vcnt; i++) {
Expand Down Expand Up @@ -1402,7 +1391,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
logical <<= 9;

for (i = 0; i < rbio->nr_data; i++) {
stripe_start = rbio->raid_map[i];
stripe_start = rbio->bbio->raid_map[i];
if (logical >= stripe_start &&
logical < stripe_start + rbio->stripe_len) {
return i;
Expand Down Expand Up @@ -1776,17 +1765,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
* our main entry point for writes from the rest of the FS.
*/
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map,
u64 stripe_len)
struct btrfs_bio *bbio, u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
struct btrfs_plug_cb *plug = NULL;
struct blk_plug_cb *cb;
int ret;

rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
rbio = alloc_rbio(root, bbio, stripe_len);
if (IS_ERR(rbio)) {
__free_bbio_and_raid_map(bbio, raid_map, 1);
__free_bbio(bbio, 1);
return PTR_ERR(rbio);
}
bio_list_add(&rbio->bio_list, bio);
Expand Down Expand Up @@ -1885,7 +1873,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
}

/* all raid6 handling here */
if (rbio->raid_map[rbio->real_stripes - 1] ==
if (rbio->bbio->raid_map[rbio->real_stripes - 1] ==
RAID6_Q_STRIPE) {

/*
Expand Down Expand Up @@ -1922,8 +1910,9 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
* here due to a crc mismatch and we can't give them the
* data they want
*/
if (rbio->raid_map[failb] == RAID6_Q_STRIPE) {
if (rbio->raid_map[faila] == RAID5_P_STRIPE) {
if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
if (rbio->bbio->raid_map[faila] ==
RAID5_P_STRIPE) {
err = -EIO;
goto cleanup;
}
Expand All @@ -1934,7 +1923,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
goto pstripe;
}

if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
raid6_datap_recov(rbio->real_stripes,
PAGE_SIZE, faila, pointers);
} else {
Expand Down Expand Up @@ -2156,15 +2145,15 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
* of the drive.
*/
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map,
u64 stripe_len, int mirror_num, int generic_io)
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io)
{
struct btrfs_raid_bio *rbio;
int ret;

rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
rbio = alloc_rbio(root, bbio, stripe_len);
if (IS_ERR(rbio)) {
__free_bbio_and_raid_map(bbio, raid_map, generic_io);
__free_bbio(bbio, generic_io);
return PTR_ERR(rbio);
}

Expand All @@ -2175,7 +2164,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
BUG();
__free_bbio_and_raid_map(bbio, raid_map, generic_io);
__free_bbio(bbio, generic_io);
kfree(rbio);
return -EIO;
}
Expand Down Expand Up @@ -2240,14 +2229,14 @@ static void read_rebuild_work(struct btrfs_work *work)

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map,
u64 stripe_len, struct btrfs_device *scrub_dev,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors)
{
struct btrfs_raid_bio *rbio;
int i;

rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
rbio = alloc_rbio(root, bbio, stripe_len);
if (IS_ERR(rbio))
return NULL;
bio_list_add(&rbio->bio_list, bio);
Expand Down Expand Up @@ -2279,10 +2268,10 @@ void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
int stripe_offset;
int index;

ASSERT(logical >= rbio->raid_map[0]);
ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] +
ASSERT(logical >= rbio->bbio->raid_map[0]);
ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
rbio->stripe_len * rbio->nr_data);
stripe_offset = (int)(logical - rbio->raid_map[0]);
stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
index = stripe_offset >> PAGE_CACHE_SHIFT;
rbio->bio_pages[index] = page;
}
Expand Down
11 changes: 5 additions & 6 deletions fs/btrfs/raid56.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,16 +43,15 @@ struct btrfs_raid_bio;
struct btrfs_device;

int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map,
u64 stripe_len, int mirror_num, int generic_io);
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io);
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map,
u64 stripe_len);
struct btrfs_bio *bbio, u64 stripe_len);

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map,
u64 stripe_len, struct btrfs_device *scrub_dev,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
struct page *page, u64 logical);
Expand Down
31 changes: 10 additions & 21 deletions fs/btrfs/scrub.c
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@ struct scrub_ctx;
struct scrub_recover {
atomic_t refs;
struct btrfs_bio *bbio;
u64 *raid_map;
u64 map_length;
};

Expand Down Expand Up @@ -857,7 +856,6 @@ static inline void scrub_put_recover(struct scrub_recover *recover)
{
if (atomic_dec_and_test(&recover->refs)) {
kfree(recover->bbio);
kfree(recover->raid_map);
kfree(recover);
}
}
Expand Down Expand Up @@ -1296,12 +1294,12 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
return 0;
}

static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map)
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
if (raid_map) {
if (bbio->raid_map) {
int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;

if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
return 3;
else
return 2;
Expand Down Expand Up @@ -1347,7 +1345,6 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
{
struct scrub_recover *recover;
struct btrfs_bio *bbio;
u64 *raid_map;
u64 sublen;
u64 mapped_length;
u64 stripe_offset;
Expand All @@ -1368,35 +1365,31 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
sublen = min_t(u64, length, PAGE_SIZE);
mapped_length = sublen;
bbio = NULL;
raid_map = NULL;

/*
* with a length of PAGE_SIZE, each returned stripe
* represents one mirror
*/
ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
&mapped_length, &bbio, 0, &raid_map);
&mapped_length, &bbio, 0, 1);
if (ret || !bbio || mapped_length < sublen) {
kfree(bbio);
kfree(raid_map);
return -EIO;
}

recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
if (!recover) {
kfree(bbio);
kfree(raid_map);
return -ENOMEM;
}

atomic_set(&recover->refs, 1);
recover->bbio = bbio;
recover->raid_map = raid_map;
recover->map_length = mapped_length;

BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);

nmirrors = scrub_nr_raid_mirrors(bbio, raid_map);
nmirrors = scrub_nr_raid_mirrors(bbio);
for (mirror_index = 0; mirror_index < nmirrors;
mirror_index++) {
struct scrub_block *sblock;
Expand All @@ -1420,7 +1413,7 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
sblock->pagev[page_index] = page;
page->logical = logical;

scrub_stripe_index_and_offset(logical, raid_map,
scrub_stripe_index_and_offset(logical, bbio->raid_map,
mapped_length,
bbio->num_stripes -
bbio->num_tgtdevs,
Expand Down Expand Up @@ -1469,7 +1462,7 @@ static void scrub_bio_wait_endio(struct bio *bio, int error)

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
return page->recover && page->recover->raid_map;
return page->recover && page->recover->bbio->raid_map;
}

static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
Expand All @@ -1486,7 +1479,6 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
bio->bi_end_io = scrub_bio_wait_endio;

ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
page->recover->raid_map,
page->recover->map_length,
page->mirror_num, 0);
if (ret)
Expand Down Expand Up @@ -2716,7 +2708,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
struct btrfs_raid_bio *rbio;
struct scrub_page *spage;
struct btrfs_bio *bbio = NULL;
u64 *raid_map = NULL;
u64 length;
int ret;

Expand All @@ -2727,8 +2718,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
length = sparity->logic_end - sparity->logic_start + 1;
ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
sparity->logic_start,
&length, &bbio, 0, &raid_map);
if (ret || !bbio || !raid_map)
&length, &bbio, 0, 1);
if (ret || !bbio || !bbio->raid_map)
goto bbio_out;

bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
Expand All @@ -2740,8 +2731,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
bio->bi_end_io = scrub_parity_bio_endio;

rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
raid_map, length,
sparity->scrub_dev,
length, sparity->scrub_dev,
sparity->dbitmap,
sparity->nsectors);
if (!rbio)
Expand All @@ -2759,7 +2749,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
bio_put(bio);
bbio_out:
kfree(bbio);
kfree(raid_map);
bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
sparity->nsectors);
spin_lock(&sctx->stat_lock);
Expand Down
Loading

0 comments on commit 8e5cfb5

Please sign in to comment.