author     Jaegeuk Kim <jaegeuk.kim@samsung.com>    2013-11-30 10:10:31 +0900
committer  Jaegeuk Kim <jaegeuk.kim@samsung.com>    2013-12-23 10:18:05 +0900
commit     187b5b8b3dfcfc73126f2743c89cc47df3bf07be (patch)
tree       0be0b06f1c04b8d87037250317e09048643d0d12 /fs/f2fs/segment.c
parent     8f99a946f360c4083d6e323e10a5928e1ce385a4 (diff)
f2fs: remove the own bi_private allocation
Previously, f2fs allocated its own bi_private data structure on every bio submission, even when it was not used. Can we remove this bi_private allocation? This patch removes that additional allocation.

1. Retrieve f2fs_sb_info from page->mapping->host->i_sb.
   - This removes the remaining uses of bi_private in end_io.

2. Use bi_private only when we really need it.
   - bi_private is now used only while the checkpoint procedure is conducted.
   - When conducting a checkpoint, f2fs submits a META_FLUSH bio and waits for its completion.
   - Since nothing else depends on bi_private now, the pointer can serve directly as the completion pointer.

Reviewed-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
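For context, here is a minimal sketch of the resulting end_io flow, assuming the pre-3.14 block-layer interfaces this patch targets (manual bio->bi_io_vec iteration, a raw bi_private pointer) and the f2fs helpers named in the diff below. Error propagation and the cp_wait wakeup are elided; this illustrates the two points above, not the full function:

    /*
     * Sketch: the superblock info is recovered from the page itself,
     * so no per-bio private allocation is needed any more.
     */
    static void f2fs_end_io_write(struct bio *bio, int err)
    {
    	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
    	/* Point 1: reach f2fs_sb_info via page->mapping->host->i_sb. */
    	struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);

    	do {
    		end_page_writeback(bvec->bv_page);
    		dec_page_count(sbi, F2FS_WRITEBACK);
    	} while (--bvec >= bio->bi_io_vec);

    	/* Point 2: bi_private is non-NULL only for META_FLUSH bios, where
    	 * it carries the checkpoint waiter's on-stack completion. */
    	if (bio->bi_private)
    		complete(bio->bi_private);
    	bio_put(bio);
    }

Recovering sbi this way is safe because every page in an f2fs write-back bio belongs to an inode on the same f2fs superblock.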
Diffstat (limited to 'fs/f2fs/segment.c')
-rw-r--r--   fs/f2fs/segment.c   43
1 file changed, 16 insertions(+), 27 deletions(-)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 03878634a0fe..0db40271f0d8 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -791,7 +791,7 @@ static void f2fs_end_io_write(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct bio_private *p = bio->bi_private;
+ struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);
do {
struct page *page = bvec->bv_page;
@@ -802,21 +802,21 @@ static void f2fs_end_io_write(struct bio *bio, int err)
SetPageError(page);
if (page->mapping)
set_bit(AS_EIO, &page->mapping->flags);
- set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
- p->sbi->sb->s_flags |= MS_RDONLY;
+
+ set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+ sbi->sb->s_flags |= MS_RDONLY;
}
end_page_writeback(page);
- dec_page_count(p->sbi, F2FS_WRITEBACK);
+ dec_page_count(sbi, F2FS_WRITEBACK);
} while (bvec >= bio->bi_io_vec);
- if (p->is_sync)
- complete(p->wait);
+ if (bio->bi_private)
+ complete(bio->bi_private);
- if (!get_pages(p->sbi, F2FS_WRITEBACK) &&
- !list_empty(&p->sbi->cp_wait.task_list))
- wake_up(&p->sbi->cp_wait);
+ if (!get_pages(sbi, F2FS_WRITEBACK) &&
+ !list_empty(&sbi->cp_wait.task_list))
+ wake_up(&sbi->cp_wait);
- kfree(p);
bio_put(bio);
}
@@ -838,7 +838,6 @@ static void do_submit_bio(struct f2fs_sb_info *sbi,
int rw = sync ? WRITE_SYNC : WRITE;
enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io = &sbi->write_io[btype];
- struct bio_private *p;
if (!io->bio)
return;
@@ -851,18 +850,16 @@ static void do_submit_bio(struct f2fs_sb_info *sbi,
trace_f2fs_submit_write_bio(sbi->sb, rw, btype, io->bio);
- p = io->bio->bi_private;
- p->sbi = sbi;
- io->bio->bi_end_io = f2fs_end_io_write;
-
+ /*
+ * META_FLUSH comes only from the checkpoint procedure, and we should
+ * wait for this metadata bio for FS consistency.
+ */
if (type == META_FLUSH) {
DECLARE_COMPLETION_ONSTACK(wait);
- p->is_sync = true;
- p->wait = &wait;
+ io->bio->bi_private = &wait;
submit_bio(rw, io->bio);
wait_for_completion(&wait);
} else {
- p->is_sync = false;
submit_bio(rw, io->bio);
}
io->bio = NULL;
@@ -897,18 +894,10 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
do_submit_bio(sbi, type, false);
alloc_new:
if (io->bio == NULL) {
- struct bio_private *priv;
-retry:
- priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
- if (!priv) {
- cond_resched();
- goto retry;
- }
-
bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
io->bio = f2fs_bio_alloc(bdev, bio_blocks);
io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
- io->bio->bi_private = priv;
+ io->bio->bi_end_io = f2fs_end_io_write;
/*
* The end_io will be assigned at the submission phase.
* Until then, let bio_add_page() merge consecutive IOs as much
* as possible.
*/
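The synchronous META_FLUSH path above leans on the stock kernel completion API instead of the removed bio_private bookkeeping. A minimal sketch of that submit-and-wait pattern, using only calls visible in the hunks above (the wrapper name submit_and_maybe_wait is hypothetical, for illustration):

    /*
     * Submit-and-wait: an on-stack completion rides in bi_private and
     * f2fs_end_io_write() fires complete() on it.  The stack slot stays
     * valid because wait_for_completion() returns only after end_io ran.
     */
    static void submit_and_maybe_wait(int rw, struct bio *bio, bool is_meta_flush)
    {
    	if (is_meta_flush) {
    		DECLARE_COMPLETION_ONSTACK(wait);

    		bio->bi_private = &wait;
    		submit_bio(rw, bio);
    		wait_for_completion(&wait);
    	} else {
    		bio->bi_private = NULL;	/* async: end_io has nothing to signal */
    		submit_bio(rw, bio);
    	}
    }

This is why the is_sync/wait pair in struct bio_private could be dropped: a NULL-or-completion bi_private encodes the same two states with no allocation at all.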